Dataset fields (name: type, observed size range):
query: string, length 7 to 3.85k
document: string, length 11 to 430k
metadata: dict
negatives: sequence, length 0 to 101
negative_scores: sequence, length 0 to 101
document_score: string, length 3 to 10
document_rank: string, 102 distinct values
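Read as a schema, each row pairs one query with one positive document and up to 101 scored hard negatives. A hypothetical Go struct for a deserialized row is sketched below; the struct name, the field types, and the choice to keep scores as strings (as they appear in the raw dump) are illustrative assumptions, not part of the dataset definition.

// TripletRow is a hypothetical container for one dataset row;
// nothing in the dump defines this type.
type TripletRow struct {
	Query          string                 // docstring-style query, 7 to 3.85k chars
	Document       string                 // positive code snippet
	Metadata       map[string]interface{} // e.g. the "objective" block below
	Negatives      []string               // 0 to 101 hard-negative snippets
	NegativeScores []string               // similarity scores, serialized as strings
	DocumentScore  string                 // score of the positive document
	DocumentRank   string                 // rank of the positive among candidates
}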
Execute begins execution of the query and returns a channel to receive rows.
func (e *Executor) Execute() (<-chan *Row, error) {
	// Initialize processors.
	for _, p := range e.processors {
		p.start()
	}

	// Create output channel and stream data in a separate goroutine.
	out := make(chan *Row, 0)
	go e.execute(out)

	return out, nil
}
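A minimal caller sketch, assuming the *Executor above is already constructed and that fmt is imported; this is an illustration of the channel contract, not code from the source. Because the executor's goroutine closes the channel when it finishes, ranging over it terminates on its own:

func consumeRows(e *Executor) error {
	rows, err := e.Execute()
	if err != nil {
		return err
	}
	// The executor's goroutine closes the channel after the last row,
	// so this loop ends without any extra signaling.
	for row := range rows {
		fmt.Printf("%+v\n", row)
	}
	return nil
}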
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (e *RawExecutor) Execute(closing <-chan struct{}) <-chan *models.Row {\n\tout := make(chan *models.Row, 0)\n\tgo e.execute(out, closing)\n\treturn out\n}", "func (vc *vdbClient) Execute(query string) (*sqltypes.Result, error) {\n\t// Number of rows should never exceed relayLogMaxItems.\n\treturn vc.ExecuteFetch(query, relayLogMaxItems)\n}", "func (q *Query) Exec(ctx context.Context) (*QueryResult, error) {\n\tvar r QueryResult\n\n\tif q.client == nil || !q.client.Started() {\n\t\treturn &r, fmt.Errorf(\"client or db is nil\")\n\t}\n\n\tswitch q.action {\n\tcase \"select\":\n\t\trows, err := q.execSelect(ctx)\n\t\tr.Rows = rows\n\t\treturn &r, err\n\tcase \"insert\":\n\t\trows, err := q.execInsert(ctx)\n\t\tr.Rows = rows\n\t\treturn &r, err\n\tcase \"update\":\n\t\tvar err error\n\t\tif len(q.returning) == 0 {\n\t\t\tr.RowsAffected, err = q.execUpdate(ctx)\n\t\t} else {\n\t\t\tr.Rows, err = q.execUpdateR(ctx)\n\t\t}\n\t\treturn &r, err\n\tcase \"delete\":\n\t\tvar err error\n\t\tif len(q.returning) == 0 {\n\t\t\tr.RowsAffected, err = q.execDelete(ctx)\n\t\t} else {\n\t\t\tr.Rows, err = q.execDeleteR(ctx)\n\t\t}\n\t\treturn &r, err\n\tdefault:\n\t\treturn &r, fmt.Errorf(\"unsupported action %v\", q.action)\n\t}\n}", "func (vtc *VTConn) Execute(query string, bindVars map[string]interface{}, keyspace string, shards []string) (*mproto.QueryResult, error) {\n\tvtc.mu.Lock()\n\tdefer vtc.mu.Unlock()\n\n\tqr := new(mproto.QueryResult)\n\tallErrors := new(concurrency.AllErrorRecorder)\n\tswitch len(shards) {\n\tcase 0:\n\t\treturn new(mproto.QueryResult), nil\n\tcase 1:\n\t\t// Fast-path for single shard execution\n\t\tvar err error\n\t\tqr, err = vtc.execOnShard(query, bindVars, keyspace, shards[0])\n\t\tallErrors.RecordError(err)\n\tdefault:\n\t\tresults := make(chan *mproto.QueryResult, len(shards))\n\t\tvar wg sync.WaitGroup\n\t\tfor shard := range unique(shards) {\n\t\t\twg.Add(1)\n\t\t\tgo func(shard string) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tinnerqr, err := vtc.execOnShard(query, bindVars, keyspace, shard)\n\t\t\t\tif err != nil {\n\t\t\t\t\tallErrors.RecordError(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tresults <- innerqr\n\t\t\t}(shard)\n\t\t}\n\t\tgo func() {\n\t\t\twg.Wait()\n\t\t\tclose(results)\n\t\t}()\n\t\tfor innerqr := range results {\n\t\t\tappendResult(qr, innerqr)\n\t\t}\n\t}\n\tif allErrors.HasErrors() {\n\t\tif vtc.transactionId != 0 {\n\t\t\terrstr := allErrors.Error().Error()\n\t\t\t// We cannot recover from these errors\n\t\t\tif strings.Contains(errstr, \"tx_pool_full\") || strings.Contains(errstr, \"not_in_tx\") {\n\t\t\t\tvtc.rollback()\n\t\t\t}\n\t\t}\n\t\treturn nil, allErrors.Error()\n\t}\n\treturn qr, nil\n}", "func (stmt *statement) Query(ctx context.Context, db Executor, handler func(rows *sql.Rows)) error {\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\n\t// Fetch rows\n\trows, err := db.QueryContext(ctx, stmt.String(), stmt.args...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Iterate through rows of returned dataset\n\tfor rows.Next() {\n\t\tif len(stmt.dest) > 0 {\n\t\t\terr = rows.Scan(stmt.dest...)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\t// Call a callback function\n\t\thandler(rows)\n\t}\n\t// Check for errors during rows \"Close\".\n\t// This may be more important if multiple statements are executed\n\t// in a single batch and rows were written as well as read.\n\tif closeErr := rows.Close(); closeErr != nil {\n\t\treturn closeErr\n\t}\n\n\t// Check for row scan errors.\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\t// Check for errors during row iteration.\n\treturn rows.Err()\n}", "func (vtc *VTConn) StreamExecute(query string, bindVars map[string]interface{}, keyspace string, shards []string, sendReply func(reply interface{}) error) error {\n\tvtc.mu.Lock()\n\tdefer vtc.mu.Unlock()\n\n\tif vtc.transactionId != 0 {\n\t\treturn fmt.Errorf(\"cannot stream in a transaction\")\n\t}\n\tresults := make(chan *mproto.QueryResult, len(shards))\n\tallErrors := new(concurrency.AllErrorRecorder)\n\tvar wg sync.WaitGroup\n\tfor shard := range unique(shards) {\n\t\twg.Add(1)\n\t\tgo func(shard string) {\n\t\t\tdefer wg.Done()\n\t\t\tsdc, _ := vtc.getConnection(keyspace, shard)\n\t\t\tsr, errFunc := sdc.StreamExecute(query, bindVars)\n\t\t\tfor qr := range sr {\n\t\t\t\tresults <- qr\n\t\t\t}\n\t\t\terr := errFunc()\n\t\t\tif err != nil {\n\t\t\t\tallErrors.RecordError(err)\n\t\t\t}\n\t\t}(shard)\n\t}\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(results)\n\t}()\n\tvar replyErr error\n\tfor innerqr := range results {\n\t\t// We still need to finish pumping\n\t\tif replyErr != nil {\n\t\t\tcontinue\n\t\t}\n\t\treplyErr = sendReply(innerqr)\n\t}\n\tif replyErr != nil {\n\t\tallErrors.RecordError(replyErr)\n\t}\n\treturn allErrors.Error()\n}", "func (stc *ScatterConn) Execute(query string, bindVars map[string]interface{}, keyspace string, shards []string) (*mproto.QueryResult, error) {\n\tstc.mu.Lock()\n\tdefer stc.mu.Unlock()\n\n\tqr := new(mproto.QueryResult)\n\tallErrors := new(concurrency.AllErrorRecorder)\n\tswitch len(shards) {\n\tcase 0:\n\t\treturn qr, nil\n\tcase 1:\n\t\t// Fast-path for single shard execution\n\t\tvar err error\n\t\tqr, err = stc.execOnShard(query, bindVars, keyspace, shards[0])\n\t\tallErrors.RecordError(err)\n\tdefault:\n\t\tresults := make(chan *mproto.QueryResult, len(shards))\n\t\tvar wg sync.WaitGroup\n\t\tfor shard := range unique(shards) {\n\t\t\twg.Add(1)\n\t\t\tgo func(shard string) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tinnerqr, err := stc.execOnShard(query, bindVars, keyspace, shard)\n\t\t\t\tif err != nil {\n\t\t\t\t\tallErrors.RecordError(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tresults <- innerqr\n\t\t\t}(shard)\n\t\t}\n\t\tgo func() {\n\t\t\twg.Wait()\n\t\t\tclose(results)\n\t\t}()\n\t\tfor innerqr := range results {\n\t\t\tappendResult(qr, innerqr)\n\t\t}\n\t}\n\tif allErrors.HasErrors() {\n\t\tif stc.transactionId != 0 {\n\t\t\terrstr := allErrors.Error().Error()\n\t\t\t// We cannot recover from these errors\n\t\t\tif strings.Contains(errstr, \"tx_pool_full\") || strings.Contains(errstr, \"not_in_tx\") {\n\t\t\t\tstc.rollback()\n\t\t\t}\n\t\t}\n\t\treturn nil, allErrors.Error()\n\t}\n\treturn qr, nil\n}", "func (c *Conn) Query(sql string, args ...interface{}) (*Rows, error) {\n\tc.lastActivityTime = time.Now()\n\n\trows := c.getRows(sql, args)\n\n\tif err := c.lock(); err != nil {\n\t\trows.abort(err)\n\t\treturn rows, err\n\t}\n\trows.unlockConn = true\n\n\tps, ok := c.preparedStatements[sql]\n\tif !ok {\n\t\tvar err error\n\t\tps, err = c.Prepare(\"\", sql)\n\t\tif err != nil {\n\t\t\trows.abort(err)\n\t\t\treturn rows, rows.err\n\t\t}\n\t}\n\trows.sql = ps.SQL\n\trows.fields = ps.FieldDescriptions\n\terr := c.sendPreparedQuery(ps, args...)\n\tif err != nil {\n\t\trows.abort(err)\n\t}\n\treturn rows, rows.err\n}", "func (stc *ScatterConn) StreamExecute(query string, bindVars map[string]interface{}, keyspace string, shards []string, sendReply func(reply interface{}) error) error {\n\tstc.mu.Lock()\n\tdefer stc.mu.Unlock()\n\n\tif stc.transactionId != 0 
{\n\t\treturn fmt.Errorf(\"cannot stream in a transaction\")\n\t}\n\tresults := make(chan *mproto.QueryResult, len(shards))\n\tallErrors := new(concurrency.AllErrorRecorder)\n\tvar wg sync.WaitGroup\n\tfor shard := range unique(shards) {\n\t\twg.Add(1)\n\t\tgo func(shard string) {\n\t\t\tdefer wg.Done()\n\t\t\tsdc, _ := stc.getConnection(keyspace, shard)\n\t\t\tsr, errFunc := sdc.StreamExecute(query, bindVars)\n\t\t\tfor qr := range sr {\n\t\t\t\tresults <- qr\n\t\t\t}\n\t\t\terr := errFunc()\n\t\t\tif err != nil {\n\t\t\t\tallErrors.RecordError(err)\n\t\t\t}\n\t\t}(shard)\n\t}\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(results)\n\t}()\n\tvar replyErr error\n\tfor innerqr := range results {\n\t\t// We still need to finish pumping\n\t\tif replyErr != nil {\n\t\t\tcontinue\n\t\t}\n\t\treplyErr = sendReply(innerqr)\n\t}\n\tif replyErr != nil {\n\t\tallErrors.RecordError(replyErr)\n\t}\n\treturn allErrors.Error()\n}", "func (e *sqlExecutor) Execute(ctx context.Context, c *sqlconf.Config) error {\n\tif err := c.Validate(); err != nil {\n\t\treturn err\n\t}\n\tdb, err := c.DB()\n\tif err != nil {\n\t\treturn nil\n\t}\n\tif err := setupDB(db, c); err != nil {\n\t\treturn err\n\t}\n\tif c.Concurrent {\n\t\treturn e.execParallel(ctx, db, c)\n\t}\n\tfor _, payload := range c.Payloads {\n\t\t_, err := db.ExecContext(ctx, payload.Exec)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (c *ConnWrapper) Query(query string, args []driver.Value) (rows driver.Rows, err error) {\n\tspan, _ := opentracing.StartSpanFromContext(\n\t\temptyCtx,\n\t\tc.integration.getOperationName(c.dsn),\n\t)\n\n\tdefer span.Finish()\n\n\trawSpan, ok := tracer.GetRaw(span)\n\tif ok {\n\t\tc.integration.beforeCall(query, rawSpan, c.dsn)\n\t}\n\ttracer.OnSpanStarted(span)\n\n\tif queryer, ok := c.Conn.(driver.Queryer); ok {\n\t\trows, err = queryer.Query(query, args)\n\t\tif err != nil {\n\t\t\tutils.SetSpanError(span, err)\n\t\t}\n\t\treturn\n\t}\n\n\treturn nil, driver.ErrSkip\n}", "func (p *Pool) Execute(query string) (resp []Response, err error) {\n\tpc, err := p.Get()\n\tif err != nil {\n\t\tfmt.Printf(\"Error aquiring connection from pool: %s\", err)\n\t\treturn nil, err\n\t}\n\tdefer pc.Close()\n\treturn pc.Client.Execute(query)\n}", "func (ctrl *PGCtrl) Execute(q string) error {\n\t_, err := ctrl.conn.Exec(q)\n\treturn err\n}", "func (q *Query) Run() (int, int, error) {\n\treturn q.execute()\n}", "func (c *ConnWrapper) Exec(query string, args []driver.Value) (res driver.Result, err error) {\n\tspan, _ := opentracing.StartSpanFromContext(\n\t\temptyCtx,\n\t\tc.integration.getOperationName(c.dsn),\n\t)\n\n\tdefer span.Finish()\n\n\trawSpan, ok := tracer.GetRaw(span)\n\tif ok {\n\t\tc.integration.beforeCall(query, rawSpan, c.dsn)\n\t}\n\ttracer.OnSpanStarted(span)\n\n\tif execer, ok := c.Conn.(driver.Execer); ok {\n\t\tres, err = execer.Exec(query, args)\n\t\tif err != nil {\n\t\t\tutils.SetSpanError(span, err)\n\t\t}\n\t\treturn\n\t}\n\n\treturn nil, driver.ErrSkip\n}", "func (si *ScanIterator) Execute() Tuple {\n\tresult := si.tuples[si.idx]\n\tsi.idx++\n\treturn result\n}", "func (t *Tx) Execute(q *neoism.CypherQuery) error {\n\tqs := []*neoism.CypherQuery{q}\n\treturn t.ExecuteMany(qs)\n}", "func (stmt *Statement) Exec(params ...interface{}) (\n rows []*Row, res *Result, err os.Error) {\n\n res, err = stmt.Run(params...)\n if err != nil {\n return\n }\n // Read rows\n var row *Row\n for {\n row, err = res.GetRow()\n if err != nil || row == nil {\n break\n }\n rows = append(rows, row)\n }\n 
return\n}", "func (h StmtHandle) Query(ctx context.Context, args ...interface{}) (*pgx.Rows, error) {\n\th.check()\n\tp := h.s.sr.mcp.Get()\n\tswitch h.s.sr.method {\n\tcase prepare:\n\t\treturn p.QueryEx(ctx, h.s.prepared.Name, nil /* options */, args...)\n\n\tcase noprepare:\n\t\treturn p.QueryEx(ctx, h.s.sql, nil /* options */, args...)\n\n\tcase simple:\n\t\treturn p.QueryEx(ctx, h.s.sql, simpleProtocolOpt, args...)\n\n\tdefault:\n\t\tpanic(\"invalid method\")\n\t}\n}", "func (node *PostgresNode) Execute(dbname string, sql string,\n\tparams ...interface{}) {\n\n\tnode.Fetch(dbname, sql, params...).Close()\n}", "func (c *connImpl) Exec(query string, args []driver.Value) (driver.Result, error) {\n\t// http://code.google.com/p/go-wiki/wiki/InterfaceSlice\n\ttmp := make([]interface{}, len(args))\n\tfor i, arg := range args {\n\t\ttmp[i] = arg\n\t}\n\tif err := c.c.Exec(query, tmp...); err != nil {\n\t\treturn nil, err\n\t}\n\treturn c, nil // FIXME RowAffected/noRows\n}", "func Execute(db *sql.DB, query string, args ...interface{}) error {\n\treturn crdb.Execute(func() error {\n\t\t_, err := db.Exec(query, args...)\n\t\treturn err\n\t})\n}", "func (l *Lock) StreamExecute(vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error {\n\tqr, err := l.Execute(vcursor, bindVars, wantfields)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn callback(qr)\n}", "func (stmt *Statement) Run(params ...interface{}) (res *Result, err os.Error) {\n defer stmt.db.unlockIfError(&err)\n defer catchOsError(&err)\n stmt.db.lock()\n\n if stmt.db.conn == nil {\n return nil, NOT_CONN_ERROR\n }\n if stmt.db.unreaded_rows {\n return nil, UNREADED_ROWS_ERROR\n }\n\n // Bind parameters if any\n if len(params) != 0 {\n stmt.BindParams(params...)\n }\n\n // Send EXEC command with binded parameters\n stmt.sendCmdExec()\n // Get response\n res = stmt.db.getResponse(true)\n res.binary = true\n return\n}", "func (ps *RedisPipelineSessionImpl) Execute() ([]*CommandResponse, error) {\n\tps.sessionResponseChan = make(chan *SessionResponse, len(ps.session))\n\n\tif len(ps.session) < 1 {\n\t\treturn nil, errors.New(\"redis pipeline session is empty\")\n\t}\n\n\tfor nodeIP, sess := range ps.session {\n\t\t//set up response channel to same with other session\n\t\tsess.responseChan = ps.sessionResponseChan\n\t\tif _, ok := ps.pipelineHub.nodes[nodeIP]; !ok {\n\t\t\tif sess.status.stopProcessIfAllowed() {\n\t\t\t\tgo sess.reply(nil, fmt.Errorf(\"node %s is down\", nodeIP))\n\t\t\t}\n\t\t} else {\n\t\t\t//session should be distributed to responsible node\n\t\t\tgo ps.pipelineHub.sendToPipelineHub(nodeIP, sess)\n\t\t}\n\t}\n\n\treturn ps.waitResponse()\n}", "func (b *Blueprint) Execute(conn Connection) []string {\n\tgrammar := conn.GetQueryGrammar()\n\tstatements := b.toSQL(&conn, grammar)\n\n\tfor _, statement := range statements {\n\t\tfmt.Println(statement)\n\t\tif _, err := conn.Exec(statement); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\treturn statements\n}", "func (this *SessionController) QueryRow(jobContext JobExecutionContextInterface){\n\tconst MethodName\t=\t\"QueryRow\"\n\n\t//log some debuf info if in debug mode\n\tthis.Debugf(jobContext.IsDebug(),FormatStruct,jobContext)\n\tthis.Debugf(jobContext.IsDebug(),FormatStruct,jobContext.GetJobData())\n\n\t//store start time- for reporting purposes\n\tjobContext.StartTime()\n\n\t//all defered functions\n\tdefer func() {\n\t\tvar err error\n\t\tif recovered := recover(); recovered != nil {\n\t\t\tswitch 
v:=recovered.(type){\n\t\t\t\tcase error: err=v\n\t\t\t\tdefault: err=StringError(\"unknown error\")\n\t\t\t}\n\t\t\tthis.Debugf(jobContext.IsDebug(),\"panic %s\\n\",err.Error())\n\t\t\tjobContext.SetErrorMessage(err.Error())\n\t\t}\n\t\t//increase number of attmpts\n\t\tjobContext.IncreaseAttempts()\n\t\t//record data\n\t\tjobContext.StopTime()\n\t\t//notify producer that another job has finished\n\t\tjobContext.Finish()\n\t\t//\n\t\tthis.Debugf(jobContext.IsDebug(),FormatStruct,jobContext)\n\t\tthis.Debugf(jobContext.IsDebug(),FormatStruct,jobContext.GetJobData())\n\t}()\n\n\t//how to use connection pool?\n\tdb, err := sql.Open(\"mysql\", jobContext.GetDsn())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t//close connection hence no side effects\n\tdefer db.Close()\n\t//log.Print(\"connection open \", Dsn)\n\n\t//iterate all 'set'\n\tfor _,stmt:=range jobContext.GetPreSteps() {\n\t\t_, err := db.Exec(stmt)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tthis.Debugf(jobContext.IsDebug(),\"pre-exec: %s\",stmt)\n\t}\n\n\tvar result sql.Result\n\t//all data source details should be well encapsulated\n\t//result, err = db.Exec(jobContext.JobData.Query)\n\tvar value interface{}\n\terr =\tdb.QueryRow(jobContext.GetQuery()).Scan(value)\n\tjobContext.SetValue(value)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t//if driver supports rows affected and last inserted id\n\tif rowsAffected,err:=result.RowsAffected();err==nil{\n\t\tjobContext.SetRowsAffected(uint64(rowsAffected))\n\t}\n\n\tthis.Debugf(jobContext.IsDebug(),\"exec query %s \\n\",jobContext.GetQuery())\n\tthis.Debugf(jobContext.IsDebug(),\"rows affected: %d\\n\",jobContext.GetRowsAffected())\n}", "func (_Trebuchet *TrebuchetTransactor) Execute(opts *bind.TransactOpts, _data [][]byte) (*types.Transaction, error) {\n\treturn _Trebuchet.contract.Transact(opts, \"execute\", _data)\n}", "func (s StmtWrapper) Query(args []driver.Value) (rows driver.Rows, err error) {\n\tspan, _ := opentracing.StartSpanFromContext(\n\t\ts.ctx,\n\t\ts.integration.getOperationName(s.dsn),\n\t)\n\n\tdefer span.Finish()\n\n\trawSpan, ok := tracer.GetRaw(span)\n\tif ok {\n\t\ts.integration.beforeCall(s.query, rawSpan, s.dsn)\n\t}\n\ttracer.OnSpanStarted(span)\n\n\trows, err = s.Stmt.Query(args)\n\tif err != nil {\n\t\tutils.SetSpanError(span, err)\n\t}\n\treturn\n}", "func (d *Database) Execute(query string, args ...interface{}) (sql.Result, error) {\n\tvar result sql.Result\n\n\tstmtIns, err := d.Conn.Prepare(query)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\tdefer stmtIns.Close()\n\n\tresult, err = stmtIns.Exec(args...)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\n\treturn result, nil\n}", "func (_Trebuchet *TrebuchetSession) Execute(_data [][]byte) (*types.Transaction, error) {\n\treturn _Trebuchet.Contract.Execute(&_Trebuchet.TransactOpts, _data)\n}", "func (pool *SessionPool) Execute(stmt string) (*ResultSet, error) {\n\treturn pool.ExecuteWithParameter(stmt, map[string]interface{}{})\n}", "func (r *Repository) Execute(command string, args ...interface{}) (middleware.Result, error) {\n\tconn, err := r.Database.Get()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\terr = conn.Close()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"alert DASRepo.Execute(): close database connection failed.\\n%s\", err.Error())\n\t\t}\n\t}()\n\n\treturn conn.Execute(command, args...)\n}", "func (c *PostgreSQLConnection) Execute(query string, arguments []interface{}) sql.Result {\n\tif arguments == nil {\n\t\tresult, err := c.db.Exec(query)\n\n\t\tif err != 
nil {\n\t\t\tlog.Printf(\"[!] Couldn't execute query. Reason %v\", err)\n\t\t\treturn nil\n\t\t}\n\n\t\treturn result\n\t}\n\n\tstmt, err := c.db.Prepare(query)\n\n\tif err != nil {\n\t\tlog.Printf(\"[!] Couldn't prepare statement. Reason %v\", err)\n\t\treturn nil\n\t}\n\n\tresult, err := stmt.Exec(arguments...)\n\n\tif err != nil {\n\t\tlog.Printf(\"[!] Couldn't execute query. Reason %v\", err)\n\t\treturn nil\n\t}\n\n\treturn result\n}", "func (s *stmt) Query(args []driver.Value) (driver.Rows, error) {\n\treturn s.c.Query(s.query, args)\n}", "func (e *ShowMeasurementsExecutor) Execute() <-chan *influxql.Row {\n\t// Create output channel and stream data in a separate goroutine.\n\tout := make(chan *influxql.Row, 0)\n\n\tgo func() {\n\t\t// Open the mappers.\n\t\tfor _, m := range e.mappers {\n\t\t\tif err := m.Open(); err != nil {\n\t\t\t\tout <- &influxql.Row{Err: err}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// Create a set to hold measurement names from mappers.\n\t\tset := map[string]struct{}{}\n\t\t// Iterate through mappers collecting measurement names.\n\t\tfor _, m := range e.mappers {\n\t\t\t// Get the data from the mapper.\n\t\t\tc, err := m.NextChunk()\n\t\t\tif err != nil {\n\t\t\t\tout <- &influxql.Row{Err: err}\n\t\t\t\treturn\n\t\t\t} else if c == nil {\n\t\t\t\t// Mapper had no data.\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Convert the mapper chunk to a string array of measurement names.\n\t\t\tmms, ok := c.([]string)\n\t\t\tif !ok {\n\t\t\t\tout <- &influxql.Row{Err: fmt.Errorf(\"show measurements mapper returned invalid type: %T\", c)}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Add the measurement names to the set.\n\t\t\tfor _, mm := range mms {\n\t\t\t\tset[mm] = struct{}{}\n\t\t\t}\n\t\t}\n\n\t\t// Convert the set into an array of measurement names.\n\t\tmeasurements := make([]string, 0, len(set))\n\t\tfor mm := range set {\n\t\t\tmeasurements = append(measurements, mm)\n\t\t}\n\t\t// Sort the names.\n\t\tsort.Strings(measurements)\n\n\t\t// Calculate OFFSET and LIMIT\n\t\toff := e.stmt.Offset\n\t\tlim := len(measurements)\n\t\tstmtLim := e.stmt.Limit\n\n\t\tif stmtLim > 0 && off+stmtLim < lim {\n\t\t\tlim = off + stmtLim\n\t\t} else if off > lim {\n\t\t\toff, lim = 0, 0\n\t\t}\n\n\t\t// Put the results in a row and send it.\n\t\trow := &influxql.Row{\n\t\t\tName: \"measurements\",\n\t\t\tColumns: []string{\"name\"},\n\t\t\tValues: make([][]interface{}, 0, len(measurements)),\n\t\t}\n\n\t\tfor _, m := range measurements[off:lim] {\n\t\t\tv := []interface{}{m}\n\t\t\trow.Values = append(row.Values, v)\n\t\t}\n\n\t\tif len(row.Values) > 0 {\n\t\t\tout <- row\n\t\t}\n\n\t\tclose(out)\n\t\t// It's important that all resources are released when execution completes.\n\t\te.close()\n\t}()\n\treturn out\n}", "func ExecuteQuery(c *clientv1.Client, query string) (*clientv1.Response, error) {\n\tlog.Infof(\"action=ExecuteQuery q=%s client=%+v\", query, c)\n\tq := clientv1.Query{\n\t\tCommand: query,\n\t\tDatabase: Settings.Database,\n\t\tChunked: true,\n\t}\n\tresponse, err := c.Query(q)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn response, nil\n}", "func (p *BeeswaxServiceClient) ExecuteAndWait(query *Query, clientCtx LogContextId) (r *QueryHandle, err error) {\n\tif err = p.sendExecuteAndWait(query, clientCtx); err != nil {\n\t\treturn\n\t}\n\treturn p.recvExecuteAndWait()\n}", "func (trd *trxDispatcher) execute() {\n\t// don't forget to sign off after we are done\n\tdefer func() 
{\n\t\tclose(trd.outAccount)\n\t\tclose(trd.outLog)\n\t\tclose(trd.outTransaction)\n\n\t\ttrd.mgr.finished(trd)\n\t}()\n\n\t// wait for transactions and process them\n\tfor {\n\t\t// try to read next transaction\n\t\tselect {\n\t\tcase <-trd.sigStop:\n\t\t\treturn\n\t\tcase <-trd.bot.C:\n\t\t\ttrd.updateLastSeenBlock()\n\t\tcase evt, ok := <-trd.inTransaction:\n\t\t\t// is the channel even available for reading\n\t\t\tif !ok {\n\t\t\t\tlog.Notice(\"trx channel closed, terminating %s\", trd.name())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif evt.blk == nil || evt.trx == nil {\n\t\t\t\tlog.Criticalf(\"dispatcher dry loop\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttrd.process(evt)\n\t\t}\n\t}\n}", "func (c *sqlmock) Exec(query string, args []driver.Value) (driver.Result, error) {\n\tnamedArgs := make([]driver.NamedValue, len(args))\n\tfor i, v := range args {\n\t\tnamedArgs[i] = driver.NamedValue{\n\t\t\tOrdinal: i + 1,\n\t\t\tValue: v,\n\t\t}\n\t}\n\n\tex, err := c.exec(query, namedArgs)\n\tif ex != nil {\n\t\ttime.Sleep(ex.delay)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ex.result, nil\n}", "func (q *Query) Exec() (*Response, error) {\n\tresp := &Response{}\n\n\t// Connect to socket\n\tconn, err := q.dial()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer conn.Close()\n\n\t// Send command data\n\tconn.Write([]byte(q.buildCmd()))\n\n\t// Read response header\n\tdata := make([]byte, 16)\n\n\t_, err = conn.Read(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp.Status, err = strconv.Atoi(string(data[:3]))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Receive response data\n\tbuf := bytes.NewBuffer(nil)\n\n\tfor {\n\t\tdata = make([]byte, 1024)\n\n\t\t_, err = conn.Read(data)\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tbuf.Write(bytes.TrimRight(data, \"\\x00\"))\n\t}\n\n\tif buf.Len() == 0 {\n\t\treturn resp, nil\n\t}\n\n\tif resp.Status != 200 {\n\t\treturn nil, fmt.Errorf(buf.String())\n\t}\n\n\t// Parse received data for records\n\tresp.Records, err = q.parse(buf.Bytes())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}", "func (p *PgSQL) Execute(query string, args ...interface{}) (sql.Result, error) {\n\tvar result sql.Result\n\n\tstmtIns, err := p.Connection.Prepare(query)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\tdefer stmtIns.Close()\n\n\tresult, err = stmtIns.Exec(args...)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\n\treturn result, nil\n}", "func (q *Query) Execute() (*Result, error) {\n\tv := url.Values{}\n\tv.Set(\"format\", \"json\")\n\tif q.from.IsZero() {\n\t\tv.Set(\"from\", \"-1day\")\n\t} else {\n\t\tv.Set(\"from\", timeToGraphite(q.from))\n\t}\n\tif !q.to.IsZero() {\n\t\tv.Set(\"until\", timeToGraphite(q.to))\n\t}\n\tfor _, t := range q.targets {\n\t\tv.Add(\"target\", t)\n\t}\n\n\tb, err := DefaultConnection.Query(\"/render/\", v)\n\tif err != nil {\n\t\treturn &Result{}, fmt.Errorf(\"Query execution error: %v\", err)\n\t}\n\n\tr, err := unmarshalResult(b)\n\treturn r, err\n}", "func (q *Query) Exec() []byte {\n\tchainReadOnly := tsdb.ReadOnly(q.Path).Refresh()\n\tbstream := chainReadOnly.BlockStream()\n\n\tdata, _ := q.exec(*bstream, true).([]byte)\n\treturn data\n}", "func (c *Connection) Execute(cmd string, printStdout bool) error {\n\tsession, err := c.Client.NewSession()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer session.Close()\n\n\tfmt.Printf(\"\\n$ %s\\n\", cmd)\n\tout, err := session.CombinedOutput(cmd)\n\tif err != nil 
{\n\t\tlog.Printf(\"Error output:%s\\n\", out)\n\t\treturn err\n\t}\n\tif printStdout {\n\t\tlog.Printf(\"%s\\n\", out)\n\t}\n\treturn nil\n}", "func (_Trebuchet *TrebuchetTransactorSession) Execute(_data [][]byte) (*types.Transaction, error) {\n\treturn _Trebuchet.Contract.Execute(&_Trebuchet.TransactOpts, _data)\n}", "func Execute(query string, args ...interface{}) (sql.Result, error){\n result, err := db.Exec(query, args...)\n if err != nil && !debug { \n log.Println(err)\n }\n return result, err\n}", "func (builder QueryBuilder) Execute(ctx context.Context, options ...OperationExecutorOptionFn) (*Response, error) {\n\texecutor := NewDGoExecutor(builder.client)\n\n\tfor _, option := range options {\n\t\toption(executor)\n\t}\n\treturn executor.ExecuteQueries(ctx, builder)\n}", "func (statement *Statement) Execute(currentTable *table.Table) error {\n\tswitch statement.Type {\n\tcase StatementUnknown:\n\t\treturn errors.New(\"Unknown command\")\n\tcase StatementInsert:\n\t\trowbytes, err := helpers.RowToBytes(statement.Row)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = currentTable.Insert(rowbytes); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(\"Added Row to table\")\n\tcase StatementSelect:\n\t\tfor i := int8(0); i <= currentTable.CurrentPage; i++ {\n\t\t\tvar lastRow uint32\n\t\t\tif i == currentTable.CurrentPage {\n\t\t\t\tlastRow = currentTable.RowInPage\n\t\t\t} else {\n\t\t\t\tlastRow = uint32(table.RowsPerPage)\n\t\t\t}\n\t\t\tfor j := 0; uint32(j) < lastRow; j++ {\n\t\t\t\trowBytes := currentTable.ReadRow(i, uint32(j))\n\t\t\t\trowString, err := helpers.BytesToRow(rowBytes)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfmt.Println(rowString.ID, string(bytes.Trim(rowString.Username[:], \"\\x00\")), string(bytes.Trim(rowString.Email[:], \"\\x00\")))\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (c *BlockWriterCase) Execute(db *sql.DB) error {\n\tconcurrency := len(c.bws)\n\tch := make(chan error, concurrency)\n\tfor i := 0; i < concurrency; i++ {\n\t\tgo c.bws[i].execute(db, ch)\n\t}\n\n\tfor i := 0; i < concurrency; i++ {\n\t\terr := <-ch\n\t\tif err != nil {\n\t\t\treturn errors.Trace(err)\n\t\t}\n\t}\n\treturn nil\n}", "func (sqlHandler *sqlHandler) Execute() {\n\t// do something...\n}", "func (s *Server) Exec(c *Conn, statement string, args Args) (interface{}, error) {\n\treq := c.acquireRequest(statement, args)\n\tdefer c.releaseRequest(req)\n\treturn s.engine.handlers.exec(req)\n}", "func (s *Stmt) Query(args []driver.Value) (driver.Rows, error) {\n\texecutor, err := exec.BuildExecutor(s.res.dbType, s.txCtx.TransactionMode, s.query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\texecCtx := &types.ExecContext{\n\t\tTxCtx: s.txCtx,\n\t\tQuery: s.query,\n\t\tValues: args,\n\t}\n\n\tret, err := executor.ExecWithValue(context.Background(), execCtx,\n\t\tfunc(ctx context.Context, query string, args []driver.NamedValue) (types.ExecResult, error) {\n\t\t\tret, err := s.stmt.Query(util.NamedValueToValue(args))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\treturn types.NewResult(types.WithRows(ret)), nil\n\t\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ret.GetRows(), nil\n}", "func (w *Wrapper) runQuery() (rows *sql.Rows, err error) {\n\tw.cleanBefore()\n\tw.buildQuery()\n\tw.LastQuery = w.query\n\tw.LastParams = w.params\n\t// Calculate the execution time.\n\tvar start time.Time\n\tif w.tracing {\n\t\tstart = time.Now()\n\t}\n\t// Execute the query if the wrapper is executable.\n\tif w.executable 
{\n\t\tvar stmt *sql.Stmt\n\t\tvar count int\n\n\t\tstmt, err = w.db.Prepare(w.query)\n\t\tif err != nil {\n\t\t\tif w.tracing {\n\t\t\t\tw.saveTrace(err, w.query, start)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\trows, err = stmt.Query(w.params...)\n\t\tif err != nil {\n\t\t\tif w.tracing {\n\t\t\t\tw.saveTrace(err, w.query, start)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\terr = stmt.Close()\n\t\tif err != nil {\n\t\t\tif w.tracing {\n\t\t\t\tw.saveTrace(err, w.query, start)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tcount, err = load(rows, w.destination)\n\t\tif err != nil {\n\t\t\tif w.tracing {\n\t\t\t\tw.saveTrace(err, w.query, start)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tw.count = count\n\t}\n\tif w.tracing {\n\t\tw.saveTrace(err, w.query, start)\n\t}\n\tw.cleanAfter()\n\treturn\n}", "func (c *conn) Exec(query string, args []driver.Value) (driver.Result, error) {\n\treturn c.exec(context.Background(), query, args)\n}", "func (q *Querier) Exec(query string, args ...interface{}) (sql.Result, error) {\n\tq.logBefore(query, args)\n\tstart := time.Now()\n\n\tdbtxCtx := q.selectDBTXContext(query)\n\tres, err := dbtxCtx.ExecContext(q.ctx, query, args...)\n\tq.logAfter(query, args, time.Since(start), err)\n\treturn res, err\n}", "func (my *MySQL) Query(sql string, params ...interface{}) (\n rows []*Row, res *Result, err os.Error) {\n\n res, err = my.Start(sql, params...)\n if err != nil {\n return\n }\n // Read rows\n var row *Row\n for {\n row, err = res.GetRow()\n if err != nil || row == nil {\n break\n }\n rows = append(rows, row)\n }\n return\n}", "func (s *stmt) Exec(args []driver.Value) (driver.Result, error) {\n\treturn s.c.Exec(s.query, args)\n}", "func (q *Querier) Exec(query string, args ...interface{}) (sql.Result, error) {\n\tq.logBefore(query, args)\n\tstart := time.Now()\n\tres, err := q.dbtxCtx.ExecContext(q.ctx, query, args...)\n\tq.logAfter(query, args, time.Since(start), err)\n\treturn res, err\n}", "func (c *conn) Query(query string, args []driver.Value) (driver.Rows, error) {\n\treturn c.query(context.Background(), query, args)\n}", "func Query(db *mydb.DB, query string, args ...interface{}) (*sql.Rows, error) {\n\tvar rows *sql.Rows\n\tvar err error\n\toperationDone := make(chan bool)\n\tgo func() {\n\t\tlog.Info(\"Query readReplica DB\")\n\t\treadReplica := dbadmin.ReadReplicaRoundRobin(db)\n\t\tlog.Info(\"ReadReplica DB is \", readReplica)\n\t\trows, err = readReplica.Query(query, args...)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Error while executing query\")\n\t\t}\n\n\t\toperationDone <- true\n\t}()\n\t<-operationDone\n\n\treturn rows, err\n}", "func (s *Set) StreamExecute(vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantields bool, callback func(*sqltypes.Result) error) error {\n\tpanic(\"implement me\")\n}", "func (c *sqlmock) Query(query string, args []driver.Value) (driver.Rows, error) {\n\tnamedArgs := make([]driver.NamedValue, len(args))\n\tfor i, v := range args {\n\t\tnamedArgs[i] = driver.NamedValue{\n\t\t\tOrdinal: i + 1,\n\t\t\tValue: v,\n\t\t}\n\t}\n\n\tex, err := c.query(query, namedArgs)\n\tif ex != nil {\n\t\ttime.Sleep(ex.delay)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ex.rows, nil\n}", "func (p *CassandraClient) ExecutePreparedCqlQuery(itemId int32, values [][]byte) (r *CqlResult_, err error) {\n\tif err = p.sendExecutePreparedCqlQuery(itemId, values); err != nil {\n\t\treturn\n\t}\n\treturn p.recvExecutePreparedCqlQuery()\n}", "func (d *DB) Query(table string, idx string, args ...interface{}) ([]interface{}, error) {\n\tif d.Conn == nil 
{\n\t\tpanic(\"database is not initialized\")\n\t}\n\n\ttxn := d.Conn.Txn(false)\n\n\tit, err := txn.Get(table, idx, args...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresults := make([]interface{}, 0)\n\tfor obj := it.Next(); obj != nil; obj = it.Next() {\n\t\tresults = append(results, obj)\n\t}\n\n\treturn results, nil\n}", "func (databaseConfig *DatabaseConfig) Execute(\n\tdbCallback func(db *dbsql.DB) error,\n) (err error) {\n\n\tif databaseConfig.db == nil {\n\t\treturn fmt.Errorf(\"Need open connection of database\")\n\t}\n\n\treturn dbCallback(databaseConfig.db)\n}", "func (w *Wrapper) exec(query string, args ...interface{}) (sql.Result, error) {\n\tw.connLock.RLock()\n\tdefer w.connLock.RUnlock()\n\n\treturn w.connection.Exec(w.prepare(query), args...)\n}", "func (q *Querier) Query(query string, args ...interface{}) (*sql.Rows, error) {\n\tq.logBefore(query, args)\n\tstart := time.Now()\n\trows, err := q.dbtxCtx.QueryContext(q.ctx, query, args...)\n\tq.logAfter(query, args, time.Since(start), err)\n\treturn rows, err\n}", "func (d *sqlDB) Exec(query string, args ...interface{}) (result sql.Result, err error) {\n\terr = d.retry(func() error {\n\t\tresult, err = d.conn.Exec(query, args...)\n\t\treturn err\n\t})\n\treturn\n}", "func (c Client) Query(db, cmd string, result interface{}) (err error) {\n\tquery := client.Query{\n\t\tCommand: cmd,\n\t\tDatabase: db,\n\t\tChunked: false,\n\t\tChunkSize: 100,\n\t}\n\n\tvar response *client.Response\n\tresponse, err = c.client.Query(query)\n\n\tif response.Error() != nil {\n\t\treturn response.Error()\n\t}\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresults := response.Results\n\tif len(results) < 1 || len(results[0].Series) < 1 {\n\t\treturn\n\t}\n\n\tseries := results[0].Series[0]\n\n\terr = decode(series.Columns, series.Values, result)\n\n\treturn\n}", "func (e *Executor) execute(out chan *Row) {\n\t// TODO: Support multi-value rows.\n\n\t// Initialize map of rows by encoded tagset.\n\trows := make(map[string]*Row)\n\n\t// Combine values from each processor.\nloop:\n\tfor {\n\t\t// Retrieve values from processors and write them to the approprite\n\t\t// row based on their tagset.\n\t\tfor i, p := range e.processors {\n\t\t\t// Retrieve data from the processor.\n\t\t\tm, ok := <-p.C()\n\t\t\tif !ok {\n\t\t\t\tbreak loop\n\t\t\t}\n\n\t\t\t// Set values on returned row.\n\t\t\tfor k, v := range m {\n\t\t\t\t// Extract timestamp and tag values from key.\n\t\t\t\tb := []byte(k)\n\t\t\t\ttimestamp := int64(binary.BigEndian.Uint64(b[0:8]))\n\n\t\t\t\t// Lookup row values and populate data.\n\t\t\t\tvalues := e.createRowValuesIfNotExists(rows, e.processors[0].name(), b[8:], timestamp)\n\t\t\t\tvalues[i+1] = v\n\t\t\t}\n\t\t}\n\t}\n\n\t// Normalize rows and values.\n\t// This converts the timestamps from nanoseconds to microseconds.\n\ta := make(Rows, 0, len(rows))\n\tfor _, row := range rows {\n\t\tfor _, values := range row.Values {\n\t\t\tvalues[0] = values[0].(int64) / int64(time.Microsecond)\n\t\t}\n\t\ta = append(a, row)\n\t}\n\tsort.Sort(a)\n\n\t// Send rows to the channel.\n\tfor _, row := range a {\n\t\tout <- row\n\t}\n\n\t// Mark the end of the output channel.\n\tclose(out)\n}", "func (q *Querier) Query(query string, args ...interface{}) (*sql.Rows, error) {\n\tq.logBefore(query, args)\n\tstart := time.Now()\n\n\tdbtxCtx := q.selectDBTXContext(query)\n\trows, err := dbtxCtx.QueryContext(q.ctx, query, args...)\n\tq.logAfter(query, args, time.Since(start), err)\n\treturn rows, err\n}", "func (m *MSSQLDatastore) Exec(ctx 
context.Context, query string, args ...interface{}) (sql.Result, error) {\n\tif m == nil {\n\t\treturn nil, ErrEmptyObject\n\t}\n\n\tif _, ok := ctx.Deadline(); !ok {\n\t\tvar cancel context.CancelFunc\n\t\tctx, cancel = context.WithTimeout(ctx, QueryLimit)\n\t\tdefer cancel()\n\t}\n\n\treturn m.db.ExecContext(ctx, query, args...)\n}", "func (p PostgreSQL) Query(sql string, args ...interface{}) (pgx.Rows, error) {\n\tif p.Connection == nil {\n\t\treturn nil, fmt.Errorf(\"connection is not created\")\n\t}\n\n\treturn p.Connection.Query(context.Background(), sql, args...)\n}", "func (mcr *MiddlewareClusterRepo) Execute(command string, args ...interface{}) (middleware.Result, error) {\n\tconn, err := mcr.Database.Get()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\terr = conn.Close()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"metadata MiddlewareClusterRepo.Execute(): close database connection failed.\\n%s\", err.Error())\n\t\t}\n\t}()\n\n\treturn conn.Execute(command, args...)\n}", "func (c *Conn) Execute(m Message, family uint16, flags netlink.HeaderFlags) ([]Message, error) {\n\tnm, err := packMessage(m, family, flags)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmsgs, err := c.c.Execute(nm)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn unpackMessages(msgs)\n}", "func (c *Conn) Query(ctx context.Context, sql string, args ...any) (Rows, error) {\n\tif c.queryTracer != nil {\n\t\tctx = c.queryTracer.TraceQueryStart(ctx, c, TraceQueryStartData{SQL: sql, Args: args})\n\t}\n\n\tif err := c.deallocateInvalidatedCachedStatements(ctx); err != nil {\n\t\tif c.queryTracer != nil {\n\t\t\tc.queryTracer.TraceQueryEnd(ctx, c, TraceQueryEndData{Err: err})\n\t\t}\n\t\treturn &baseRows{err: err, closed: true}, err\n\t}\n\n\tvar resultFormats QueryResultFormats\n\tvar resultFormatsByOID QueryResultFormatsByOID\n\tmode := c.config.DefaultQueryExecMode\n\tvar queryRewriter QueryRewriter\n\noptionLoop:\n\tfor len(args) > 0 {\n\t\tswitch arg := args[0].(type) {\n\t\tcase QueryResultFormats:\n\t\t\tresultFormats = arg\n\t\t\targs = args[1:]\n\t\tcase QueryResultFormatsByOID:\n\t\t\tresultFormatsByOID = arg\n\t\t\targs = args[1:]\n\t\tcase QueryExecMode:\n\t\t\tmode = arg\n\t\t\targs = args[1:]\n\t\tcase QueryRewriter:\n\t\t\tqueryRewriter = arg\n\t\t\targs = args[1:]\n\t\tdefault:\n\t\t\tbreak optionLoop\n\t\t}\n\t}\n\n\tif queryRewriter != nil {\n\t\tvar err error\n\t\toriginalSQL := sql\n\t\toriginalArgs := args\n\t\tsql, args, err = queryRewriter.RewriteQuery(ctx, c, sql, args)\n\t\tif err != nil {\n\t\t\trows := c.getRows(ctx, originalSQL, originalArgs)\n\t\t\terr = fmt.Errorf(\"rewrite query failed: %v\", err)\n\t\t\trows.fatal(err)\n\t\t\treturn rows, err\n\t\t}\n\t}\n\n\t// Bypass any statement caching.\n\tif sql == \"\" {\n\t\tmode = QueryExecModeSimpleProtocol\n\t}\n\n\tc.eqb.reset()\n\tanynil.NormalizeSlice(args)\n\trows := c.getRows(ctx, sql, args)\n\n\tvar err error\n\tsd, explicitPreparedStatement := c.preparedStatements[sql]\n\tif sd != nil || mode == QueryExecModeCacheStatement || mode == QueryExecModeCacheDescribe || mode == QueryExecModeDescribeExec {\n\t\tif sd == nil {\n\t\t\tsd, err = c.getStatementDescription(ctx, mode, sql)\n\t\t\tif err != nil {\n\t\t\t\trows.fatal(err)\n\t\t\t\treturn rows, err\n\t\t\t}\n\t\t}\n\n\t\tif len(sd.ParamOIDs) != len(args) {\n\t\t\trows.fatal(fmt.Errorf(\"expected %d arguments, got %d\", len(sd.ParamOIDs), len(args)))\n\t\t\treturn rows, rows.err\n\t\t}\n\n\t\trows.sql = sd.SQL\n\n\t\terr = c.eqb.Build(c.typeMap, sd, 
args)\n\t\tif err != nil {\n\t\t\trows.fatal(err)\n\t\t\treturn rows, rows.err\n\t\t}\n\n\t\tif resultFormatsByOID != nil {\n\t\t\tresultFormats = make([]int16, len(sd.Fields))\n\t\t\tfor i := range resultFormats {\n\t\t\t\tresultFormats[i] = resultFormatsByOID[uint32(sd.Fields[i].DataTypeOID)]\n\t\t\t}\n\t\t}\n\n\t\tif resultFormats == nil {\n\t\t\tresultFormats = c.eqb.ResultFormats\n\t\t}\n\n\t\tif !explicitPreparedStatement && mode == QueryExecModeCacheDescribe {\n\t\t\trows.resultReader = c.pgConn.ExecParams(ctx, sql, c.eqb.ParamValues, sd.ParamOIDs, c.eqb.ParamFormats, resultFormats)\n\t\t} else {\n\t\t\trows.resultReader = c.pgConn.ExecPrepared(ctx, sd.Name, c.eqb.ParamValues, c.eqb.ParamFormats, resultFormats)\n\t\t}\n\t} else if mode == QueryExecModeExec {\n\t\terr := c.eqb.Build(c.typeMap, nil, args)\n\t\tif err != nil {\n\t\t\trows.fatal(err)\n\t\t\treturn rows, rows.err\n\t\t}\n\n\t\trows.resultReader = c.pgConn.ExecParams(ctx, sql, c.eqb.ParamValues, nil, c.eqb.ParamFormats, c.eqb.ResultFormats)\n\t} else if mode == QueryExecModeSimpleProtocol {\n\t\tsql, err = c.sanitizeForSimpleQuery(sql, args...)\n\t\tif err != nil {\n\t\t\trows.fatal(err)\n\t\t\treturn rows, err\n\t\t}\n\n\t\tmrr := c.pgConn.Exec(ctx, sql)\n\t\tif mrr.NextResult() {\n\t\t\trows.resultReader = mrr.ResultReader()\n\t\t\trows.multiResultReader = mrr\n\t\t} else {\n\t\t\terr = mrr.Close()\n\t\t\trows.fatal(err)\n\t\t\treturn rows, err\n\t\t}\n\n\t\treturn rows, nil\n\t} else {\n\t\terr = fmt.Errorf(\"unknown QueryExecMode: %v\", mode)\n\t\trows.fatal(err)\n\t\treturn rows, rows.err\n\t}\n\n\tc.eqb.reset() // Allow c.eqb internal memory to be GC'ed as soon as possible.\n\n\treturn rows, rows.err\n}", "func (b *Batch) Execute() error {\n\tif len(b.jobs) == 0 {\n\t\treturn errors.New(\"No job provided\")\n\t}\n\n\tif _, err := b.redisClient.Pipelined(func(pipe *redis.Pipeline) error {\n\t\tpipe.SAdd(fmt.Sprintf(\"%s:queues\", b.namespace), b.queue)\n\n\t\tfor _, job := range b.jobs {\n\t\t\tbuffer, err := job.Marshal()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := pipe.RPush(\n\t\t\t\tfmt.Sprintf(\"%s:queue:%s\", b.namespace, b.queue),\n\t\t\t\tbuffer,\n\t\t\t).Err(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\tb.jobs = b.jobs[:0]\n\n\treturn nil\n}", "func (s *Session) Query(stmt string, values ...interface{}) *Query {\n\tq := s.Session.Query(stmt, values...)\n\treturn wrapQuery(q, s.hosts, s.opts...)\n}", "func (s *Session) Query(query string, args ...interface{}) (*sql.Rows, error) {\n\tif s.tx != nil {\n\t\treturn s.tx.Query(query, args...)\n\t}\n\tif s.useMaster {\n\t\treturn s.db.Master().Query(query, args...)\n\t}\n\treturn s.db.Slave().Query(query, args...)\n}", "func (s *Shard) Exec(query string, args ...interface{}) (sql.Result, errs.Err) {\n\tfixArgs(args)\n\tres, stdErr := s.sqlConn.Exec(query, args...)\n\tif stdErr != nil {\n\t\treturn nil, errs.Wrap(stdErr, errInfo(\"Exec sqlConn.Exec() error\", query, args))\n\t}\n\treturn res, nil\n}", "func (d *sqlDB) Query(query string, args ...interface{}) (rows *sql.Rows, err error) {\n\terr = d.retry(func() error {\n\t\trows, err = d.conn.Query(query, args...)\n\t\treturn err\n\t})\n\treturn\n}", "func (upd *Update) StreamExecute(vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error {\n\treturn fmt.Errorf(\"query %q cannot be used for streaming\", upd.Query)\n}", "func (p *CassandraClient) 
ExecuteCqlQuery(query []byte, compression Compression) (r *CqlResult_, err error) {\n\tif err = p.sendExecuteCqlQuery(query, compression); err != nil {\n\t\treturn\n\t}\n\treturn p.recvExecuteCqlQuery()\n}", "func (err *ErrBytesRecv) Execute() error {\n\treturn executeCommand(err.config)\n}", "func (c *conn) Exec(query string, args []driver.Value) (driver.Result, error) {\n\treturn c.exec(context.Background(), query, toNamedValues(args))\n}", "func (w *Wrapper) executeQuery() (res sql.Result, err error) {\n\tw.cleanBefore()\n\tw.buildQuery()\n\tw.LastQuery = w.query\n\tw.LastParams = w.params\n\t// Calculate the execution time.\n\tvar start time.Time\n\tif w.tracing {\n\t\tstart = time.Now()\n\t}\n\t// Execute the query if the wrapper is executable.\n\tif w.executable {\n\t\tvar stmt *sql.Stmt\n\t\tvar count int64\n\t\tstmt, err = w.db.Prepare(w.query)\n\t\tif err != nil {\n\t\t\tif w.tracing {\n\t\t\t\tw.saveTrace(err, w.query, start)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tres, err = stmt.Exec(w.params...)\n\t\tif err != nil {\n\t\t\tif w.tracing {\n\t\t\t\tw.saveTrace(err, w.query, start)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tw.LastResult = res\n\t\tcount, err = res.RowsAffected()\n\t\tif err != nil {\n\t\t\tif w.tracing {\n\t\t\t\tw.saveTrace(err, w.query, start)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tw.count = int(count)\n\t\terr = stmt.Close()\n\t\tif err != nil {\n\t\t\tif w.tracing {\n\t\t\t\tw.saveTrace(err, w.query, start)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n\tif w.tracing {\n\t\tw.saveTrace(err, w.query, start)\n\t}\n\tw.cleanAfter()\n\treturn\n}", "func (lq LoggingQueryable) Exec(query string, args ...interface{}) (result sql.Result, err error) {\n\tresult, err = lq.Q.Exec(query, args...)\n\tlog.Printf(\"SQL: Exec(%v, %v) -> %v\\n\", query, args, err)\n\treturn result, err\n}", "func (e *SqlExecutor) Exec(query string, args ...interface{}) (sql.Result, error) {\n\thook := e.db.ExecutorHook()\n\thook.BeforeExec(e.ctx, query, args...)\n\tv, err := e.SqlExecutor.Exec(query, args...)\n\thook.AfterExec(e.ctx, query, args...)\n\treturn v, err\n}", "func (p *queryPlan) Execute(ctx context.Context) (*table.Table, error) {\n\t// Fetch and cache graph instances.\n\tinputGraphNames := p.stm.InputGraphNames()\n\ttracer.V(3).Trace(p.tracer, func() *tracer.Arguments {\n\t\treturn &tracer.Arguments{\n\t\t\tMsgs: []string{fmt.Sprintf(\"Caching graph instances for graphs %v\", inputGraphNames)},\n\t\t}\n\t})\n\tif err := p.stm.Init(ctx, p.store); err != nil {\n\t\treturn nil, err\n\t}\n\tp.grfs = p.stm.InputGraphs()\n\t// Retrieve the data.\n\tlo := p.stm.GlobalLookupOptions()\n\tloStr := lo.String()\n\ttracer.V(2).Trace(p.tracer, func() *tracer.Arguments {\n\t\treturn &tracer.Arguments{\n\t\t\tMsgs: []string{fmt.Sprintf(\"Setting global lookup options to %s\", loStr)},\n\t\t}\n\t})\n\tif err := p.processGraphPattern(ctx, lo); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := p.projectAndGroupBy(); err != nil {\n\t\treturn nil, err\n\t}\n\tp.orderBy()\n\terr := p.having()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tp.limit()\n\tif p.tbl.NumRows() == 0 {\n\t\t// Correct the bindings.\n\t\tt, err := table.New(p.stm.OutputBindings())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tp.tbl = t\n\t}\n\treturn p.tbl, nil\n}", "func (q *Query) Execute() (Result, error) {\n\tif q.limit == 0 {\n\t\tq.result.Release()\n\t\treturn EmptyResult, nil\n\t}\n\n\tq.sets.RLock()\n\tdefer q.sets.RUnlock()\n\tq.sets.Sort()\n\n\tif q.sort == nil {\n\t\tif q.sets.l == 0 {\n\t\t\tq.result.Release()\n\t\t\treturn 
EmptyResult, nil\n\t\t}\n\t\tq.sort = q.sets.Shift()\n\t}\n\n\tif q.sort.Len() == 0 {\n\t\tq.result.Release()\n\t\treturn EmptyResult, nil\n\t}\n\n\tl := q.sets.l\n\tif l == 0 {\n\t\treturn q.execute(noFilter)\n\t}\n\n\tsl := q.sets.s[0].Len()\n\tif sl == 0 {\n\t\tq.result.Release()\n\t\treturn EmptyResult, nil\n\t}\n\n\tq.sort.RLock()\n\tdefer q.sort.RUnlock()\n\n\tif sl < SmallSetTreshold && q.sort.Len() > 1000 && q.sort.CanRank() && q.around == 0 {\n\t\treturn q.setExecute(q.getFilter(l, 1))\n\t}\n\treturn q.execute(q.getFilter(l, 0))\n}", "func SelectExecute(profile *sqprofile.SQProfile, q *sqtables.Query) (*sqtables.DataSet, error) {\n\n\tdata, err := q.GetRowData(profile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// If Select DISTINCT then filter out duplicates\n\tif q.IsDistinct {\n\t\tdata.Distinct()\n\t}\n\n\tif q.OrderBy != nil || len(q.OrderBy) > 0 {\n\t\terr = data.SetOrder(q.OrderBy)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\terr = data.Sort()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn data, nil\n}", "func (db *DB) Query(ctx context.Context, query SQLQuery, args ...interface{}) (*sql.Rows, error) {\n\tif err := query.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn db.getReplica().QueryContext(ctx, query.String(), args...)\n}", "func (this *Database) Exec(sql string, args ...interface{}) sql.Result {\n\t//fmt.Printf(\"About to EXEC\\n\")\n\tresult, err := this.db.Exec(sql, args...)\n\t//fmt.Printf(\"Done EXEC\\n\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn result\n}", "func (s *HTTPServer) preparedQueryExecute(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) {\n\targs := structs.PreparedQueryExecuteRequest{\n\t\tQueryIDOrName: id,\n\t\tAgent: structs.QuerySource{\n\t\t\tNode: s.agent.config.NodeName,\n\t\t\tDatacenter: s.agent.config.Datacenter,\n\t\t\tSegment: s.agent.config.SegmentName,\n\t\t},\n\t}\n\ts.parseSource(req, &args.Source)\n\tif done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {\n\t\treturn nil, nil\n\t}\n\tif err := parseLimit(req, &args.Limit); err != nil {\n\t\treturn nil, fmt.Errorf(\"Bad limit: %s\", err)\n\t}\n\n\tparams := req.URL.Query()\n\tif raw := params.Get(\"connect\"); raw != \"\" {\n\t\tval, err := strconv.ParseBool(raw)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error parsing 'connect' value: %s\", err)\n\t\t}\n\n\t\targs.Connect = val\n\t}\n\n\tvar reply structs.PreparedQueryExecuteResponse\n\tdefer setMeta(resp, &reply.QueryMeta)\n\n\tif args.QueryOptions.UseCache {\n\t\traw, m, err := s.agent.cache.Get(cachetype.PreparedQueryName, &args)\n\t\tif err != nil {\n\t\t\t// Don't return error if StaleIfError is set and we are within it and had\n\t\t\t// a cached value.\n\t\t\tif raw != nil && m.Hit && args.QueryOptions.StaleIfError > m.Age {\n\t\t\t\t// Fall through to the happy path below\n\t\t\t} else {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tdefer setCacheMeta(resp, &m)\n\t\tr, ok := raw.(*structs.PreparedQueryExecuteResponse)\n\t\tif !ok {\n\t\t\t// This should never happen, but we want to protect against panics\n\t\t\treturn nil, fmt.Errorf(\"internal error: response type not correct\")\n\t\t}\n\t\treply = *r\n\t} else {\n\tRETRY_ONCE:\n\t\tif err := s.agent.RPC(\"PreparedQuery.Execute\", &args, &reply); err != nil {\n\t\t\t// We have to check the string since the RPC sheds\n\t\t\t// the specific error type.\n\t\t\tif err.Error() == consul.ErrQueryNotFound.Error() 
{\n\t\t\t\tresp.WriteHeader(http.StatusNotFound)\n\t\t\t\tfmt.Fprint(resp, err.Error())\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\t\tif args.QueryOptions.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < reply.LastContact {\n\t\t\targs.AllowStale = false\n\t\t\targs.MaxStaleDuration = 0\n\t\t\tgoto RETRY_ONCE\n\t\t}\n\t}\n\treply.ConsistencyLevel = args.QueryOptions.ConsistencyLevel()\n\n\t// Note that we translate using the DC that the results came from, since\n\t// a query can fail over to a different DC than where the execute request\n\t// was sent to. That's why we use the reply's DC and not the one from\n\t// the args.\n\ts.agent.TranslateAddresses(reply.Datacenter, reply.Nodes)\n\n\t// Use empty list instead of nil.\n\tif reply.Nodes == nil {\n\t\treply.Nodes = make(structs.CheckServiceNodes, 0)\n\t}\n\treturn reply, nil\n}", "func (db *DB) Query(query string, args ...interface{}) (*sql.Rows, error) {\n\treturn db.Slave().Query(query, args...)\n}", "func (or *orchestrator) execute() {\n\tdefer func() {\n\t\tor.mgr.finished(or)\n\t}()\n\n\t// access the new heads queue\n\t// it's filled with new heads as the connected node processes blocks from the network\n\theads := repo.ObservedHeaders()\n\tfor {\n\t\tselect {\n\t\tcase <-or.sigStop:\n\t\t\treturn\n\t\tcase h, ok := <-heads:\n\t\t\tif ok {\n\t\t\t\tor.handleNewHead(h)\n\t\t\t}\n\t\tcase idle, ok := <-or.inScanStateSwitch:\n\t\t\tif ok {\n\t\t\t\tor.pushHeads = idle\n\t\t\t\tif idle {\n\t\t\t\t\tor.unloadCache()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (c *cassandra) ExecuteQuery(queryString string, queryParams ...interface{}) error {\n\treturn c.ExecuteQueryCtx(context.Background(), queryString, queryParams...)\n}", "func (m *MockFinder) Execute(ctx context.Context, query string, from int64, until int64) error {\n\tm.query = query\n\treturn nil\n}", "func (r *Client) Execute(s ...string) {\n\n\tout := r.ExecuteAndReturn(s...)\n\n\tprint(out)\n}" ]
[ "0.7661195", "0.6501132", "0.6437562", "0.63833964", "0.63611424", "0.62889", "0.62788844", "0.6257603", "0.6251378", "0.6196262", "0.61617357", "0.6148566", "0.6122157", "0.6094865", "0.6034637", "0.5990816", "0.5990469", "0.59717685", "0.5960647", "0.5958406", "0.5949864", "0.5918954", "0.5912901", "0.587771", "0.5864577", "0.58634293", "0.5862979", "0.58521533", "0.5846386", "0.584128", "0.58306223", "0.5830235", "0.58214396", "0.5808182", "0.5807255", "0.5801404", "0.5767581", "0.5766105", "0.57651377", "0.57514685", "0.5750377", "0.5736175", "0.5734502", "0.57307535", "0.5725994", "0.5721862", "0.57111764", "0.57105124", "0.5702746", "0.5693934", "0.56805784", "0.5675131", "0.5672108", "0.5667733", "0.5660289", "0.5651254", "0.56423086", "0.56406844", "0.5634108", "0.5632445", "0.5628478", "0.5626932", "0.5616179", "0.56117225", "0.5598679", "0.55956364", "0.5585598", "0.558386", "0.55804026", "0.557956", "0.55724394", "0.55718106", "0.5568135", "0.556455", "0.5560768", "0.5555578", "0.5555563", "0.5552757", "0.554466", "0.55414754", "0.553557", "0.5528804", "0.5521325", "0.55178267", "0.55133367", "0.55129427", "0.5511147", "0.548688", "0.54831177", "0.54772216", "0.5476033", "0.5474353", "0.5471858", "0.5459325", "0.5453839", "0.5453023", "0.54519385", "0.54519093", "0.5450886", "0.54455346" ]
0.7390879
1
execute runs in a separate goroutine and streams data from processors.
func (e *Executor) execute(out chan *Row) {
	// TODO: Support multi-value rows.

	// Initialize map of rows by encoded tagset.
	rows := make(map[string]*Row)

	// Combine values from each processor.
loop:
	for {
		// Retrieve values from processors and write them to the appropriate
		// row based on their tagset.
		for i, p := range e.processors {
			// Retrieve data from the processor.
			m, ok := <-p.C()
			if !ok {
				break loop
			}

			// Set values on returned row.
			for k, v := range m {
				// Extract timestamp and tag values from key.
				b := []byte(k)
				timestamp := int64(binary.BigEndian.Uint64(b[0:8]))

				// Lookup row values and populate data.
				values := e.createRowValuesIfNotExists(rows, e.processors[0].name(), b[8:], timestamp)
				values[i+1] = v
			}
		}
	}

	// Normalize rows and values.
	// This converts the timestamps from nanoseconds to microseconds.
	a := make(Rows, 0, len(rows))
	for _, row := range rows {
		for _, values := range row.Values {
			values[0] = values[0].(int64) / int64(time.Microsecond)
		}
		a = append(a, row)
	}
	sort.Sort(a)

	// Send rows to the channel.
	for _, row := range a {
		out <- row
	}

	// Mark the end of the output channel.
	close(out)
}
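The decoding above implies a key convention: each processor map key packs an 8-byte big-endian nanosecond timestamp ahead of the encoded tagset. A minimal sketch of that assumed layout, with hypothetical encodeKey/decodeKey helpers (not present in the source) and encoding/binary imported:

func encodeKey(timestamp int64, tagset []byte) string {
	// 8-byte big-endian timestamp prefix, tagset bytes after it.
	b := make([]byte, 8+len(tagset))
	binary.BigEndian.PutUint64(b[0:8], uint64(timestamp))
	copy(b[8:], tagset)
	return string(b)
}

func decodeKey(k string) (int64, []byte) {
	// Mirrors the extraction done in execute above.
	b := []byte(k)
	return int64(binary.BigEndian.Uint64(b[0:8])), b[8:]
}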
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (processor *Processor) Execute(input <-chan Message) <-chan Message {\n\tvar wg sync.WaitGroup\n\tnumTasks := processor.demux.FanOut()\n\twg.Add(numTasks)\n\n\tout := make(chan Message)\n\n\twork := func(taskId int, inputStream <-chan Message) {\n\t\tfor message := range inputStream {\n\t\t\tprocessor.process(processor.name, processor.cfg, message, out)\n\t\t}\n\t\twg.Done()\n\t}\n\n\tgo func() {\n\t\tprocessor.demux.Execute(input)\n\t\tfor i := 0; i < numTasks; i++ {\n\t\t\tgo work(i, processor.demux.Output(i))\n\t\t}\n\t}()\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(out)\n\t}()\n\n\treturn out\n}", "func (a *aggregator) Run() {\n\tgo a.submitter()\n\n\tfor m := range a.in {\n\t\tfor _, out_m := range a.process(m) {\n\t\t\ta.out <- out_m\n\t\t}\n\t}\n}", "func runProcessor() {\n\t// process callback is invoked for each message delivered from\n\t// \"example-stream\" topic.\n\tcb := func(ctx goka.Context, msg interface{}) {\n\n\t\t// during the second run, this should break (as value should already be in context)\n\t\tif val := ctx.Value(); val != nil {\n\t\t\tpanic(fmt.Sprintf(\"dealing with a value already in context %v\", ctx.Value()))\n\t\t}\n\n\t\t// store received value in context (first run)\n\t\tctx.SetValue(msg.(string))\n\t\tlog.Printf(\"stored to ctx key = %s, msg = %v\", ctx.Key(), msg)\n\t}\n\n\t// Define a new processor group. The group defines all inputs, outputs, and\n\t// serialization formats. The group-table topic is \"example-group-table\".\n\tg := goka.DefineGroup(group,\n\t\tgoka.Input(topic, new(codec.String), cb),\n\t\tgoka.Persist(new(codec.String)),\n\t)\n\n\tp, err := goka.NewProcessor(brokers, g)\n\tif err != nil {\n\t\tlog.Fatalf(\"error creating processor: %v\", err)\n\t}\n\tif err = p.Run(context.Background()); err != nil {\n\t\tlog.Fatalf(\"error running processor: %v\", err)\n\t}\n}", "func runProcessor() {\n\t// process callback is invoked for each message delivered from\n\t// \"example-stream\" topic.\n\tcb := func(ctx goka.Context, msg interface{}) {\n\t\tvar counter int64\n\t\t// ctx.Value() gets from the group table the value that is stored for\n\t\t// the message's key.\n\t\tif val := ctx.Value(); val != nil {\n\t\t\tcounter = val.(int64)\n\t\t}\n\t\tcounter++\n\t\t// SetValue stores the incremented counter in the group table for in\n\t\t// the message's key.\n\t\tctx.SetValue(counter)\n\t\tlog.Printf(\"key = %s, counter = %v, msg = %v\", ctx.Key(), counter, msg)\n\t}\n\n\t// Define a new processor group. The group defines all inputs, outputs, and\n\t// serialization formats. 
The group-table topic is \"example-group-table\".\n\tg := goka.DefineGroup(group,\n\t\tgoka.Input(topic, new(codec.String), cb),\n\t\tgoka.Persist(new(codec.Int64)),\n\t)\n\n\tp, err := goka.NewProcessor(brokers,\n\t\tg,\n\t\tgoka.WithTopicManagerBuilder(goka.TopicManagerBuilderWithTopicManagerConfig(tmc)),\n\t\tgoka.WithConsumerGroupBuilder(goka.DefaultConsumerGroupBuilder),\n\t)\n\tif err != nil {\n\t\tlog.Fatalf(\"error creating processor: %v\", err)\n\t}\n\tctx, cancel := context.WithCancel(context.Background())\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tdefer close(done)\n\t\tif err = p.Run(ctx); err != nil {\n\t\t\tlog.Printf(\"error running processor: %v\", err)\n\t\t}\n\t}()\n\n\tsigs := make(chan os.Signal)\n\tgo func() {\n\t\tsignal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGKILL)\n\t}()\n\n\tselect {\n\tcase <-sigs:\n\tcase <-done:\n\t}\n\tcancel()\n\t<-done\n}", "func (pf ProcFn) Run(ctx context.Context, stream <-chan Msg) error { return pf(ctx, stream) }", "func (c *Pump) run() error {\n\t// FIXME aconway 2015-03-17: error handling\n\tc.waiter.Add(2)\n\tvar readError, writeError error\n\n\tgo func() { // Read\n\t\trbuf, rbuf2 := make([]byte, bufferSize), make([]byte, bufferSize)\n\t\tfor {\n\t\t\trbuf = rbuf[:cap(rbuf)]\n\t\t\tn, err := c.conn.Read(rbuf)\n\t\t\tif n > 0 {\n\t\t\t\tc.read <- rbuf[:n]\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treadError = err\n\t\t\t\tbreak\n\t\t\t}\n\t\t\trbuf, rbuf2 = rbuf2, rbuf // Swap the buffers, fill the one not in use.\n\t\t}\n\t\tclose(c.read)\n\t\tc.waiter.Done()\n\t}()\n\n\tgo func() { // Write\n\t\tfor wbuf := range c.write {\n\t\t\t_, err := c.conn.Write(wbuf)\n\t\t\tif err != nil {\n\t\t\t\twriteError = err\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tc.waiter.Done()\n\t}()\n\n\t// Proton driver loop\n\twbuf, wbuf2 := make([]byte, bufferSize), make([]byte, bufferSize)\n\twbuf = c.pop(wbuf) // First write buffer\n\tfor { // handle pn_transport_t\n\t\tselect {\n\t\tcase buf, ok := <-c.read: // Read a buffer\n\t\t\tif !ok { // Read channel closed\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tc.push(buf)\n\n\t\tcase c.write <- wbuf: // Write a buffer\n\t\t\twbuf, wbuf2 = wbuf2, wbuf // Swap the buffers, fill the unused one.\n\t\t\twbuf = c.pop(wbuf) // Next buffer to write\n\n\t\tcase f := <-c.inject: // Function injected from another goroutine\n\t\t\tf()\n\t\t}\n\t\tc.process() // FIXME aconway 2015-03-17: error handling\n\t}\n\n\tclose(c.write)\n\tc.waiter.Wait() // Wait for read/write goroutines to finish\n\tswitch {\n\tcase readError != nil:\n\t\treturn readError\n\tcase writeError != nil:\n\t\treturn writeError\n\t}\n\treturn nil\n}", "func (d *disp) process() {\n\tfor {\n\t\tselect {\n\t\tcase pipeline := <-d.PipelineChan:\n\n\t\t\tPipelineChan := <-d.PipelineQueue\n\n\t\t\tPipelineChan <- pipeline\n\n\t\tcase job := <-d.JobChan: // listen to any submitted job on the WorkChan\n\t\t\t// wait for a worker2 to submit JobChan to JobQueue\n\t\t\t// note that this JobQueue is shared among all workers.\n\t\t\t// Whenever there is an available JobChan on JobQueue pull it\n\t\t\tJobChan := <-d.Queue\n\n\t\t\t// Once a jobChan is available, send the submitted Job on this JobChan\n\t\t\tJobChan <- job\n\t\t}\n\t}\n}", "func (e *Executor) Execute() (<-chan *Row, error) {\n\t// Initialize processors.\n\tfor _, p := range e.processors {\n\t\tp.start()\n\t}\n\n\t// Create output channel and stream data in a separate goroutine.\n\tout := make(chan *Row, 0)\n\tgo e.execute(out)\n\n\treturn out, nil\n}", "func (w *noAggregationStreamWorker) run() 
{\n\tlog.Debugf(\"Starting streaming routine for the no-aggregation pipeline\")\n\n\tticker := time.NewTicker(noAggWorkerStreamCheckFrequency)\n\tdefer ticker.Stop()\n\tlogPayloads := config.Datadog.GetBool(\"log_payloads\")\n\tw.seriesSink, w.sketchesSink = createIterableMetrics(w.flushConfig, w.serializer, logPayloads, false)\n\n\tstopped := false\n\tvar stopBlockChan chan struct{}\n\tvar lastStream time.Time\n\n\tfor !stopped {\n\t\tstart := time.Now()\n\t\tserializedSamples := 0\n\n\t\tmetrics.Serialize(\n\t\t\tw.seriesSink,\n\t\t\tw.sketchesSink,\n\t\t\tfunc(seriesSink metrics.SerieSink, sketchesSink metrics.SketchesSink) {\n\t\t\tmainloop:\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\n\t\t\t\t\t// stop signal\n\t\t\t\t\tcase trigger := <-w.stopChan:\n\t\t\t\t\t\tstopped = true\n\t\t\t\t\t\tstopBlockChan = trigger.blockChan\n\t\t\t\t\t\tbreak mainloop // end `Serialize` call and trigger a flush to the forwarder\n\n\t\t\t\t\tcase <-ticker.C:\n\t\t\t\t\t\tn := time.Now()\n\t\t\t\t\t\tif serializedSamples > 0 && lastStream.Before(n.Add(-time.Second*1)) {\n\t\t\t\t\t\t\tlog.Debug(\"noAggregationStreamWorker: triggering an automatic payloads flush to the forwarder (no traffic since 1s)\")\n\t\t\t\t\t\t\tbreak mainloop // end `Serialize` call and trigger a flush to the forwarder\n\t\t\t\t\t\t}\n\n\t\t\t\t\t// receiving samples\n\t\t\t\t\tcase samples := <-w.samplesChan:\n\t\t\t\t\t\tlog.Debugf(\"Streaming %d metrics from the no-aggregation pipeline\", len(samples))\n\t\t\t\t\t\tfor _, sample := range samples {\n\t\t\t\t\t\t\t// enrich metric sample tags\n\t\t\t\t\t\t\tsample.GetTags(w.taggerBuffer, w.metricBuffer)\n\t\t\t\t\t\t\tw.metricBuffer.AppendHashlessAccumulator(w.taggerBuffer)\n\n\t\t\t\t\t\t\t// turns this metric sample into a serie\n\t\t\t\t\t\t\tvar serie metrics.Serie\n\t\t\t\t\t\t\tserie.Name = sample.Name\n\t\t\t\t\t\t\tserie.Points = []metrics.Point{{Ts: sample.Timestamp, Value: sample.Value}}\n\t\t\t\t\t\t\tserie.Tags = tagset.CompositeTagsFromSlice(w.metricBuffer.Copy())\n\t\t\t\t\t\t\tserie.Host = sample.Host\n\t\t\t\t\t\t\t// ignored when late but mimic dogstatsd traffic here anyway\n\t\t\t\t\t\t\tserie.Interval = 10\n\t\t\t\t\t\t\tw.seriesSink.Append(&serie)\n\n\t\t\t\t\t\t\tw.taggerBuffer.Reset()\n\t\t\t\t\t\t\tw.metricBuffer.Reset()\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tlastStream = time.Now()\n\n\t\t\t\t\t\tserializedSamples += len(samples)\n\t\t\t\t\t\tif serializedSamples > w.maxMetricsPerPayload {\n\t\t\t\t\t\t\tbreak mainloop // end `Serialize` call and trigger a flush to the forwarder\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}, func(serieSource metrics.SerieSource) {\n\t\t\t\tsendIterableSeries(w.serializer, start, serieSource)\n\t\t\t}, func(sketches metrics.SketchesSource) {\n\t\t\t\t// noop: we do not support sketches in the no-agg pipeline.\n\t\t\t})\n\n\t\tif stopped {\n\t\t\tbreak\n\t\t}\n\n\t\tw.seriesSink, w.sketchesSink = createIterableMetrics(w.flushConfig, w.serializer, logPayloads, false)\n\t}\n\n\tif stopBlockChan != nil {\n\t\tclose(stopBlockChan)\n\t}\n}", "func (or *orchestrator) execute() {\n\tdefer func() {\n\t\tor.mgr.finished(or)\n\t}()\n\n\t// access the new heads queue\n\t// it's filled with new heads as the connected node processes blocks from the network\n\theads := repo.ObservedHeaders()\n\tfor {\n\t\tselect {\n\t\tcase <-or.sigStop:\n\t\t\treturn\n\t\tcase h, ok := <-heads:\n\t\t\tif ok {\n\t\t\t\tor.handleNewHead(h)\n\t\t\t}\n\t\tcase idle, ok := <-or.inScanStateSwitch:\n\t\t\tif ok {\n\t\t\t\tor.pushHeads = idle\n\t\t\t\tif idle 
{\n\t\t\t\t\tor.unloadCache()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (g *Gossiper) Run(ctx context.Context) {\n\tsths := make(chan sthInfo, g.bufferSize)\n\n\tvar wg sync.WaitGroup\n\twg.Add(1 + len(g.srcs))\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tglog.Info(\"starting Submitter\")\n\t\tg.Submitter(ctx, sths)\n\t\tglog.Info(\"finished Submitter\")\n\t}()\n\tfor _, src := range g.srcs {\n\t\tgo func(src *sourceLog) {\n\t\t\tdefer wg.Done()\n\t\t\tglog.Infof(\"starting Retriever(%s)\", src.Name)\n\t\t\tsrc.Retriever(ctx, g, sths)\n\t\t\tglog.Infof(\"finished Retriever(%s)\", src.Name)\n\t\t}(src)\n\t}\n\twg.Wait()\n}", "func main() {\n\tgo produce()\n\tgo consume()\n\t<-done\n}", "func (this *service) processor() {\n\tthis.logger.Debugf(\"(%s) Starting processor\", this.cid())\n\n\tthis.wgStarted.Done()\n\tdefer this.wgStopped.Done()\n\n\tfor {\n\t\t// 1. Find out what message is next and the size of the message\n\t\tmtype, total, err := this.peekMessageSize()\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tthis.logger.Errorf(\"(%s) Error peeking next message size: %v\", this.cid(), err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tmsg, n, err := this.peekMessage(mtype, total)\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tthis.logger.Errorf(\"(%s) Error peeking next message: %v\", this.cid(), err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\t//this.logger.Debugf(\"(%s) Received: %s\", this.cid(), msg)\n\n\t\tthis.inStat.increment(int64(n))\n\n\t\t// 5. Process the read message\n\t\terr = this.processIncoming(msg)\n\t\tif err != nil {\n\t\t\tif err != errDisconnect {\n\t\t\t\tthis.logger.Errorf(\"(%s) Error processing %s: %v\", this.cid(), msg.Name(), err)\n\t\t\t} else {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// 7. We should commit the bytes in the buffer so we can move on\n\t\t_, err = this.in.ReadCommit(total)\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tthis.logger.Errorf(\"(%s) Error committing %d read bytes: %v\", this.cid(), total, err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\t// 7. 
Check to see if done is closed, if so, exit\n\t\tif this.isDone() && this.in.Len() == 0 {\n\t\t\treturn\n\t\t}\n\n\t\t//if this.inStat.msgs%1000 == 0 {\n\t\t//\tthis.logger.Debugf(\"(%s) Going to process message %d\", this.cid(), this.inStat.msgs)\n\t\t//}\n\t}\n}", "func (r fifo) Run(ctx context.Context, params StageParams) {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase payloadIn, ok := <-params.Input():\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpayloadOut, err := r.proc.Process(ctx, payloadIn)\n\t\t\tif err != nil {\n\t\t\t\twrappedErr := xerrors.Errorf(\"pipeline stage %d : %w \", params.StageIndex(), err)\n\t\t\t\tmaybeEmitError(wrappedErr, params.Error())\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif payloadOut == nil {\n\t\t\t\tpayloadIn.MarkAsProcessed()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase params.Output() <- payloadOut:\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n}", "func (r *processRunner) run(pipeID, componentID string, cancel chan struct{}, in <-chan message, meter *meter) (<-chan message, <-chan error) {\n\terrc := make(chan error, 1)\n\tr.in = in\n\tr.out = make(chan message)\n\tgo func() {\n\t\tdefer close(r.out)\n\t\tdefer close(errc)\n\t\tcall(r.reset, pipeID, errc) // reset hook\n\t\tvar err error\n\t\tvar m message\n\t\tvar ok bool\n\t\tfor {\n\t\t\t// retrieve new message\n\t\t\tselect {\n\t\t\tcase m, ok = <-in:\n\t\t\t\tif !ok {\n\t\t\t\t\tcall(r.flush, pipeID, errc) // flush hook\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase <-cancel:\n\t\t\t\tcall(r.interrupt, pipeID, errc) // interrupt hook\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tm.applyTo(componentID) // apply params\n\t\t\tm.Buffer, err = r.fn(m.Buffer) // process new buffer\n\t\t\tif err != nil {\n\t\t\t\terrc <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tmeter = meter.sample(int64(m.Buffer.Size())).message()\n\n\t\t\tm.feedback.applyTo(componentID) // apply feedback\n\n\t\t\t// send message further\n\t\t\tselect {\n\t\t\tcase r.out <- m:\n\t\t\tcase <-cancel:\n\t\t\t\tcall(r.interrupt, pipeID, errc) // interrupt hook\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn r.out, errc\n}", "func execute(proc *caire.Processor) {\n\tvar err error\n\tproc.Spinner = spinner\n\n\t// Supported files\n\tvalidExtensions := []string{\".jpg\", \".png\", \".jpeg\", \".bmp\", \".gif\"}\n\n\t// Check if source path is a local image or URL.\n\tif utils.IsValidUrl(*source) {\n\t\tsrc, err := utils.DownloadImage(*source)\n\t\tif src != nil {\n\t\t\tdefer os.Remove(src.Name())\n\t\t}\n\t\tdefer src.Close()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\n\t\t\t\tutils.DecorateText(\"Failed to load the source image: %v\", utils.ErrorMessage),\n\t\t\t\tutils.DecorateText(err.Error(), utils.DefaultMessage),\n\t\t\t)\n\t\t}\n\t\tfs, err = src.Stat()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\n\t\t\t\tutils.DecorateText(\"Failed to load the source image: %v\", utils.ErrorMessage),\n\t\t\t\tutils.DecorateText(err.Error(), utils.DefaultMessage),\n\t\t\t)\n\t\t}\n\t\timg, err := os.Open(src.Name())\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\n\t\t\t\tutils.DecorateText(\"Unable to open the temporary image file: %v\", utils.ErrorMessage),\n\t\t\t\tutils.DecorateText(err.Error(), utils.DefaultMessage),\n\t\t\t)\n\t\t}\n\t\timgfile = img\n\t} else {\n\t\t// Check if the source is a pipe name or a regular file.\n\t\tif *source == pipeName {\n\t\t\tfs, err = os.Stdin.Stat()\n\t\t} else {\n\t\t\tfs, err = os.Stat(*source)\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\n\t\t\t\tutils.DecorateText(\"Failed to load the 
source image: %v\", utils.ErrorMessage),\n\t\t\t\tutils.DecorateText(err.Error(), utils.DefaultMessage),\n\t\t\t)\n\t\t}\n\t}\n\n\tnow := time.Now()\n\n\tswitch mode := fs.Mode(); {\n\tcase mode.IsDir():\n\t\tvar wg sync.WaitGroup\n\t\t// Read destination file or directory.\n\t\t_, err := os.Stat(*destination)\n\t\tif err != nil {\n\t\t\terr = os.Mkdir(*destination, 0755)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\n\t\t\t\t\tutils.DecorateText(\"Unable to get dir stats: %v\\n\", utils.ErrorMessage),\n\t\t\t\t\tutils.DecorateText(err.Error(), utils.DefaultMessage),\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t\tproc.Preview = false\n\n\t\t// Limit the concurrently running workers to maxWorkers.\n\t\tif *workers <= 0 || *workers > maxWorkers {\n\t\t\t*workers = runtime.NumCPU()\n\t\t}\n\n\t\t// Process recursively the image files from the specified directory concurrently.\n\t\tch := make(chan result)\n\t\tdone := make(chan interface{})\n\t\tdefer close(done)\n\n\t\tpaths, errc := walkDir(done, *source, validExtensions)\n\n\t\twg.Add(*workers)\n\t\tfor i := 0; i < *workers; i++ {\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tconsumer(done, paths, *destination, proc, ch)\n\t\t\t}()\n\t\t}\n\n\t\t// Close the channel after the values are consumed.\n\t\tgo func() {\n\t\t\tdefer close(ch)\n\t\t\twg.Wait()\n\t\t}()\n\n\t\t// Consume the channel values.\n\t\tfor res := range ch {\n\t\t\tif res.err != nil {\n\t\t\t\terr = res.err\n\t\t\t}\n\t\t\tprintStatus(res.path, err)\n\t\t}\n\n\t\tif err = <-errc; err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, utils.DecorateText(err.Error(), utils.ErrorMessage))\n\t\t}\n\n\tcase mode.IsRegular() || mode&os.ModeNamedPipe != 0: // check for regular files or pipe names\n\t\text := filepath.Ext(*destination)\n\t\tif !isValidExtension(ext, validExtensions) && *destination != pipeName {\n\t\t\tlog.Fatalf(utils.DecorateText(fmt.Sprintf(\"%v file type not supported\", ext), utils.ErrorMessage))\n\t\t}\n\n\t\terr = processor(*source, *destination, proc)\n\t\tprintStatus(*destination, err)\n\t}\n\tif err == nil {\n\t\tfmt.Fprintf(os.Stderr, \"\\nExecution time: %s\\n\", utils.DecorateText(fmt.Sprintf(\"%s\", utils.FormatTime(time.Since(now))), utils.SuccessMessage))\n\t}\n}", "func (trd *trxDispatcher) execute() {\n\t// don't forget to sign off after we are done\n\tdefer func() {\n\t\tclose(trd.outAccount)\n\t\tclose(trd.outLog)\n\t\tclose(trd.outTransaction)\n\n\t\ttrd.mgr.finished(trd)\n\t}()\n\n\t// wait for transactions and process them\n\tfor {\n\t\t// try to read next transaction\n\t\tselect {\n\t\tcase <-trd.sigStop:\n\t\t\treturn\n\t\tcase <-trd.bot.C:\n\t\t\ttrd.updateLastSeenBlock()\n\t\tcase evt, ok := <-trd.inTransaction:\n\t\t\t// is the channel even available for reading\n\t\t\tif !ok {\n\t\t\t\tlog.Notice(\"trx channel closed, terminating %s\", trd.name())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif evt.blk == nil || evt.trx == nil {\n\t\t\t\tlog.Criticalf(\"dispatcher dry loop\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttrd.process(evt)\n\t\t}\n\t}\n}", "func (p *dynamicWorkerPool) Run(ctx context.Context, params StageParams) {\nstop:\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tbreak stop\n\t\tcase payloadIn, ok := <-params.Input():\n\t\t\tif !ok {\n\t\t\t\tbreak stop\n\t\t\t}\n\t\t\tvar token struct{}\n\t\t\tselect {\n\t\t\tcase token = <-p.tokenPool:\n\t\t\tcase <-ctx.Done():\n\t\t\t}\n\t\t\tgo func(payloadIn Payload, token struct{}) {\n\t\t\t\tdefer func() { p.tokenPool <- token }()\n\t\t\t\tpayloadOut, err := p.proc.Process(ctx, payloadIn)\n\t\t\t\tif err != nil 
{\n\t\t\t\t\twrappedErr := xerrors.Errorf(\"pipeline stage: %d : %w \", params.StageIndex(), err)\n\t\t\t\t\tmaybeEmitError(wrappedErr, params.Error())\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif payloadOut == nil {\n\t\t\t\t\tpayloadIn.MarkAsProcessed()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tselect {\n\t\t\t\tcase params.Output() <- payloadOut:\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t}\n\t\t\t}(payloadIn, token)\n\t\t}\n\t}\n\tfor i := 0; i < cap(p.tokenPool); i++ {\n\t\t<-p.tokenPool\n\t}\n}", "func (r *Runner) run() {\n\tfor {\n\t\ttask := r.rq.Pop()\n\t\tr.process(task)\n\t}\n}", "func (p *Pipeline) Execute(ctx context.Context, src InputSource, sink OutputSink) error {\n\treturn p.ExecuteBuffered(ctx, src, sink, 1)\n}", "func (p *literalProcessor) run() {\n\tfor {\n\t\tselect {\n\t\tcase ch := <-p.done:\n\t\t\tclose(ch)\n\t\t\treturn\n\t\tcase p.c <- map[string]interface{}{\"\": p.val}:\n\t\t}\n\t}\n}", "func (p *SingleLineParser) run() {\n\tfor input := range p.inputChan {\n\t\tp.process(input)\n\t}\n\tp.lineHandler.Stop()\n}", "func (e *EventEmitter) execute(listener *Listener, event *Event) {\n\tdefer e.gracefulWait.Done()\n\n\tfor _, filterFunc := range e.filterFuncs {\n\t\tif !filterFunc(event) {\n\t\t\treturn\n\t\t}\n\t}\n\n\tvar (\n\t\tdata Data\n\t\texecutionTags []string\n\t)\n\n\tif e.mapFunc != nil {\n\t\tdata = e.mapFunc(event)\n\t} else if err := event.Data(&data); err != nil {\n\t\te.app.log.Println(errDecodingEventData{err})\n\t\treturn\n\t}\n\n\tif e.executionTagsFunc != nil {\n\t\texecutionTags = e.executionTagsFunc(event)\n\t}\n\n\tif _, err := e.app.execute(e.taskServiceID, e.taskKey, data, executionTags); err != nil {\n\t\te.app.log.Println(executionError{e.taskKey, err})\n\t}\n}", "func (pm *PipelineManager) runWorker() {\n\tfor pm.processNextWorkItem() {\n\t}\n}", "func (r *mutationStreamReader) run() {\n\n\t//panic handler\n\tdefer r.panicHandler()\n\n\tfor {\n\t\tselect {\n\n\t\tcase msg, ok := <-r.streamMutch:\n\n\t\t\tif ok {\n\t\t\t\tswitch msg.(type) {\n\t\t\t\tcase []*protobuf.VbKeyVersions:\n\t\t\t\t\tvbKeyVer := msg.([]*protobuf.VbKeyVersions)\n\t\t\t\t\tr.handleVbKeyVersions(vbKeyVer)\n\n\t\t\t\tdefault:\n\t\t\t\t\tr.handleStreamInfoMsg(msg)\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\t//stream library has closed this channel indicating\n\t\t\t\t//unexpected stream closure send the message to supervisor\n\t\t\t\tlogging.Fatalf(\"MutationStreamReader::run Unexpected Mutation \"+\n\t\t\t\t\t\"Channel Close for Stream %v\", r.streamId)\n\t\t\t\tmsgErr := &MsgError{\n\t\t\t\t\terr: Error{code: ERROR_STREAM_READER_STREAM_SHUTDOWN,\n\t\t\t\t\t\tseverity: FATAL,\n\t\t\t\t\t\tcategory: STREAM_READER}}\n\t\t\t\tr.supvRespch <- msgErr\n\t\t\t}\n\n\t\tcase <-r.killch:\n\t\t\treturn\n\t\t}\n\t}\n\n}", "func (s *Scavenger) run() {\n\tdefer func() {\n\t\ts.emitStats()\n\t\tgo s.Stop()\n\t\ts.stopWG.Done()\n\t}()\n\n\t// Start a task to delete orphaned tasks from the tasks table, if enabled\n\tif s.cleanOrphans() {\n\t\ts.executor.Submit(&orphanExecutorTask{scvg: s})\n\t}\n\n\tvar pageToken []byte\n\tfor {\n\t\tresp, err := s.listTaskList(taskListBatchSize, pageToken)\n\t\tif err != nil {\n\t\t\ts.logger.Error(\"listTaskList error\", tag.Error(err))\n\t\t\treturn\n\t\t}\n\n\t\tfor _, item := range resp.Items {\n\t\t\tatomic.AddInt64(&s.stats.tasklist.nProcessed, 1)\n\t\t\tif !s.executor.Submit(s.newTask(&item)) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tpageToken = resp.NextPageToken\n\t\tif pageToken == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\ts.awaitExecutor()\n}", "func 
ExecutePipeline(jobs ...job) {\r\n\twg := &sync.WaitGroup{}\r\n\tcountJobs := len(jobs)\r\n\tout := make(chan interface{})\r\n\tfor i := 0; i < countJobs; i++ {\r\n\t\tin := out\r\n\t\tout = make(chan interface{})\r\n\t\twg.Add(1)\r\n\t\tgo myGoroutine(in, out, jobs[i], wg)\r\n\t}\r\n\twg.Wait()\r\n}", "func TestProcessorPool_execute(t *testing.T){\n\tnewPipeline := NewPipeline(uint32(1))\n\tstgFactory := newStageFactory(newPipeline)\n\t//create stage1 and its related processor pool and msg content\n\tstg1:= stgFactory.new(\"stage1\",TRANSFORM)\n\tprcPool := newProcessorPool(stg1)\n\tstg1.processorPool = prcPool\n\tprcFactory := newProcessorFactory(stg1)\n\troute := msgRoutes{\"path\": struct{}{}}\n\tprocessor1 := prcFactory.new(DefaultProcessorOptions,newDummyExecutor(TRANSFORM),route)\n\tprcPool.attach(processor1)\n\tmsgFactory := message.NewFactory(newPipeline.id,stg1.id,processor1.id)\n\tmsgContent := content2.New()\n\tmsgContent.Add(\"key\",content2.NewFieldValue(\"hello\",content2.STRING))\n\tmsg := msgFactory.NewExecuteRoot(msgContent, false)\n\n\t//create stage2 and its related processor pool\n\tstg2:= stgFactory.new(\"stage2\",TRANSFORM)\n\tprocessor1.addSendTo(stg2, \"sendPath\")\n\n\t//creating channel from processor 1 to stg2\n\treceiver := processor1.channelForStageId(stg2)\n\tprocessor2 := prcFactory.new(DefaultProcessorOptions,newDummyExecutor(TRANSFORM),route)\n\tstg3 := stgFactory.new(\"Stage3\",SINK)\n\tprcPool.attach(processor2)\n\tprocessor2.addSendTo(stg3,\"sendPath3\")\n\tprocessor1.addSendTo(stg3,\"sendPath2\")\n\treceiver2 := processor1.channelForStageId(stg3)\n\treceiver3 := processor1.channelForStageId(stg3)\n\n\tprcPool.lock(route)\n\n\tt.Run(\"single processor sending to the stage\", func(t *testing.T) {\n\t\tmsgPack := MsgPod{\n\t\t\tMsg: msg,\n\t\t\tRoute: MsgRouteParam(\"path\"),\n\t\t}\n\n\t\t// check if the processor is closed before complete execution\n\t\tassert.Equal(t,false,prcPool.isClosed(),\"want processor to be running but received processor closed\")\n\t\tprcPool.execute(msgPack)\n\t\tselect {\n\t\tcase receivedMsg := <-receiver:\n\t\t\tm := receivedMsg.Msg\n\t\t\tif !reflect.DeepEqual(m.Content(), msg.Content()) {\n\t\t\t\tt.Errorf(\"Want: %v\\nGot: %v\\n\", msg.Content(), m.Content())\n\t\t\t}\n\t\t\tassert.Equal(t, msg.Id(), m.Id())\n\t\t}\n\t})\n\tprcPool.done()\n\t//check if the processor is closed after complete execution\n\tassert.Equal(t,true,prcPool.isClosed(),\"want processor to be closed but received processor running\")\n\t//multiple channel from multiple processor to one receiving stage\n\n\tt.Run(\"multiple processor sending to same stage\", func(t *testing.T) {\n\t\tmsgPack := MsgPod{\n\t\t\tMsg: msg,\n\t\t\tRoute: MsgRouteParam(\"path\"),\n\t\t}\n\t\tprcPool.execute(msgPack)\n\t\tselect {\n\t\tcase receivedMsg2 := <-receiver2:\n\t\t\tm := receivedMsg2.Msg\n\t\t\tif !reflect.DeepEqual(m.Content(), msg.Content()) {\n\t\t\t\tt.Errorf(\"Want: %v\\nGot: %v\\n\", msg.Content(), m.Content())\n\t\t\t}\n\t\t\tassert.Equal(t, msg.Id(), m.Id())\n\t\tcase receivedMsg3 := <-receiver3:\n\t\t\tm := receivedMsg3.Msg\n\t\t\tif !reflect.DeepEqual(m.Content(), msg.Content()) {\n\t\t\t\tt.Errorf(\"Want: %v\\nGot: %v\\n\", msg.Content(), m.Content())\n\t\t\t}\n\t\t\tassert.Equal(t, msg.Id(), m.Id())\n\t\t}\n\t})\n}", "func (p *Producer) Run() {\n\tp.wg.Add(1)\n\tdefer p.wg.Done()\n\n\tsendMsg := func(routingKey string, data []byte) {\n\t\ttimeStamp := time.Now()\n\t\terr := 
p.rabbitChannel.Publish(\n\t\t\tp.rabbitExchange,\n\t\t\troutingKey,\n\t\t\tfalse,\n\t\t\tfalse,\n\t\t\tamqp.Publishing{\n\t\t\t\tDeliveryMode: amqp.Persistent,\n\t\t\t\tTimestamp: timeStamp,\n\t\t\t\tContentType: \"text/plain\",\n\t\t\t\tBody: data,\n\t\t\t})\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error publishing %s\", string(data))\n\t\t\tp.writeFailure(\n\t\t\t\tfmt.Sprintf(\"%s/%s-%d.txt\",\n\t\t\t\t\tp.failureDir,\n\t\t\t\t\troutingKey,\n\t\t\t\t\ttimeStamp.UnixNano()),\n\t\t\t\tdata)\n\t\t}\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase event := <-p.eventsChan:\n\t\t\tsendMsg(\"raw_events\", event)\n\t\tcase meter := <-p.metersChan:\n\t\t\tsendMsg(\"raw_meters\", meter)\n\t\tcase <-p.quitChan:\n\t\t\tp.rabbitChannel.Close()\n\t\t\treturn\n\t\t}\n\t}\n}", "func Process(c chan string, command string, file *os.File, interpreter string, isVerbose bool, progBar *pb.ProgressBar) {\n\tfor hostname := range c {\n\t\tvar out bytes.Buffer\n\t\t// time.Sleep(time.Millisecond * 1000)\n\t\tif !isVerbose {\n\t\t\tprogBar.Start()\n\t\t\tprogBar.Increment()\n\t\t} else {\n\t\t\tfmt.Printf(\"Processing: %s\\n\", hostname)\n\t\t}\n\t\tcmd := exec.Command(interpreter, command, hostname)\n\t\tcmd.Stdout = &out\n\t\terr := cmd.Run()\n\t\terrcontrol(err)\n\t\t_, writeErr := file.WriteString(out.String() + \"\\n\")\n\t\terrcontrol(writeErr)\n\n\t\tif isVerbose {\n\t\t\tfmt.Fprintln(os.Stdout, out.String())\n\t\t}\n\t}\n}", "func (w *Worker) Run(done <-chan interface{}) error {\n\tdefer close(w.resultStream)\n\tfor {\n\t\tselect {\n\t\tcase <-done:\n\t\t\tlog.Println(\n\t\t\t\t\"level\", \"INFO\",\n\t\t\t\t\"object\", \"workers.worker\",\n\t\t\t\t\"method\", \"Run\",\n\t\t\t\t\"msg\", \"terminating operations by application request\",\n\t\t\t)\n\t\t\treturn nil\n\t\tcase order, ok := <-w.orderStream:\n\t\t\tif !ok {\n\t\t\t\tlog.Println(\"level\", \"INFO\",\n\t\t\t\t\t\"object\", \"workers.worker\",\n\t\t\t\t\t\"method\", \"Run\",\n\t\t\t\t\t\"msg\", \"terminating operations because order stream was closed\",\n\t\t\t\t)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tw.processOrder(order)\n\t\t}\n\t}\n}", "func (b *QuerySnipBroadcaster) Run() {\n\tfor {\n\t\ts := <-b.in\n\t\tfor _, recipient := range b.recipients {\n\t\t\trecipient <- s\n\t\t}\n\t}\n}", "func (p *Pipe) start() {\n\tp.cancel = make(chan struct{})\n\terrcList := make([]<-chan error, 0, 1+len(p.processors)+len(p.sinks))\n\t// start pump\n\tout, errc := p.pump.run(p.cancel, p.ID(), p.provide, p.consume, p.sampleRate, p.metric)\n\terrcList = append(errcList, errc)\n\n\t// start chained processesing\n\tfor _, proc := range p.processors {\n\t\tout, errc = proc.run(p.cancel, p.ID(), out, p.sampleRate, p.metric)\n\t\terrcList = append(errcList, errc)\n\t}\n\n\tsinkErrcList := p.broadcastToSinks(out)\n\terrcList = append(errcList, sinkErrcList...)\n\tp.errc = mergeErrors(errcList...)\n}", "func (s *StreamingDriver) Run(path string, running *bool) error {\n\tchannel := s.pipeline.GetRootChannel()\n\tsfChannel := channel.(*plugins.SFChannel)\n\n\trecords := sfChannel.In\n\tif err := os.RemoveAll(path); err != nil {\n\t\tlogger.Error.Println(\"remove error:\", err)\n\t\treturn err\n\t}\n\n\tl, err := net.ListenUnix(\"unixpacket\", &net.UnixAddr{Name: path, Net: \"unixpacket\"})\n\tif err != nil {\n\t\tlogger.Error.Println(\"listen error:\", err)\n\t\treturn err\n\t}\n\tdefer l.Close()\n\n\tsFlow := sfgo.NewSysFlow()\n\tdeser, err := compiler.CompileSchemaBytes([]byte(sFlow.Schema()), []byte(sFlow.Schema()))\n\tif err != nil {\n\t\tlogger.Error.Println(\"compiler 
error:\", err)\n\t\treturn err\n\t}\n\n\tfor *running {\n\t\tbuf := make([]byte, BuffSize)\n\t\toobuf := make([]byte, OOBuffSize)\n\t\treader := bytes.NewReader(buf)\n\t\ts.conn, err = l.AcceptUnix()\n\t\tif err != nil {\n\t\t\tlogger.Error.Println(\"accept error:\", err)\n\t\t\tbreak\n\t\t}\n\t\tfor *running {\n\t\t\tsFlow = sfgo.NewSysFlow()\n\t\t\t_, _, flags, _, err := s.conn.ReadMsgUnix(buf[:], oobuf[:])\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error.Println(\"read error:\", err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif flags == 0 {\n\t\t\t\treader.Reset(buf)\n\t\t\t\terr = vm.Eval(reader, deser, sFlow)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.Error.Println(\"deserialize:\", err)\n\t\t\t\t}\n\t\t\t\trecords <- sFlow\n\t\t\t} else {\n\t\t\t\tlogger.Error.Println(\"Flag error ReadMsgUnix:\", flags)\n\t\t\t}\n\t\t}\n\t\ts.conn.Close()\n\t}\n\tlogger.Trace.Println(\"Closing main channel\")\n\tclose(records)\n\ts.pipeline.Wait()\n\treturn nil\n}", "func (a *Agent) runProcessors(\n\tunits []*processorUnit,\n) error {\n\tvar wg sync.WaitGroup\n\tfor _, unit := range units {\n\t\twg.Add(1)\n\t\tgo func(unit *processorUnit) {\n\t\t\tdefer wg.Done()\n\n\t\t\tacc := NewAccumulator(unit.processor, unit.dst)\n\t\t\tfor m := range unit.src {\n\t\t\t\tif err := unit.processor.Add(m, acc); err != nil {\n\t\t\t\t\tacc.AddError(err)\n\t\t\t\t\tm.Drop()\n\t\t\t\t}\n\t\t\t}\n\t\t\tunit.processor.Stop()\n\t\t\tclose(unit.dst)\n\t\t\tlog.Printf(\"D! [agent] Processor channel closed\")\n\t\t}(unit)\n\t}\n\twg.Wait()\n\n\treturn nil\n}", "func (g *Gossiper) Run(ctx context.Context) {\n\tsths := make(chan sthInfo, g.bufferSize)\n\n\tvar wg sync.WaitGroup\n\twg.Add(len(g.srcs))\n\tfor _, src := range g.srcs {\n\t\tgo func(src *sourceLog) {\n\t\t\tdefer wg.Done()\n\t\t\tglog.Infof(\"starting Retriever(%s)\", src.Name)\n\t\t\tsrc.Retriever(ctx, g, sths)\n\t\t\tglog.Infof(\"finished Retriever(%s)\", src.Name)\n\t\t}(src)\n\t}\n\tglog.Info(\"starting Submitter\")\n\tg.Submitter(ctx, sths)\n\tglog.Info(\"finished Submitter\")\n\n\t// Drain the sthInfo channel during shutdown so the Retrievers don't block on it.\n\tgo func() {\n\t\tfor info := range sths {\n\t\t\tglog.V(1).Infof(\"discard STH from %s\", info.name)\n\t\t}\n\t}()\n\n\twg.Wait()\n\tclose(sths)\n}", "func (c *connection) processAsScript() {\n\n\tqLength, err := conn.getQueueLength(inputs)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tfor i := 0; i < qLength; i++ {\n\t\tif err := c.process(&pushCounter, inputs); err != nil {\n\t\t\tlog.Printf(\"Processed %d / %d\", pushCounter, qLength)\n\t\t\treturn\n\t\t}\n\t}\n}", "func (p *Pipeline) Run(ctx context.Context) {\n\tp.runMutex.Lock()\n\tdefer p.runMutex.Unlock()\n\tif p.status == STATUS_RUN {\n\t\treturn\n\t}\n\t//logrus.Debug(\"mysql position\", p.Input.Options.Position)\n\tmyCtx, cancel := context.WithCancel(ctx)\n\tp.ctx = myCtx\n\tgo func() {\n\t\tvar err error\n\t\tdefer func() {\n\t\t\tif r := recover(); r != nil {\n\t\t\t\tlogrus.Errorln(\"pipeline run panic, \", r)\n\t\t\t}\n\t\t\tcancel()\n\t\t}()\n\t\tif err = p.Input.Run(myCtx); err != nil {\n\t\t\tevent.Event(event2.NewErrorPipeline(p.Options.Pipeline.Name, \"Start error: \"+err.Error()))\n\t\t\treturn\n\t\t}\n\t\tif err = p.Filter.Run(myCtx); err != nil {\n\t\t\tevent.Event(event2.NewErrorPipeline(p.Options.Pipeline.Name, \"Start error: \"+err.Error()))\n\t\t\treturn\n\t\t}\n\t\tif err = p.Output.Run(myCtx); err != nil {\n\t\t\tevent.Event(event2.NewErrorPipeline(p.Options.Pipeline.Name, \"Start error: 
\"+err.Error()))\n\t\t\treturn\n\t\t}\n\t\tevent.Event(event2.NewInfoPipeline(p.Options.Pipeline.Name, \"Start succeeded\"))\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\t{\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase <-p.Input.Context().Done():\n\t\t\t\t{\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase <-p.Filter.Context().Done():\n\t\t\t\t{\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase <-p.Output.Context().Done():\n\t\t\t\t{\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n}", "func (w *Worker) run(){\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase f := <- w.task:\n\t\t\t\tif f == nil {\n\t\t\t\t\tw.pool.decRunning()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tf()\n\t\t\t\tw.pool.putWorker(w)\n\t\t\tcase args := <- w.args:\n\t\t\t\tif args == nil {\n\t\t\t\t\tw.pool.decRunning()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tw.pool.poolFunc(args)\n\t\t\t\tw.pool.putWorker(w)\n\t\t\t}\n\t\t}\n\t}()\n}", "func main() {\n\tPipeline1()\n\t// Pipeline2()\n\t// RunDirectionalChannel()\n\tfmt.Println(\"YYY\")\n}", "func (d *discoverer) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {\n\t// notice that Prometheus discovery.Discoverer abstraction doesn't allow failures,\n\t// so we must ensure that xDS client is up-and-running all the time.\n\tfor streamID := uint64(1); ; streamID++ {\n\t\terrCh := make(chan error, 1)\n\t\tgo func(errCh chan<- error) {\n\t\t\tdefer close(errCh)\n\t\t\t// recover from a panic\n\t\t\tdefer func() {\n\t\t\t\tif e := recover(); e != nil {\n\t\t\t\t\tif err, ok := e.(error); ok {\n\t\t\t\t\t\terrCh <- err\n\t\t\t\t\t} else {\n\t\t\t\t\t\terrCh <- errors.Errorf(\"%v\", e)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t\tstream := stream{\n\t\t\t\tlog: d.log.WithValues(\"streamID\", streamID),\n\t\t\t\tconfig: d.config,\n\t\t\t\thandler: &d.handler,\n\t\t\t}\n\t\t\terrCh <- stream.Run(ctx, ch)\n\t\t}(errCh)\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\td.log.Info(\"done\")\n\t\t\tbreak\n\t\tcase err := <-errCh:\n\t\t\tif err != nil {\n\t\t\t\td.log.WithValues(\"streamID\", streamID).Error(err, \"xDS stream terminated with an error\")\n\t\t\t}\n\t\t}\n\t}\n}", "func (rc RedisComponent) Process() {\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase msg := <-*rc.msgStream:\n\t\t\t\t// Lookup key in database\n\t\t\t\tval, _ := rc.conn.Do(\"GET\", msg)\n\t\t\t\t// Build event structure\n\t\t\t\te := Event{\n\t\t\t\t\tURI: msg,\n\t\t\t\t\tValue: val,\n\t\t\t\t\tTime: time.Now().UTC(),\n\t\t\t\t}\n\t\t\t\t// Write event to outputStream\n\t\t\t\trc.dataStream <- e\n\t\t\t}\n\t\t}\n\t}()\n}", "func (r *sinkRunner) run(pipeID, componentID string, cancel chan struct{}, in <-chan message, meter *meter) <-chan error {\n\terrc := make(chan error, 1)\n\tgo func() {\n\t\tdefer close(errc)\n\t\tcall(r.reset, pipeID, errc) // reset hook\n\t\tvar m message\n\t\tvar ok bool\n\t\tfor {\n\t\t\t// receive new message\n\t\t\tselect {\n\t\t\tcase m, ok = <-in:\n\t\t\t\tif !ok {\n\t\t\t\t\tcall(r.flush, pipeID, errc) // flush hook\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase <-cancel:\n\t\t\t\tcall(r.interrupt, pipeID, errc) // interrupt hook\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tm.params.applyTo(componentID) // apply params\n\t\t\terr := r.fn(m.Buffer) // sink a buffer\n\t\t\tif err != nil {\n\t\t\t\terrc <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tmeter = meter.sample(int64(m.Buffer.Size())).message()\n\n\t\t\tm.feedback.applyTo(componentID) // apply feedback\n\t\t}\n\t}()\n\n\treturn errc\n}", "func processRun(nRequests int, concurrency int, ch chan time.Duration, fun func()) []float64 {\n\tresults := 
make([]float64, 0, nRequests)\n\n\tn := nRequests\n\tfor n > 0 {\n\t\tfor i := 0; i < concurrency; i++ {\n\t\t\tif n > 0 {\n\t\t\t\tgo fun()\n\t\t\t\tn--\n\t\t\t}\n\t\t}\n\n\t\tfor i := 0; i < concurrency; i++ {\n\t\t\tif len(results) < nRequests {\n\t\t\t\tresults = append(results, float64(<-ch)/float64(time.Millisecond))\n\t\t\t}\n\t\t}\n\t}\n\n\treturn results\n}", "func sourceWorker(ctx context.Context, source Source, outCh chan<- Payload, errCh chan<- error) {\n\tfor source.Next(ctx) {\n\t\tpayload := source.Payload()\n\t\tselect {\n\t\tcase outCh <- payload:\n\t\tcase <-ctx.Done():\n\t\t\t// Asked to shutdown\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Check for errors\n\tif err := source.Error(); err != nil {\n\t\twrappedErr := xerrors.Errorf(\"pipeline source: %w\", err)\n\t\tmaybeEmitError(wrappedErr, errCh)\n\t}\n}", "func (e *LoadDataWorker) processStream(\n\tctx context.Context,\n\tparser mydump.Parser,\n\tseeker io.Seeker,\n) (err error) {\n\tdefer func() {\n\t\tr := recover()\n\t\tif r != nil {\n\t\t\tlogutil.Logger(ctx).Error(\"process routine panicked\",\n\t\t\t\tzap.Reflect(\"r\", r),\n\t\t\t\tzap.Stack(\"stack\"))\n\t\t\terr = errors.Errorf(\"%v\", r)\n\t\t}\n\t}()\n\n\tcheckKilled := time.NewTicker(30 * time.Second)\n\tdefer checkKilled.Stop()\n\n\tvar (\n\t\tloggedError = false\n\t\tcurrScannedSize = int64(0)\n\t)\n\tfor {\n\t\t// prepare batch and enqueue task\n\t\tif err = e.ReadOneBatchRows(ctx, parser); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif e.curBatchCnt == 0 {\n\t\t\te.finishedSize += currScannedSize\n\t\t\treturn\n\t\t}\n\n\tTrySendTask:\n\t\tcurrScannedSize, err = seeker.Seek(0, io.SeekCurrent)\n\t\tif err != nil && !loggedError {\n\t\t\tloggedError = true\n\t\t\tlogutil.Logger(ctx).Error(\" LOAD DATA failed to read current file offset by seek\",\n\t\t\t\tzap.Error(err))\n\t\t}\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tcase <-checkKilled.C:\n\t\t\tif atomic.CompareAndSwapUint32(&e.Ctx.GetSessionVars().Killed, 1, 0) {\n\t\t\t\tlogutil.Logger(ctx).Info(\"load data query interrupted quit data processing\")\n\t\t\t\tclose(e.commitTaskQueue)\n\t\t\t\treturn ErrQueryInterrupted\n\t\t\t}\n\t\t\tgoto TrySendTask\n\t\tcase e.commitTaskQueue <- commitTask{\n\t\t\tcnt: e.curBatchCnt,\n\t\t\trows: e.rows,\n\t\t\tloadedRowCnt: e.rowCount,\n\t\t\tscannedFileSize: e.finishedSize + currScannedSize,\n\t\t}:\n\t\t}\n\t\t// reset rows buffer, will reallocate buffer but NOT reuse\n\t\te.ResetBatch()\n\t}\n}", "func (p *Process) execute(identifier int) {\n\n\tfor {\n\n\t\t// Pick up a subAccountID to process from queue and mark as Done()\n\t\tsubAccountIDObj, _ := p.Queue.Get()\n\t\tsubAccountID := fmt.Sprintf(\"%v\", subAccountIDObj)\n\n\t\t// TODO Implement cleanup holistically in #kyma-project/control-plane/issues/512\n\t\t//if isShuttingDown {\n\t\t//\t//p.Cleanup()\n\t\t//\treturn\n\t\t//}\n\n\t\tp.processSubAccountID(subAccountID, identifier)\n\t\tp.Queue.Done(subAccountIDObj)\n\t}\n}", "func (f *Function) processResults() {\n\tdefer f.wg.Done()\n\tvar otherClosed bool\n\tfor {\n\t\tselect {\n\t\tcase res, ok := <-f.output:\n\t\t\tif !ok {\n\t\t\t\tif otherClosed {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\totherClosed = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tf.resHandler(res)\n\n\t\tcase err, ok := <-f.errch:\n\t\t\tif !ok {\n\t\t\t\tif otherClosed {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\totherClosed = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tf.errHandler(err)\n\t\t}\n\t}\n}", "func (b *Builder) run() {\n\tfor {\n\t\ttask := b.bq.Pop()\n\t\tb.process(task)\n\t}\n}", 
"func (worker *Worker) Execute() {\n\tfor i := 0; i < worker.NumberOfRequests; i++ {\n\t\tworker.Responses[i] = worker.DoRequest()\n\t}\n\tworker.wg.Done()\n}", "func (p *Parallel) do() {\n\t// if only one pipeline no need go routines\n\tif len(p.pipes) == 1 {\n\t\tp.secure(p.pipes[0])\n\t\treturn\n\t}\n\tfor _, pipe := range p.pipes {\n\t\tgo p.secure(pipe)\n\t}\n}", "func (w *Worker) run(tasks chan *ReadTaskOp, store chan *WriteStoreOp, service chan *ComplainOp) {\n\tfor {\n\t\ttime.Sleep(time.Duration(config.WorkerSpeed * time.Millisecond))\n\t\tnumber := rand.Int() % 100\n\t\tif number < config.WorkerSensitive {\n\t\t\tw.getAndExecute(tasks, store, service)\n\t\t}\n\t}\n}", "func (p *Pipeline) ExecuteBuffered(ctx context.Context, src InputSource, sink OutputSink, bufsize int) error {\n\tvar cancel context.CancelFunc\n\tctx, cancel = context.WithCancel(ctx)\n\n\t// Create channels for wiring together the InputSource, the pipeline\n\t// Stage instances, and the OutputSink\n\tstageCh := make([]chan Data, len(p.stages)+1)\n\tfor i := 0; i < len(stageCh); i++ {\n\t\tstageCh[i] = make(chan Data, bufsize)\n\t}\n\terrQueue := queue.NewQueue()\n\n\tvar wg sync.WaitGroup\n\t// Start a goroutine for each Stage\n\tfor i := 0; i < len(p.stages); i++ {\n\t\twg.Add(1)\n\t\tgo func(idx int) {\n\t\t\tp.stages[idx].Run(ctx, &params{\n\t\t\t\tstage: idx + 1,\n\t\t\t\tinCh: stageCh[idx],\n\t\t\t\toutCh: stageCh[idx+1],\n\t\t\t\terrQueue: errQueue,\n\t\t\t})\n\t\t\t// Tell the next Stage that no more Data is available\n\t\t\tclose(stageCh[idx+1])\n\t\t\twg.Done()\n\t\t}(i)\n\t}\n\n\t// Start goroutines for the InputSource and OutputSink\n\twg.Add(2)\n\tgo func() {\n\t\tinputSourceRunner(ctx, src, stageCh[0], errQueue)\n\t\t// Tell the next Stage that no more Data is available\n\t\tclose(stageCh[0])\n\t\twg.Done()\n\t}()\n\n\tgo func() {\n\t\toutputSinkRunner(ctx, sink, stageCh[len(stageCh)-1], errQueue)\n\t\twg.Done()\n\t}()\n\n\t// Monitor for completion of the pipeline execution\n\tgo func() {\n\t\twg.Wait()\n\t\tcancel()\n\t}()\n\n\tvar err error\n\t// Collect any emitted errors and wrap them in a multi-error\n\tselect {\n\tcase <-ctx.Done():\n\tcase <-errQueue.Signal:\n\t\terrQueue.Process(func(e interface{}) {\n\t\t\tif qErr, ok := e.(error); ok {\n\t\t\t\terr = multierror.Append(err, qErr)\n\t\t\t}\n\t\t})\n\t\tcancel()\n\t}\n\treturn err\n}", "func Main() {\n\tfor {\n\t\tline, err := Conn.GetLine()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tdebug(\"Got line\")\n\n\t\tif Bursted {\n\t\t\ttasks <- line\n\t\t} else {\n\t\t\tdebug(\"begin process line\")\n\t\t\tProcessLine(line)\n\t\t\tdebug(\"End process line\")\n\t\t}\n\t}\n\n\twg.Wait()\n}", "func (r *Reader) Run(ctx context.Context, outChan chan cortex_chunk.Chunk) {\n\terrChan := make(chan error)\n\tdefer close(outChan)\n\n\treadCtx, cancel := context.WithCancel(ctx)\n\n\t// starting workers\n\tfor i := 0; i < r.cfg.NumWorkers; i++ {\n\t\tr.workerGroup.Add(1)\n\t\tgo r.readLoop(readCtx, outChan, errChan)\n\t}\n\n\tgo func() {\n\t\t// cancel context when an error occurs or errChan is closed\n\t\tdefer cancel()\n\n\t\terr := <-errChan\n\t\tif err != nil {\n\t\t\tr.err = err\n\t\t\tlogrus.WithError(err).Errorln(\"error scanning chunks, stopping read operation\")\n\t\t\tclose(r.quit)\n\t\t}\n\t}()\n\n\tscanRequests := r.planner.Plan()\n\tlogrus.Infof(\"built %d plans for reading\", len(scanRequests))\n\n\tdefer func() {\n\t\t// lets wait for all workers to finish before we return.\n\t\t// An error in errChan would cause all workers to stop because 
we cancel the context.\n\t\t// Otherwise closure of scanRequestsChan(which is done after sending all the scanRequests) should make all workers to stop.\n\t\tr.workerGroup.Wait()\n\t\tclose(errChan)\n\t}()\n\n\t// feeding scan requests to workers\n\tfor _, req := range scanRequests {\n\t\tselect {\n\t\tcase r.scanRequestsChan <- req:\n\t\t\tcontinue\n\t\tcase <-r.quit:\n\t\t\treturn\n\t\t}\n\t}\n\n\t// all scan requests are fed, close the channel\n\tclose(r.scanRequestsChan)\n}", "func (p *Pipeline) Process(ctx context.Context, source Source, sink Sink) error {\n\tvar wg sync.WaitGroup\n\tpCtx, ctxCancelFn := context.WithCancel(ctx)\n\n\t// Allocate channels for wiring together the source, the pipeline stages\n\t// and the output sink. The output of the i_th stage is used as an input\n\t// for the i+1_th stage. We need to allocate one extra channel than the\n\t// number of stages so we can also wire the source/sink.\n\tstageCh := make([]chan Payload, len(p.stages)+1)\n\terrCh := make(chan error, len(p.stages)+2)\n\tfor i := 0; i < len(stageCh); i++ {\n\t\tstageCh[i] = make(chan Payload)\n\t}\n\n\t// Start a worker for each stage\n\tfor i := 0; i < len(p.stages); i++ {\n\t\twg.Add(1)\n\t\tgo func(stageIndex int) {\n\t\t\tp.stages[stageIndex].Run(pCtx, &workerParams{\n\t\t\t\tstage: stageIndex,\n\t\t\t\tinCh: stageCh[stageIndex],\n\t\t\t\toutCh: stageCh[stageIndex+1],\n\t\t\t\terrCh: errCh,\n\t\t\t})\n\n\t\t\t// Signal next stage that no more data is available.\n\t\t\tclose(stageCh[stageIndex+1])\n\t\t\twg.Done()\n\t\t}(i)\n\t}\n\n\t// Start source and sink workers\n\twg.Add(2)\n\tgo func() {\n\t\tsourceWorker(pCtx, source, stageCh[0], errCh)\n\n\t\t// Signal next stage that no more data is available.\n\t\tclose(stageCh[0])\n\t\twg.Done()\n\t}()\n\n\tgo func() {\n\t\tsinkWorker(pCtx, sink, stageCh[len(stageCh)-1], errCh)\n\t\twg.Done()\n\t}()\n\n\t// Close the error channel once all workers exit.\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(errCh)\n\t\tctxCancelFn()\n\t}()\n\n\t// Collect any emitted errors and wrap them in a multi-error.\n\tvar err error\n\tfor pErr := range errCh {\n\t\terr = multierror.Append(err, pErr)\n\t\tctxCancelFn()\n\t}\n\treturn err\n}", "func main() {\n\n\t// Create a new feed.\n\tfeed := feed.NewFeed()\n\n\t// Initialize a new queue.\n\tqueue := queue.NewQueue()\n\n\t// If command line arguments are not given, then run the tasks sequentially\n\tif len(os.Args) != 3 {\n\t\tscanner := bufio.NewScanner(os.Stdin)\n\t\tfor scanner.Scan() {\n\t\t\ttask := scanner.Text()\n\t\t\ttaskJSONBytes := []byte(task)\n\t\t\tvar cm ClientMessage\n\t\t\terr := json.Unmarshal(taskJSONBytes, &cm)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"error: \", err)\n\t\t\t}\n\t\t\tif cm.Command == \"ADD\" { // Add a post.\n\t\t\t\taddPostTask(feed, cm)\n\t\t\t} else if cm.Command == \"REMOVE\" { // Remove a post.\n\t\t\t\tremovePostTask(feed, cm)\n\t\t\t} else if cm.Command == \"CONTAINS\" { // See if feed contains a post.\n\t\t\t\tcontainsPostTask(feed, cm)\n\t\t\t} else if cm.Command == \"FEED\" { // Visualize the feed.\n\t\t\t\tshowFeedTask(feed, cm)\n\t\t\t} else if cm.Command == \"DONE\" { // Stop reading from stdin.\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t} else { // Otherwise spawn threads as consumers and produce tasks to queue\n\n\t\t// Read in command line arguments.\n\t\tthreads, _ := strconv.ParseInt(os.Args[1], 10, 64)\n\t\tblock, _ := strconv.ParseInt(os.Args[2], 10, 64)\n\n\t\t// Initialize sync mechanisms.\n\t\tvar wg sync.WaitGroup\n\t\tvar mtx sync.Mutex\n\t\tvar numOfTasks 
int64\n\t\tdoneBool := false\n\n\t\tcondVar := sync.NewCond(&mtx)\n\t\tcontext := SharedContext{wg: &wg, cond: condVar, mutex: &mtx, numOfTasks: &numOfTasks, doneBool: &doneBool}\n\n\t\t// Spawn goroutines\n\t\tfor i := int64(0); i < threads; i++ {\n\t\t\twg.Add(1)\n\t\t\tgo consumer(i, block, feed, queue, &context)\n\t\t}\n\n\t\t// Start producing tasks.\n\t\tproducer(queue, &context)\n\n\t\twg.Wait()\n\n\n\t}\n}", "func (prod *InfluxDB) Produce(workers *sync.WaitGroup) {\n\tprod.BatchMessageLoop(workers, prod.sendBatch)\n}", "func (c *Command) run() {\n\tdefer c.done()\n\tlog.Println(\"Executing \", c.Command)\n\tvar oscmd *exec.Cmd\n\n\tif len(c.parsed) > 1 {\n\t\toscmd = exec.Command(c.parsed[0], c.parsed[1:]...)\n\t} else {\n\t\toscmd = exec.Command(c.parsed[0])\n\t}\n\tif c.session.cwd != \"\" {\n\t\toscmd.Dir = c.session.cwd\n\t}\n\n\tstdout, err := oscmd.StdoutPipe()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tc.push(c.Id, \"console\", err.Error())\n\t\treturn\n\t}\n\tstderr, err := oscmd.StderrPipe()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tc.push(c.Id, \"console\", err.Error())\n\t\treturn\n\t}\n\n\terr = oscmd.Start()\n\tif err != nil {\n\t\tc.push(c.Id, \"console\", err.Error())\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tc.session.processes[c.Id] = oscmd.Process.Pid\n\n\treader := bufio.NewReader(stdout)\n\treaderErr := bufio.NewReader(stderr)\n\tgo c.readAndPush(readerErr)\n\tc.readAndPush(reader)\n\n\toscmd.Wait()\n}", "func (p *StreamToSubStream) Run() {\n\tdefer p.OutSubStream.Close()\n\n\tscipipe.Debug.Println(\"Creating new information packet for the substream...\")\n\tsubStreamIP := scipipe.NewIP(\"\")\n\tscipipe.Debug.Printf(\"Setting in-port of process %s to IP substream field\\n\", p.Name())\n\tsubStreamIP.SubStream = p.In\n\n\tscipipe.Debug.Printf(\"Sending sub-stream IP in process %s...\\n\", p.Name())\n\tp.OutSubStream.Send(subStreamIP)\n\tscipipe.Debug.Printf(\"Done sending sub-stream IP in process %s.\\n\", p.Name())\n}", "func main() {\n\tgo getData()\n\tfmt.Println(\"Subscription started\")\n\tfor {\n\t\tselect {\n\t\tcase val := <-Data:\n\t\t\tprocessData(val)\n\t\tcase <-Stop:\n\t\t\tfmt.Println(\"Stop\")\n\t\t\treturn\n\t\t}\n\t}\n}", "func processorsHandler(w http.ResponseWriter, r *http.Request) {\n\tvar wg sync.WaitGroup\n\n\t// Chunk the text into sentences\n\tc := NewSentenceChunker(appText)\n\tchunks, _ := c.Chunk()\n\n\t// Send each chunk into a gorountine to process\n\tfor _, c := range chunks {\n\t\twg.Add(1)\n\t\tgo func(c *Chunk) {\n\t\t\tdefer wg.Done()\n\t\t\tc = processors.Process(c)\n\t\t}(c)\n\t}\n\n\t// Wait for the processing to finish\n\twg.Wait()\n\n\tappResult = chunks\n\n\tw.Header().Set(\"Location\", \"/results\")\n\tw.WriteHeader(http.StatusTemporaryRedirect)\n}", "func main() {\n\tadapter.RunStage(split, chunk, join)\n}", "func main() {\n\trecordChan := make(chan recWrap)\n\tdefer close(recordChan)\n\tif len(os.Args) != 2{\n\t\tfmt.Println(\"Use format: ./commandExecuter <command_file.txt>\")\n\t\tlog.Fatal()\n\t}\n\tcmdFile := os.Args[1]\n\twg.Add(2)\n\tgo cmdProducer(cmdFile, recordChan)\n\tgo cmdConsumer(recordChan)\n\twg.Wait()\n}", "func (a *Agent) Run(ctx context.Context) error {\n\ta.Context = ctx\n\tlog.Printf(\"I! [agent] Config: Interval:%s, Quiet:%#v, Hostname:%#v, \"+\n\t\t\"Flush Interval:%s\",\n\t\ta.Config.Agent.Interval.Duration, a.Config.Agent.Quiet,\n\t\ta.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)\n\n\tlog.Printf(\"D! 
[agent] Initializing plugins\")\n\terr := a.initPlugins()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstartTime := time.Now()\n\tlog.Printf(\"D! [agent] Connecting outputs\")\n\tnext, ou, err := a.startOutputs(ctx, a.Config.Outputs)\n\tif err != nil {\n\t\treturn err\n\t}\n\ta.ou = ou\n\tvar apu []*processorUnit\n\tvar au *aggregatorUnit\n\tif len(a.Config.Aggregators) != 0 {\n\t\taggC := next\n\t\tif len(a.Config.AggProcessors) != 0 {\n\t\t\taggC, apu, err = a.startProcessors(next, a.Config.AggProcessors)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tnext, au, err = a.startAggregators(aggC, next, a.Config.Aggregators)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar pu []*processorUnit\n\tif len(a.Config.Processors) != 0 {\n\t\tnext, pu, err = a.startProcessors(next, a.Config.Processors)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tiu, err := a.startInputs(next, a.Config.Inputs)\n\ta.iu = iu\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\terr := a.runOutputs(ou)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"E! [agent] Error running outputs: %v\", err)\n\t\t}\n\t}()\n\n\tif au != nil {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terr := a.runProcessors(apu)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"E! [agent] Error running processors: %v\", err)\n\t\t\t}\n\t\t}()\n\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terr := a.runAggregators(startTime, au)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"E! [agent] Error running aggregators: %v\", err)\n\t\t\t}\n\t\t}()\n\t}\n\n\tif pu != nil {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terr := a.runProcessors(pu)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"E! [agent] Error running processors: %v\", err)\n\t\t\t}\n\t\t}()\n\t}\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\terr := a.runInputs(ctx, startTime, iu)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"E! [agent] Error running inputs: %v\", err)\n\t\t}\n\t}()\n\n\twg.Wait()\n\n\tlog.Printf(\"D! 
[agent] Stopped Successfully\")\n\treturn err\n}", "func runActivation(db *deep6.Deep6DB, crdtm *crdt.CRDTManager, gqlm *n3gql.GQLManager) error {\n\n\t// create context to manage pipeline\n\tctx, cancelFunc := context.WithCancel(context.Background())\n\tdefer cancelFunc()\n\n\t// start the stream listener for this context\n\titerator, err := crdtm.StartReceiver()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// error channels to monitor pipeline\n\tvar errcList []<-chan error\n\n\t// create a splitter for the stream\n\tstreamIterator1, streamIterator2, errc, err := streamSplitter(ctx, iterator)\n\tif err != nil {\n\t\treturn err\n\t}\n\terrcList = append(errcList, errc)\n\n\t// create the db sink stage\n\terrc, err = connectDB(ctx, db, streamIterator1)\n\tif err != nil {\n\t\treturn err\n\t}\n\terrcList = append(errcList, errc)\n\n\t// create the gql sink stage\n\terrc, err = connectGQL(ctx, gqlm, streamIterator2)\n\tif err != nil {\n\t\treturn err\n\t}\n\terrcList = append(errcList, errc)\n\n\treturn WaitForPipeline(errcList...)\n\n}", "func (gq *Dispatch) next() {\n for true {\n // Attempt to start processing the file.\n gq.pLock.Lock()\n if gq.processing >= gq.MaxGo {\n gq.waitingToRun = true\n gq.nextWait.Add(1)\n gq.pLock.Unlock()\n gq.nextWait.Wait()\n continue\n }\n // Keep the books and reset wait time before unlocking.\n gq.processing++\n gq.pLock.Unlock()\n\n // Get an element from the queue.\n gq.qLock.Lock()\n var wrapper = gq.queue.Dequeue().(queues.RegisteredTask)\n gq.qLock.Unlock()\n\n // Begin processing and asyncronously return.\n //var task = taskelm.Value.(dispatchTaskWrapper)\n var task = wrapper.Func()\n go task(wrapper.Id())\n return\n }\n}", "func (proc *Processor) Process(p *peer.Peer, msgType message.Type, data []byte) {\n\tproc.wp.Submit(p, msgType, data)\n}", "func (stc *ScatterConn) StreamExecute(query string, bindVars map[string]interface{}, keyspace string, shards []string, sendReply func(reply interface{}) error) error {\n\tstc.mu.Lock()\n\tdefer stc.mu.Unlock()\n\n\tif stc.transactionId != 0 {\n\t\treturn fmt.Errorf(\"cannot stream in a transaction\")\n\t}\n\tresults := make(chan *mproto.QueryResult, len(shards))\n\tallErrors := new(concurrency.AllErrorRecorder)\n\tvar wg sync.WaitGroup\n\tfor shard := range unique(shards) {\n\t\twg.Add(1)\n\t\tgo func(shard string) {\n\t\t\tdefer wg.Done()\n\t\t\tsdc, _ := stc.getConnection(keyspace, shard)\n\t\t\tsr, errFunc := sdc.StreamExecute(query, bindVars)\n\t\t\tfor qr := range sr {\n\t\t\t\tresults <- qr\n\t\t\t}\n\t\t\terr := errFunc()\n\t\t\tif err != nil {\n\t\t\t\tallErrors.RecordError(err)\n\t\t\t}\n\t\t}(shard)\n\t}\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(results)\n\t}()\n\tvar replyErr error\n\tfor innerqr := range results {\n\t\t// We still need to finish pumping\n\t\tif replyErr != nil {\n\t\t\tcontinue\n\t\t}\n\t\treplyErr = sendReply(innerqr)\n\t}\n\tif replyErr != nil {\n\t\tallErrors.RecordError(replyErr)\n\t}\n\treturn allErrors.Error()\n}", "func (t *task) run(ctx context.Context) {\n\tgo func() {\n\t\tresult, err := t.handler(ctx, t.request)\n\t\tt.resultQ <- Response{Result: result, Err: err} // out channel is buffered by 1\n\t\tt.running = false\n\t\tclose(t.resultQ)\n\t}()\n}", "func process(w io.Writer, concurrency int, limit int, timeout int, sitemapURL string, headers []parameter, query []parameter) bool {\n\twritesToStdout := w == os.Stdout\n\n\tif writesToStdout {\n\t\tuiprogress.Start()\n\t}\n\n\t// Create two channels for our pipeline\n\ttasks := make(chan URL)\n\tresults := make(chan 
URL)\n\t// Create pre-configured client\n\tclient := newClient()\n\t// Define timeout for workers' pool\n\tworkerTimeout := time.Duration(1000000 * timeout)\n\n\tsitemap, err := requestSitemap(client, sitemapURL, headers)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error: Failed to download the sitemap: %v\", err)\n\t}\n\n\tif len(sitemap.URLS) == 0 {\n\t\tlog.Fatalf(\"Error: The sitemap is empty\")\n\t}\n\n\tvar entiesNum int\n\tif len(sitemap.URLS) > limit && limit > 0 {\n\t\tentiesNum = len(sitemap.URLS[:limit])\n\t} else {\n\t\tentiesNum = len(sitemap.URLS)\n\t}\n\n\tbar := makeProgressBar(entiesNum)\n\n\t// Spawn workers\n\tfor w := 1; w <= concurrency; w++ {\n\t\tworker := newWorker(workerTimeout, tasks, results)\n\t\tgo worker.Perform(func(url URL) URL {\n\t\t\tstatusCode, err := requestPage(client, appendQuery(url.Loc, query), headers)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Error: %v\", err)\n\t\t\t}\n\n\t\t\turl.StatusCode = statusCode\n\t\t\treturn url\n\t\t})\n\t}\n\n\t// Spawn tasks producer\n\tproducer := newProducer(tasks)\n\tgo producer.Perform(sitemap.URLS[:entiesNum])\n\n\t// Create a consumer and join results\n\tconsumer := newConsumer(results)\n\treport := consumer.Perform(entiesNum, func() {\n\t\tif writesToStdout {\n\t\t\tbar.Incr()\n\t\t}\n\t})\n\n\t// Stop the progressbar\n\tif writesToStdout {\n\t\tuiprogress.Stop()\n\t}\n\n\tvar failed []URL\n\n\t// // Write a report\n\t// drawTable(w, report)\n\n\tfor _, url := range report {\n\t\tif url.StatusCode != 200 {\n\t\t\tfailed = append(failed, url)\n\t\t}\n\t}\n\n\tif len(failed) > 0 {\n\t\tdrawTable(w, failed)\n\t} else {\n\t\tfmt.Println(\"+-------------------+\\n| NO PROBLEMS FOUND |\\n+-------------------+\")\n\t}\n\n\treturn len(failed) == 0\n}", "func (a *Aggregator) Run() {\n\tevents := make(chan []byte)\n\turls := make(chan URL)\n\tctx, cancel := context.WithCancel(context.Background())\n\n\t// Catch SIGINT/SIGTERM signals and call cancel() before exiting to\n\t// gracefully stop goroutines\n\tsignalCh := make(chan os.Signal, 1)\n\tsignal.Notify(signalCh, os.Interrupt, syscall.SIGTERM)\n\n\tgo func() {\n\t\t<-signalCh\n\t\tcancel()\n\t\tos.Exit(1)\n\t}()\n\n\t// Run an event listener goroutine, compute aggregation on `ServerStatus`\n\t// events coming from the message queue\n\tgo func(ctx context.Context) {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase event := <-events:\n\t\t\t\tvar status ServerStatus\n\t\t\t\terr := json.Unmarshal(event, &status)\n\t\t\t\tif err != nil {\n\t\t\t\t\ta.logger.Println(\"Error decoding status event\")\n\t\t\t\t} else {\n\t\t\t\t\ta.aggregate(&status)\n\t\t\t\t\turls <- status.Url\n\t\t\t\t}\n\t\t\tcase <-ctx.Done():\n\t\t\t\ta.mq.Close()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}(ctx)\n\n\t// Just print results of aggreation fo each received URL\n\tgo func(ctx context.Context) {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase url := <-urls:\n\t\t\t\tstats, _ := a.servers[url]\n\t\t\t\ta.logger.Printf(\"%s alive=%v avail.(%%)=%.2f res(ms)=%v min(ms)=%v max(ms)=%v avg(ms)=%v status_codes=%v\\n\",\n\t\t\t\t\turl, stats.Alive, stats.Availability,\n\t\t\t\t\tstats.LatestResponseTime, stats.MovingAverageStats.Min(),\n\t\t\t\t\tstats.MovingAverageStats.Max(), stats.MovingAverageStats.Mean(),\n\t\t\t\t\tstats.ResponseStatusMap)\n\t\t\t\t// Send stats to presenter\n\t\t\t\tpresenterStats := Stats{\n\t\t\t\t\tUrl: url,\n\t\t\t\t\tAlive: stats.Alive,\n\t\t\t\t\tAvgResponseTime: stats.MovingAverageStats.Mean(),\n\t\t\t\t\tAvailability: stats.Availability,\n\t\t\t\t\tStatusCodes: 
stats.ResponseStatusMap,\n\t\t\t\t}\n\t\t\t\tpayload, err := json.Marshal(presenterStats)\n\t\t\t\tif err != nil {\n\t\t\t\t\ta.logger.Println(\"Unable to marshal presenter stats\")\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\ta.mq.Produce(\"stats\", payload)\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}(ctx)\n\n\tif err := a.mq.Consume(\"urlstatus\", 1, events); err != nil {\n\t\ta.logger.Fatal(err)\n\t}\n}", "func (c *client) doPipeline(commands []commandPair) (interface{}, error) {\n\tconn, ok := c.timedBorrow()\n\tif !ok {\n\t\treturn nil, ErrNoConnection\n\t}\n\n\tif err := conn.Send(\"MULTI\"); err != nil {\n\t\tc.release(conn, err)\n\t\treturn nil, err\n\t}\n\n\tfor _, command := range commands {\n\t\tif err := conn.Send(command.command, command.args...); err != nil {\n\t\t\tc.release(conn, err)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tresult, err := conn.Do(\"EXEC\")\n\tc.release(conn, err)\n\treturn result, err\n}", "func Execute(path string, fs afero.Fs, w io.Writer, modules ...Module) {\n\tvar renderers []renderer\n\tvar workers []worker\n\n\tfor _, m := range modules {\n\t\trenderers = append(renderers, m.renderers()...)\n\t\tworkers = append(workers, m.workers()...)\n\t}\n\n\tvar handlers []sys.ScanHandler\n\tfor _, wo := range workers {\n\t\two.init()\n\t\thandlers = append(handlers, wo.handler)\n\t}\n\n\tsys.Scan(path, fs, handlers)\n\n\tfor _, wo := range workers {\n\t\two.finalize()\n\t}\n\n\trender(w, renderers)\n}", "func (m *Manager) run() {\n\tfor i := 0; i < m.workerPool.MaxWorker; i++ {\n\t\twID := i + 1\n\t\t//log.Printf(\"[workerPool] worker %d spawned\", wID)\n\t\tgo func(workerID int) {\n\t\t\tfor task := range m.workerPool.queuedTaskC {\n\t\t\t\tlog.Printf(\"[workerPool] worker %d is processing task\", wID)\n\t\t\t\ttask()\n\t\t\t\tlog.Printf(\"[workerPool] worker %d has finished processing task\", wID)\n\t\t\t}\n\t\t}(wID)\n\t}\n}", "func (p *Printer) run() {\n\tdefer close(p.ch)\n\tconn, err := p.ln.Accept()\n\tif err != nil {\n\t\treturn\n\t}\n\tp.conn = conn\n\n\t// If Close() has been called, close the connection.\n\tif atomic.SwapInt32(&p.state, 2) == 1 {\n\t\tconn.Close()\n\t\treturn\n\t}\n\n\tdata, err := ioutil.ReadAll(conn)\n\tif err != nil {\n\t\treturn\n\t}\n\tp.ch <- data\n}", "func (brw *blockRetrievalWorker) run() {\n\tfor {\n\t\terr := brw.HandleRequest()\n\t\t// Only io.EOF is relevant to the loop; other errors are handled in\n\t\t// FinalizeRequest\n\t\tif err == io.EOF {\n\t\t\treturn\n\t\t}\n\t}\n}", "func (p *SimplePipeline) Do(fs ...ProcessFunc) uint {\n\tout := NewSimpleBuffer(p.BufSize())\n\tdefer out.Free()\n\treturn p.do(fs)\n}", "func main() {\n\n\tc_amount_pages := make(chan int)\n\tgo fn_get__amount_pages(c_amount_pages)\n\tint_amount := <- c_amount_pages\n\n\tfn_get__job_infos(int_amount)\n}", "func (g *Generator) Execute() {\n\tg.lock.Lock()\n\tdefer g.lock.Unlock()\n\n\t// Check if any patterns have been added to the Generator\n\tif len(g.patterns) == 0 {\n\t\treturn\n\t}\n\n\t// pSum is the sum of the scores of all patterns\n\tpSum := 0\n\tfor _, p := range g.patterns {\n\t\tpSum += p.Probability\n\t}\n\tvar wg = sync.WaitGroup{}\n\tfor i := uint32(0); i < g.concurrency; i++ {\n\t\twg.Add(1)\n\t\tgo g.execute(&wg, pSum)\n\t}\n\n\tfor i := uint32(0); i < g.maxItems; i++ {\n\t\tg.inputChan <- true\n\t}\n\n\twg.Wait()\n}", "func (sO *ScreenOutput) Run() {\n\tfor _, channel := range sO.DataInput {\n\t\tgo sO.runChannelInput(channel)\n\t}\n}", "func (o *KinesisOutput) RunOutputLoop() {\n\tdt := &Dnstap{}\n\tfor frame := 
range o.outputChannel {\n\t\tif err := proto.Unmarshal(frame, dt); err != nil {\n\t\t\tlog.Fatalf(\"dnstap.TextOutput: proto.Unmarshal() failed: %s\\n\", err)\n\t\t\tcontinue\n\t\t}\n\t\tbuf, ok := o.format(dt)\n\t\tif !ok {\n\t\t\tlog.Fatalf(\"dnstap.TextOutput: text format function failed\\n\")\n\t\t\tcontinue\n\t\t}\n\t\t//Send buf to kinesis\n\t\t_, err := o.client.PutRecord(&kinesis.PutRecordInput{\n\t\t\tData: buf,\n\t\t\tStreamName: aws.String(o.streamname),\n\t\t\tPartitionKey: aws.String(o.PartitionKey),\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"aws client PutRecord() failed: %s\\n\", err)\n\t\t\tbreak\n\t\t}\n\t}\n\n\tclose(o.wait)\n}", "func processTxnData(errorChan chan<- error, p *newrelic.Processor) {\n\tdefer crashGuard(\"processor\", errorChan)\n\n\terr := p.Run()\n\tif err != nil {\n\t\terrorChan <- &workerError{\n\t\t\tComponent: \"processor\",\n\t\t\tRespawn: true,\n\t\t\tErr: err,\n\t\t}\n\t}\n}", "func runProc(proc *goka.Processor) (*goka.Processor, context.CancelFunc, chan error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdone := make(chan error, 1)\n\tgo func() {\n\t\tdefer close(done)\n\t\tdone <- proc.Run(ctx)\n\t}()\n\n\treturn proc, cancel, done\n}", "func main() {\n\tprocessor.RegisterProcessors(Process)\n}", "func (runner *McRunner) processOutput() {\n\trunner.WaitGroup.Add(1)\n\tdefer runner.WaitGroup.Done()\n\tfor {\n\t\tselect {\n\t\tcase <-runner.killChannel:\n\t\t\treturn\n\t\tdefault:\n\t\t\tbuf := make([]byte, 256)\n\t\t\tn, err := runner.outPipe.Read(buf)\n\t\t\tstr := string(buf[:n])\n\n\t\t\tif (err == nil) && (n > 1) {\n\t\t\t\tif runner.Settings.PassthroughStdOut {\n\t\t\t\t\tfmt.Print(str)\n\t\t\t\t}\n\t\t\t\tmsgExp, _ := regexp.Compile(\"\\\\[.*\\\\] \\\\[.*INFO\\\\] \\\\[.*DedicatedServer\\\\]: <.*>\")\n\t\t\t\ttpsExp, _ := regexp.Compile(\"\\\\[.*\\\\] \\\\[.*INFO\\\\] \\\\[.*DedicatedServer\\\\]: Dim\")\n\t\t\t\tplayerExp, _ := regexp.Compile(\"\\\\[.*\\\\] \\\\[.*INFO\\\\] \\\\[.*DedicatedServer\\\\]: There are\")\n\t\t\t\tdoneExp, _ := regexp.Compile(\"\\\\[.*\\\\] \\\\[.*INFO\\\\] \\\\[.*DedicatedServer\\\\]: Done\")\n\n\t\t\t\tif runner.State == Starting {\n\t\t\t\t\tif doneExp.Match(buf) {\n\t\t\t\t\t\trunner.State = Running\n\t\t\t\t\t\tfmt.Println(\"Minecraft server done loading.\")\n\t\t\t\t\t}\n\t\t\t\t} else if runner.State == Running {\n\t\t\t\t\tif msgExp.Match(buf) {\n\t\t\t\t\t\trunner.MessageChannel <- str[strings.Index(str, \"<\"):]\n\t\t\t\t\t} else if tpsExp.Match(buf) {\n\t\t\t\t\t\tcontent := str[strings.Index(str, \"Dim\"):]\n\n\t\t\t\t\t\tnumExp, _ := regexp.Compile(\"[+-]?([0-9]*[.])?[0-9]+\")\n\t\t\t\t\t\tnums := numExp.FindAllString(content, -1)\n\t\t\t\t\t\tdim, _ := strconv.Atoi(nums[0])\n\t\t\t\t\t\ttps, _ := strconv.ParseFloat(nums[len(nums)-1], 32)\n\n\t\t\t\t\t\tm := make(map[int]float32)\n\t\t\t\t\t\tm[dim] = float32(tps)\n\n\t\t\t\t\t\trunner.tpsChannel <- m\n\t\t\t\t\t} else if playerExp.Match(buf) {\n\t\t\t\t\t\tcontent := str[strings.Index(str, \"There\"):]\n\n\t\t\t\t\t\tnumExp, _ := regexp.Compile(\"[+-]?([0-9]*[.])?[0-9]+\")\n\t\t\t\t\t\tplayers, _ := strconv.Atoi(numExp.FindString(content))\n\n\t\t\t\t\t\trunner.playerChannel <- players\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (s *Service) run() {\n\n\t// Create a communicator for sending and receiving packets.\n\tcommunicator := comm.NewCommunicator(s.config.PollInterval, s.config.Port)\n\tdefer communicator.Stop()\n\n\t// Create a ticker for sending pings.\n\tpingTicker := 
time.NewTicker(s.config.PingInterval)\n\tdefer pingTicker.Stop()\n\n\t// Create a ticker for timeout checks.\n\tpeerTicker := time.NewTicker(s.config.PeerTimeout)\n\tdefer peerTicker.Stop()\n\n\t// Create the packet that will be sent to all peers.\n\tpkt := &comm.Packet{\n\t\tID: s.config.ID,\n\t\tUserData: s.config.UserData,\n\t}\n\n\t// Continue processing events until explicitly stopped.\n\tfor {\n\t\tselect {\n\t\tcase p := <-communicator.PacketChan:\n\t\t\ts.processPacket(p)\n\t\tcase <-pingTicker.C:\n\t\t\tcommunicator.Send(pkt)\n\t\tcase <-peerTicker.C:\n\t\t\ts.processPeers()\n\t\tcase <-s.stopChan:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (p *Pool) Run(w Worker) {\n p.job <- w\n}", "func Run(n int, b bool) {\n\n\tvar file *os.File\n\tif b {\n\t\tfile, _ = os.Create(\"debug.out\")\n\t}\n\n\tdefer file.Close()\n\n\t// Since the program starts here, let's make a channel to receive requests.\n\t// The buffer size is completely arbitrary, just prevents the sender from blocking too soon.\n\trequestCh := make(chan []string, 100)\n\tidCh := make(chan string)\n\n\t// If you want to play with us you need to register your Sender here.\n\tgo publisher.Sender(requestCh)\n\tgo makeID(idCh)\n\n\t// Our request pool\n\tfor i := 1; i <= n; i++ {\n\n\t\t// DEBUG\n\t\t//fmt.Println(runtime.NumGoroutine())\n\n\t\t// get request\n\t\trequest := <-requestCh\n\n\t\t// add i as ID\n\t\trequest = append(request, <-idCh)\n\n\t\tif b {\n\t\t\tfor _, elem := range request {\n\t\t\t\tfile.WriteString(elem)\n\t\t\t\tfile.WriteString(\" \")\n\t\t\t}\n\t\t\tfile.WriteString(\"\\n\")\n\t\t\tfile.Sync()\n\t\t}\n\n\t\tdistributor(request)\n\n\t\t// Send the result back to the publisher\n\t\tpublisher.Receiver(<-resultCh)\n\t}\n}", "func execute(ctx context.Context, c string, executionMap map[component][]job) {\n\tvar wg sync.WaitGroup\n\n\tstart := time.Now()\n\n\tjobs := executionMap[component(c)]\n\tstatusChan := make(chan jobStatus, len(jobs))\n\twg.Add(len(jobs))\n\n\tif len(jobs) > 0 {\n\t\tfor _, job := range jobs {\n\t\t\tgo worker(ctx, statusChan, &wg, job)\n\t\t}\n\t}\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(statusChan)\n\t}()\n\n\temptyJob := jobStatus{}\n\tfor status := range statusChan {\n\t\tlog.Infof(\"Job Status: %v\", status)\n\t\tif status != emptyJob {\n\t\t\tfinishedJobs = append(finishedJobs, status)\n\t\t\tif status.status == true {\n\t\t\t\tlog.Infof(\"Following job executed: %v\", status.job)\n\t\t\t} else if status.status == false {\n\t\t\t\tlog.Infof(\"Following job failed: `%v` with error: %s\", status.job, status.err)\n\t\t\t}\n\t\t}\n\t}\n\n\tt := time.Now()\n\taddDuration(t.Sub(start))\n}", "func (bf *brainfog) run() {\n\tfor bf.ip < len(bf.program) {\n\t\terr := bf.doInstruction()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tclose(bf.outCh)\n}", "func (e *BatchEngine) Run(ctx context.Context) error {\n\te.pollMessages(ctx, e.stream)\n\treturn e.processorError\n}", "func (c *Controllor) Run(ctx context.Context) {\n\tlog.Logger.Info(\"running...\")\n\tenv := gutils.Settings.GetString(\"env\")\n\n\tjournal := c.initJournal(ctx)\n\n\treceivers := c.initRecvs(env)\n\tacceptor := c.initAcceptor(ctx, journal, receivers)\n\tacceptorPipeline, err := c.initAcceptorPipeline(ctx, env)\n\tif err != nil {\n\t\tlog.Logger.Panic(\"initAcceptorPipeline\", zap.Error(err))\n\t}\n\n\twaitCommitChan := journal.GetCommitChan()\n\twaitAccepPipelineSyncChan := acceptor.GetSyncOutChan()\n\twaitAccepPipelineAsyncChan := acceptor.GetAsyncOutChan()\n\twaitDumpChan, skipDumpChan := acceptorPipeline.Wrap(ctx, 
waitAccepPipelineAsyncChan, waitAccepPipelineSyncChan)\n\n\t// after `journal.DumpMsgFlow`, every discarded msg should commit to waitCommitChan\n\twaitDispatchChan := journal.DumpMsgFlow(ctx, c.msgPool, waitDumpChan, skipDumpChan)\n\n\ttagPipeline := c.initTagPipeline(ctx, env, waitCommitChan)\n\tdispatcher := c.initDispatcher(ctx, waitDispatchChan, tagPipeline)\n\twaitPostPipelineChan := dispatcher.GetOutChan()\n\tpostPipeline := c.initPostPipeline(env, waitCommitChan)\n\twaitProduceChan := postPipeline.Wrap(ctx, waitPostPipelineChan)\n\tproducerSenders := c.initSenders(env)\n\tproducer := c.initProducer(env, waitProduceChan, waitCommitChan, producerSenders)\n\n\t// heartbeat\n\tgo c.runHeartBeat(ctx)\n\n\t// monitor\n\tmonitor.AddMetric(\"controllor\", func() map[string]interface{} {\n\t\treturn map[string]interface{}{\n\t\t\t\"goroutine\": runtime.NumGoroutine(),\n\t\t\t\"waitAccepPipelineSyncChanLen\": len(waitAccepPipelineSyncChan),\n\t\t\t\"waitAccepPipelineSyncChanCap\": cap(waitAccepPipelineSyncChan),\n\t\t\t\"waitAccepPipelineAsyncChanLen\": len(waitAccepPipelineAsyncChan),\n\t\t\t\"waitAccepPipelineAsyncChanCap\": cap(waitAccepPipelineAsyncChan),\n\t\t\t\"waitDumpChanLen\": len(waitDumpChan),\n\t\t\t\"waitDumpChanCap\": cap(waitDumpChan),\n\t\t\t\"skipDumpChanLen\": len(skipDumpChan),\n\t\t\t\"skipDumpChanCap\": cap(skipDumpChan),\n\t\t\t\"waitDispatchChanLen\": len(waitDispatchChan),\n\t\t\t\"waitDispatchChanCap\": cap(waitDispatchChan),\n\t\t\t\"waitPostPipelineChanLen\": len(waitPostPipelineChan),\n\t\t\t\"waitPostPipelineChanCap\": cap(waitPostPipelineChan),\n\t\t\t\"waitProduceChanLen\": len(waitProduceChan),\n\t\t\t\"waitProduceChanCap\": cap(waitProduceChan),\n\t\t\t\"waitCommitChanLen\": len(waitCommitChan),\n\t\t\t\"waitCommitChanCap\": cap(waitCommitChan),\n\t\t}\n\t})\n\tmonitor.BindHTTP(server)\n\n\tgo producer.Run(ctx)\n\tRunServer(ctx, gutils.Settings.GetString(\"addr\"))\n}", "func (self *averageCache) run() {\n\tvar flushLimit int\n\tvar dataPoints []*whisper.TimeSeriesPoint\n\tfor {\n\t\tselect {\n\t\tcase <- self.closeChan: // The cache is ordered to close\n\t\t\tlog.Debug(\"Close signal\")\n\t\t\tself.close()\n\t\tcase flushLimit = <- self.flushChan: // A flush is queued\n\t\t\tlog.Debug(\"Flush Signal\")\n\t\t\tself.flush(flushLimit)\n\t\tcase dataPoints = <- self.inputChan: // An insert is queued\n\t\t\tlog.Debug(\"Data Signal\")\n\t\t\tself.insert(dataPoints)\n\t\t}\n\t}\n}", "func (p *Plugin) Run() {\n\tp.Log(\"notice\", fmt.Sprintf(\"Start filter v%s\", p.Version))\n\tmyId := qutils.GetGID()\n\tdc := p.QChan.Data.Join()\n\tinputs := p.GetInputs()\n\n\tfor {\n\t\tselect {\n\t\tcase val := <-dc.Read:\n\t\t\tswitch val.(type) {\n\t\t\tcase qtypes.QMsg:\n\t\t\t\tqm := val.(qtypes.QMsg)\n\t\t\t\tif qm.SourceID == myId {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif len(inputs) != 0 && !qutils.IsInput(inputs, qm.Source) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif qutils.IsItem(p.sendData, qm.Source) {\n\t\t\t\t\tqm.SourceID = myId\n\t\t\t\t\tp.QChan.Data.Send(qm)\n\t\t\t\t}\n\t\t\t\tif qutils.IsItem(p.sendBack, qm.Source) {\n\t\t\t\t\tp.QChan.Back.Send(qm)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (this *Handler) Run() {\n\tfor _, host := range this.hosts {\n\t\tfor _, cred := range this.creds {\n\t\t\t// Add it to our channel\n\t\t\tthis.in <- inputs.Data{\n\t\t\t\tTarget: host,\n\t\t\t\tCred: cred,\n\t\t\t}\n\t\t}\n\t}\n}", "func (se *singleExecution) do() error {\n\tvar finalError error\n\tdefer func() {\n\t\tif finalError != nil 
{\n\t\t\tsessionLogger.Println(color.YellowString(se.hostname) + fmt.Sprintf(\" error %s\", finalError.Error()))\n\t\t}\n\t}()\n\n\t// Each ClientConn can support multiple interactive sessions,\n\t// represented by a Session.\n\tsession, err := se.client.NewSession()\n\tif err != nil {\n\t\tfinalError = fmt.Errorf(\"Failed to create session: %s\", err.Error())\n\t\treturn finalError\n\t}\n\tdefer session.Close()\n\n\tout, err := session.StdoutPipe()\n\tif err != nil {\n\t\tfinalError = fmt.Errorf(\"Couldn't create pipe to Stdout for session: %s\", err.Error())\n\t\treturn finalError\n\t}\n\n\terrOut, err := session.StderrPipe()\n\tif err != nil {\n\t\tfinalError = fmt.Errorf(\"Couldn't create pipe to Stderr for session: %s\", err.Error())\n\t\treturn finalError\n\t}\n\n\tcurrentHost := strings.Split(se.hostname, \":\")[0]\n\n\t// Wait for consumerReaderPipes to fully consume before returning from this function so we can be sure\n\t// all session logs have finished writing for the session when the session is closed.\n\t// This way we don't get a session log line AFTER the `Recipe done` log line.\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\n\tdefer func() {\n\t\twg.Wait()\n\t}()\n\n\t// Consume session Stdout, Stderr pipe async.\n\tgo consumeReaderPipes(&wg, currentHost, out, false, 0)\n\tgo consumeReaderPipes(&wg, currentHost, errOut, true, se.attempt)\n\n\t// Once a Session is created, you can only ever execute a single command.\n\tif err := session.Run(se.command); err != nil {\n\t\t// TODO: use this line for more verbose error logging since Stderr is also displayed.\n\t\t//sessionLogger.Print(color.RedString(currentHost+\":\") + fmt.Sprintf(\" Failed to run the %s command: `%s` - %s\", humanize.Ordinal(index), command, err.Error()))\n\t\treturn err\n\t}\n\tse.success++\n\treturn nil\n}", "func (b *bufferedChan) Run() {\n\tdefer close(b.OutChannel)\n\tfor value := range b.inChannel {\n\t\tselect {\n\t\tcase <-b.ctx.Done():\n\t\t\tfmt.Println(\"Run: Time to return\")\n\t\t\treturn\n\t\tcase b.OutChannel <- value:\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n\n}", "func (r *Runner) generate(output chan Result, wg *sizedwaitgroup.SizedWaitGroup) {\n\tif r.options.TargetUrl != \"\" {\n\t\tlog.Info(fmt.Sprintf(\"single target: %s\", r.options.TargetUrl))\n\t\twg.Add()\n\t\tgo r.process(output, r.options.TargetUrl, wg)\n\t} else {\n\t\turls, err := ReadFile(r.options.UrlFile)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"Cann't read url file\")\n\t\t} else {\n\t\t\tlog.Info(fmt.Sprintf(\"Read %d's url totaly\", len(urls)))\n\t\t\tfor _, u := range urls {\n\t\t\t\twg.Add()\n\t\t\t\tgo r.process(output, u, wg)\n\t\t\t}\n\t\t}\n\t}\n}", "func (e *Executor) Run() { e.loop() }" ]
[ "0.6768461", "0.64574254", "0.6350197", "0.6306248", "0.63038504", "0.6270772", "0.62555313", "0.61808825", "0.61413014", "0.6004808", "0.59958047", "0.5968906", "0.5943362", "0.59123737", "0.584534", "0.584344", "0.58390856", "0.5833644", "0.58037674", "0.5782119", "0.57704204", "0.57641715", "0.57512796", "0.575074", "0.57493484", "0.5720139", "0.5709787", "0.57058036", "0.567836", "0.56770545", "0.56505525", "0.5634233", "0.56288695", "0.5613604", "0.5611921", "0.5602752", "0.5597999", "0.5583824", "0.5581432", "0.55763364", "0.5568169", "0.55463684", "0.55260867", "0.5524466", "0.5523906", "0.5523597", "0.55199164", "0.54968476", "0.54847986", "0.54704994", "0.54603446", "0.5459623", "0.54595363", "0.54592794", "0.5452831", "0.5450759", "0.54431707", "0.5419863", "0.54169035", "0.54156435", "0.5411583", "0.5384798", "0.53834903", "0.5381835", "0.5381623", "0.5380037", "0.5379997", "0.5379376", "0.5379195", "0.53772837", "0.5376055", "0.5371602", "0.53666157", "0.5364564", "0.53600186", "0.5357357", "0.5355544", "0.53524464", "0.53459126", "0.5341601", "0.5339307", "0.53389186", "0.5337032", "0.5334197", "0.5332126", "0.5324942", "0.5321346", "0.53163964", "0.53155166", "0.5311533", "0.5311423", "0.5311386", "0.53012896", "0.5293016", "0.5292055", "0.52911955", "0.5290372", "0.5289816", "0.52881694", "0.52808404" ]
0.57068914
27
creates a new value set if one does not already exist for a given tagset + timestamp.
func (e *Executor) createRowValuesIfNotExists(rows map[string]*Row, name string, tagset []byte, timestamp int64) []interface{} {
	// TODO: Add "name" to lookup key.

	// Find row by tagset.
	var row *Row
	if row = rows[string(tagset)]; row == nil {
		row = &Row{Name: name}

		// Create tag map.
		row.Tags = make(map[string]string)
		for i, v := range unmarshalStrings(tagset) {
			row.Tags[e.tags[i]] = v
		}

		// Create column names.
		row.Columns = make([]string, 1, len(e.stmt.Fields)+1)
		row.Columns[0] = "time"
		for i, f := range e.stmt.Fields {
			name := f.Name()
			if name == "" {
				name = fmt.Sprintf("col%d", i)
			}
			row.Columns = append(row.Columns, name)
		}

		// Save to lookup.
		rows[string(tagset)] = row
	}

	// If no values exist or last value doesn't match the timestamp then create new.
	if len(row.Values) == 0 || row.Values[len(row.Values)-1][0] != timestamp {
		values := make([]interface{}, len(e.processors)+1)
		values[0] = timestamp
		row.Values = append(row.Values, values)
	}

	return row.Values[len(row.Values)-1]
}
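The function above is easiest to verify in isolation. Below is a minimal, self-contained sketch of the same lookup-or-create pattern, assuming a stripped-down Row (no tag decoding from the tagset, and an explicit nCols parameter standing in for the executor's statement fields and processors); it illustrates the behavior rather than reproducing the executor's implementation.

package main

import "fmt"

// Row is a minimal stand-in for the executor's output row: a name plus a
// slice of (timestamp, values...) tuples, newest last.
type Row struct {
	Name   string
	Values [][]interface{}
}

// valuesFor returns the tuple for the given tagset and timestamp, creating
// the row and/or a fresh tuple when they do not exist yet. nCols is the
// number of value columns that follow the timestamp.
func valuesFor(rows map[string]*Row, name, tagset string, ts int64, nCols int) []interface{} {
	row := rows[tagset]
	if row == nil {
		row = &Row{Name: name}
		rows[tagset] = row
	}
	// Append a new tuple unless the last one already carries this timestamp.
	if n := len(row.Values); n == 0 || row.Values[n-1][0] != ts {
		tuple := make([]interface{}, nCols+1)
		tuple[0] = ts
		row.Values = append(row.Values, tuple)
	}
	return row.Values[len(row.Values)-1]
}

func main() {
	rows := make(map[string]*Row)
	v := valuesFor(rows, "cpu", "host=a", 1000, 2)
	v[1] = 0.5 // write into the freshly created tuple
	// Same tagset and timestamp: the existing tuple comes back, not a new one.
	again := valuesFor(rows, "cpu", "host=a", 1000, 2)
	fmt.Println(again[0], again[1]) // 1000 0.5
}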
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewSet(timestamp Nanotime, values map[string]struct{}, source Source, tags Tags) Set {\n\treturn Set{Values: values, Timestamp: timestamp, Source: source, Tags: tags.Copy()}\n}", "func newSet(txn *Transaction, key []byte) *Set {\n\tnow := Now()\n\treturn &Set{\n\t\ttxn: txn,\n\t\tkey: key,\n\t\tmeta: &SetMeta{\n\t\t\tObject: Object{\n\t\t\t\tID: UUID(),\n\t\t\t\tCreatedAt: now,\n\t\t\t\tUpdatedAt: now,\n\t\t\t\tExpireAt: 0,\n\t\t\t\tType: ObjectSet,\n\t\t\t\tEncoding: ObjectEncodingHT,\n\t\t\t},\n\t\t\tLen: 0,\n\t\t},\n\t}\n}", "func Newset(flag, ex, tr int) *set_st {\n\ts := new(set_st)\n\ts.m = make(mapProto)\n\ts.set = make(setProto, 0)\n\ts.pset = unsafe.Pointer(&s.set)\n\n\ts.expireTime = ex\n\ts.triggerInterval = tr\n\ts.flag = flag\n\n\tgo s.refresh()\n\n\treturn s\n}", "func NewSet(host *Host, name string) (*Set, error) {\n\ts := &Set{host, pq.QuoteIdentifier(name)} // name is the name of the table\n\t// list is the name of the column\n\tif _, err := s.host.db.Exec(fmt.Sprintf(\"CREATE TABLE IF NOT EXISTS %s (%s %s)\", s.table, setCol, defaultStringType)); err != nil {\n\t\tif !strings.HasSuffix(err.Error(), \"already exists\") {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif Verbose {\n\t\tlog.Println(\"Created table \" + s.table + \" in database \" + host.dbname)\n\t}\n\treturn s, nil\n}", "func getOrCreateMetricSet(entityIdentifier string, entityType string, m map[string]*metric.Set, i *integration.Integration) *metric.Set {\n\n\t// If the metric set already exists, return it\n\tset, ok := m[entityIdentifier]\n\tif ok {\n\t\treturn set\n\t}\n\n\t// If the metric set doesn't exist, get the entity for it and create a new metric set\n\te, _ := i.Entity(entityIdentifier, entityType) //can't error if both name and namespace are defined\n\tvar newSet *metric.Set\n\tif entityType == \"instance\" {\n\t\tnewSet = e.NewMetricSet(\"OracleDatabaseSample\", metric.Attr(\"entityName\", \"instance:\"+entityIdentifier), metric.Attr(\"displayName\", entityIdentifier))\n\t} else if entityType == \"tablespace\" {\n\t\tnewSet = e.NewMetricSet(\"OracleTablespaceSample\", metric.Attr(\"entityName\", \"tablespace:\"+entityIdentifier), metric.Attr(\"displayName\", entityIdentifier))\n\t} else {\n\t\tlog.Error(\"Unreachable code\")\n\t\tos.Exit(1)\n\t}\n\n\t// Put the new metric set the map\n\tm[entityIdentifier] = newSet\n\n\treturn newSet\n}", "func CreateSet(values ...interface{}) map[interface{}]struct{} {\n\treturn make(map[interface{}]struct{})\n}", "func NewSet(elements ...interface{}) Set {\n\toptions := &SetOptions{Cache: true}\n\tset := options.newThreadSafeSet()\n\tset.Add(elements...)\n\treturn &set\n}", "func NewSetUnknown(elementType attr.Type) SetValue {\n\treturn SetValue{\n\t\telementType: elementType,\n\t\tstate: attr.ValueStateUnknown,\n\t}\n}", "func (db *DB) CreateSeriesIfNotExists(name string, tags map[string]string) (*Measurement, *Series) {\n\t// Find or create meaurement\n\tm := db.measurements[name]\n\tif m == nil {\n\t\tm = NewMeasurement(name)\n\t\tdb.measurements[name] = m\n\t}\n\n\t// Normalize tags and try to match against existing series.\n\tif tags == nil {\n\t\ttags = make(map[string]string)\n\t}\n\tfor _, s := range m.series {\n\t\tif reflect.DeepEqual(s.tags, tags) {\n\t\t\treturn m, s\n\t\t}\n\t}\n\n\t// Create new series.\n\tdb.maxSeriesID++\n\ts := &Series{id: db.maxSeriesID, tags: tags}\n\n\t// Add series to DB and measurement.\n\tdb.series[s.id] = s\n\tm.series[s.id] = s\n\n\treturn m, s\n}", "func New(vals ...interface{}) Set {\n\ts := &setImpl{\n\t\tset: 
make(map[interface{}]struct{}, 0),\n\t}\n\tfor _, i := range vals {\n\t\ts.Insert(i)\n\t}\n\treturn s\n}", "func NewSet(values ...string) (set Set) {\n\tfor _, value := range values {\n\t\tif value != \"\" {\n\t\t\tset = append(set, New(value))\n\t\t}\n\t}\n\treturn\n}", "func New(dataType string) *HashSet {\n\treturn &HashSet{\n\t\tset: make(map[interface{}]interface{}),\n\t\tt: dataType,\n\t\tsize: 0,\n\t}\n}", "func NewSet(ss ...string) Set {\n\tsset := map[string]bool{}\n\tfor _, s := range ss {\n\t\tsset[s] = true\n\t}\n\n\treturn sset\n}", "func NewSet() Set {\n\tm := make(map[string]struct{})\n\treturn Set{m}\n}", "func (ts *TagSet) Unique() {\n\tseen := make(map[string]struct{})\n\tfor i := 0; i < len(*ts); {\n\t\tt := (*ts)[i]\n\t\tif _, found := seen[t]; found {\n\t\t\t*ts = append((*ts)[:i], (*ts)[i+1:]...)\n\t\t} else {\n\t\t\tseen[t] = struct{}{}\n\t\t\ti++\n\t\t}\n\t}\n}", "func (m *MongoDB) CreateMeteringTimeSeriesIfNotExist() error {\n\treturn m.CreateTimeSeriesIfNotExist(m.DBName, m.MeteringConn)\n}", "func NewSet(name string, loader pongo2.TemplateLoader) *TemplateSet {\n\treturn pongo2.NewSet(name, loader)\n}", "func NewSetValue(elementType attr.Type, elements []attr.Value) (SetValue, diag.Diagnostics) {\n\tvar diags diag.Diagnostics\n\n\t// Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521\n\tctx := context.Background()\n\n\tfor idx, element := range elements {\n\t\tif !elementType.Equal(element.Type(ctx)) {\n\t\t\tdiags.AddError(\n\t\t\t\t\"Invalid Set Element Type\",\n\t\t\t\t\"While creating a Set value, an invalid element was detected. \"+\n\t\t\t\t\t\"A Set must use the single, given element type. \"+\n\t\t\t\t\t\"This is always an issue with the provider and should be reported to the provider developers.\\n\\n\"+\n\t\t\t\t\tfmt.Sprintf(\"Set Element Type: %s\\n\", elementType.String())+\n\t\t\t\t\tfmt.Sprintf(\"Set Index (%d) Element Type: %s\", idx, element.Type(ctx)),\n\t\t\t)\n\t\t}\n\t}\n\n\tif diags.HasError() {\n\t\treturn NewSetUnknown(elementType), diags\n\t}\n\n\treturn SetValue{\n\t\telementType: elementType,\n\t\telements: elements,\n\t\tstate: attr.ValueStateKnown,\n\t}, nil\n}", "func (f *LogFile) TagValueSeriesIDSet(name, key, value []byte) (*tsdb.SeriesIDSet, error) {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\n\tmm, ok := f.mms[string(name)]\n\tif !ok {\n\t\treturn nil, nil\n\t}\n\n\ttk, ok := mm.tagSet[string(key)]\n\tif !ok {\n\t\treturn nil, nil\n\t}\n\n\ttv, ok := tk.tagValues[string(value)]\n\tif !ok {\n\t\treturn nil, nil\n\t} else if tv.cardinality() == 0 {\n\t\treturn nil, nil\n\t}\n\n\treturn tv.seriesIDSet(), nil\n}", "func NewSet(els ...string) (s Set) {\n\treturn s.Add(els...)\n}", "func New(getkey func(value interface{}) interface{}, vtype string) *SSet {\n\tvar set SSet\n\tset.list = arraylist.New()\n\tset.m = hashmap.New()\n\tset.m_index = make(map[interface{}]int)\n\tset.f = getkey\n\tset.item_type = vtype\n\tset.createline = time.Now().Unix()\n\treturn &set\n}", "func NewSet(t ...*Term) Set {\n\ts := newset(len(t))\n\tfor i := range t {\n\t\ts.Add(t[i])\n\t}\n\treturn s\n}", "func (c *createMeasurementsIfNotExistsCommand) addSeriesIfNotExists(measurement string, tags map[string]string) {\n\tm := c.addMeasurementIfNotExists(measurement)\n\n\ttagset := string(marshalTags(tags))\n\tfor _, t := range m.Tags {\n\t\tif string(marshalTags(t)) == tagset {\n\t\t\t// Series already present in subcommand, nothing to do.\n\t\t\treturn\n\t\t}\n\t}\n\t// Tag-set needs to added to subcommand.\n\tm.Tags = append(m.Tags, 
tags)\n\n\treturn\n}", "func (s *Store) SetByTags(key, value string, expiry time.Duration, tags []string) {\n\ts.l.Lock()\n\tdefer s.l.Unlock()\n\n\tfor _, tag := range tags {\n\t\tset := ds.New()\n\t\tif v, found := s.store.Get(tag); found {\n\t\t\tset = v.(*ds.StringSet)\n\t\t}\n\t\tset.Add(key)\n\t\ts.store.Set(tag, set, 1)\n\t}\n\n\ts.store.SetWithTTL(key, value, 1, expiry)\n}", "func (s int64set) add(value int64) bool {\n\n\t// 'ok' is true if value is within set\n\t_, ok := s[value]\n\tif ok {\n\t\treturn false // signifies nothing was inserted\n\t}\n\n\t// empty struct creation. The value in our set is mapped\n\t// to an empty struct because it takes of zero bytes.\n\t// but at least now the value is present in the set.\n\t// zero bytes thing is some black magic.\n\ts[value] = struct{}{}\n\treturn true\n}", "func NewSet() *Set {\n\treturn &Set{\n\t\tcache: make(map[I]bool),\n\t}\n}", "func New(values ...interface{}) *Set {\n\tset := &Set{items: make(map[interface{}]struct{})}\n\tif len(values) > 0 {\n\t\tset.Add(values...)\n\t}\n\treturn set\n}", "func (tv *logTagValue) seriesIDSet() *tsdb.SeriesIDSet {\n\tif tv.seriesSet != nil {\n\t\treturn tv.seriesSet.CloneNoLock()\n\t}\n\n\tss := tsdb.NewSeriesIDSet()\n\tfor seriesID := range tv.series {\n\t\tss.AddNoLock(seriesID)\n\t}\n\treturn ss\n}", "func getOrCreateMetricSet(entityMetricSets map[string]*metric.Set, e *integration.Entity, request *beanRequest, beanNameMatch string, eventType string, domain string) (*metric.Set, error) {\n\t// If the metric set exists, return it\n\tif ms, ok := entityMetricSets[beanNameMatch]; ok {\n\t\treturn ms, nil\n\t}\n\n\t// Attributes in all metric sets\n\tattributes := []attribute.Attribute{\n\t\t{Key: \"query\", Value: request.beanQuery},\n\t\t{Key: \"domain\", Value: domain},\n\t\t{Key: \"host\", Value: args.JmxHost},\n\t\t{Key: \"bean\", Value: beanNameMatch},\n\t}\n\n\tif !args.LocalEntity {\n\t\tnonLocalKeys := []attribute.Attribute{\n\t\t\t{Key: \"entityName\", Value: \"domain:\" + e.Metadata.Name},\n\t\t\t{Key: \"displayName\", Value: e.Metadata.Name},\n\t\t}\n\t\tattributes = append(attributes, nonLocalKeys...)\n\t}\n\n\t// Add the bean keys and properties as attributes\n\tkeyProperties, err := getKeyProperties(beanNameMatch)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor key, val := range keyProperties {\n\t\tattributes = append(attributes, attribute.Attribute{Key: \"key:\" + key, Value: val})\n\t}\n\n\t// Create the metric set and put it in the map\n\tmetricSet := e.NewMetricSet(eventType, attributes...)\n\tentityMetricSets[beanNameMatch] = metricSet\n\n\treturn metricSet, nil\n}", "func NewSet(compare func(interface{}, interface{}) int) Set {\n\treturn Set{NewTree(compare)}\n}", "func main() {\n\ts := make(set)\n\ts[\"item1\"] = struct{}{}\n\ts[\"item2\"] = struct{}{}\n\ts[\"item1\"] = struct{}{} //Won't be added. 
Matches already added key.\n\tfmt.Println(getSetValues(s))\n}", "func (s *Set) New() Set {\n\treturn Set{make(map[interface{}]bool), 0}\n}", "func (v *Timestamp) Set(x interface{}) (err error) {\n\treturn v.Scan(x)\n}", "func (i Info) set(t, v string) {\n\tif _, ok := tags[t]; ok {\n\t\ti[t] = v\n\t\treturn\n\t}\n\n\tfor k, tt := range tags {\n\t\tif tt == t {\n\t\t\ti[k] = v\n\t\t\treturn\n\t\t}\n\t}\n}", "func New() Set {\n\treturn make(map[string]bool)\n}", "func NewSetValueFrom(ctx context.Context, elementType attr.Type, elements any) (SetValue, diag.Diagnostics) {\n\tattrValue, diags := reflect.FromValue(\n\t\tctx,\n\t\tSetType{ElemType: elementType},\n\t\telements,\n\t\tpath.Empty(),\n\t)\n\n\tif diags.HasError() {\n\t\treturn NewSetUnknown(elementType), diags\n\t}\n\n\tset, ok := attrValue.(SetValue)\n\n\t// This should not happen, but ensure there is an error if it does.\n\tif !ok {\n\t\tdiags.AddError(\n\t\t\t\"Unable to Convert Set Value\",\n\t\t\t\"An unexpected result occurred when creating a Set using SetValueFrom. \"+\n\t\t\t\t\"This is an issue with terraform-plugin-framework and should be reported to the provider developers.\",\n\t\t)\n\t}\n\n\treturn set, diags\n}", "func NewSetString() *SetString {\n\treturn &SetString{\n\t\tcache: make(map[string]bool),\n\t}\n}", "func NewSet(elements ...Element) (elem CollectionElement, err error) {\n\n\t// check for errors\n\tfor _, child := range elements {\n\t\tif child == nil {\n\t\t\terr = ErrInvalidElement\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif err == nil {\n\t\tcoll := &collectionElemImpl{\n\t\t\tstartSymbol: SetStartLiteral,\n\t\t\tendSymbol: SetEndLiteral,\n\t\t\tseparatorSymbol: SetSeparatorLiteral,\n\t\t\tcollection: []Element{},\n\t\t}\n\n\t\tvar base *baseElemImpl\n\t\tif base, err = makeBaseElement(coll, SetType, collectionSerialization(false)); err == nil {\n\t\t\tcoll.baseElemImpl = base\n\t\t\telem = coll\n\t\t\terr = elem.Append(elements...)\n\t\t}\n\t}\n\n\treturn elem, err\n}", "func NewSet() *Set {\n\treturn &Set{elements: make(map[interface{}]bool), mu: sync.Mutex{}}\n}", "func (f *LogFile) createMeasurementIfNotExists(name []byte) *logMeasurement {\n\tmm := f.mms[string(name)]\n\tif mm == nil {\n\t\tmm = &logMeasurement{\n\t\t\tname: name,\n\t\t\ttagSet: make(map[string]logTagKey),\n\t\t\tseries: make(map[uint64]struct{}),\n\t\t}\n\t\tf.mms[string(name)] = mm\n\t}\n\treturn mm\n}", "func (t *ttlCache) putAndGetDiff(name string, tags []string, ts uint64, val float64) (dx float64, ok bool) {\n\tkey := t.metricDimensionsToMapKey(name, tags)\n\tif c, found := t.cache.Get(key); found {\n\t\tcnt := c.(numberCounter)\n\t\tif cnt.ts > ts {\n\t\t\t// We were given a point older than the one in memory so we drop it\n\t\t\t// We keep the existing point in memory since it is the most recent\n\t\t\treturn 0, false\n\t\t}\n\t\t// if dx < 0, we assume there was a reset, thus we save the point\n\t\t// but don't export it (it's the first one so we can't do a delta)\n\t\tdx = val - cnt.value\n\t\tok = dx >= 0\n\t}\n\n\tt.cache.Set(key, numberCounter{ts, val}, gocache.DefaultExpiration)\n\treturn\n}", "func NewSet(labels ...Instance) Set {\n\ts := make(map[Instance]struct{})\n\tfor _, l := range labels {\n\t\ts[l] = struct{}{}\n\t}\n\n\treturn s\n}", "func createSet(dsGraph *constellation.Config) (set.Set, set.Set) {\n\n\t// Create sets to hold services and relationships - used to find differences between old and new using intersection and difference operations\n\tretSetServices := set.NewSet()\n\tretSetRelationships := 
set.NewSet()\n\n\t//Store all services in the services set\n\tfor _, v := range dsGraph.Services {\n\t\tretSetServices.Add(v.ID)\n\t}\n\n\t//Store relationships in the relationship set\n\tfor _, v := range dsGraph.Relationships {\n\t\tretSetRelationships.Add(v.From + \"|\" + v.To)\n\t}\n\n\treturn retSetServices, retSetRelationships\n}", "func (ts *TagSet) Add(tag string) (ok bool) {\n\tif ts.Has(tag) {\n\t\treturn false\n\t}\n\t*ts = append(*ts, tag)\n\treturn true\n}", "func NewSet(items ...Value) *Set {\n\tmapItems := make([]privateSetMapItem, 0, len(items))\n\tvar mapValue struct{}\n\tfor _, x := range items {\n\t\tmapItems = append(mapItems, privateSetMapItem{Key: x, Value: mapValue})\n\t}\n\n\treturn &Set{backingMap: newprivateSetMap(mapItems)}\n}", "func (t tagSet) Insert(p tagPair) tagSet {\n\ti := t.Search(p.key)\n\tif i < len(t) && t[i].key == p.key {\n\t\tt[i].value = p.value\n\t\treturn t // exists\n\t}\n\t// append t to the end of the slice\n\tif i == len(t) {\n\t\treturn append(t, p)\n\t}\n\t// insert p\n\tt = append(t, tagPair{})\n\tcopy(t[i+1:], t[i:])\n\tt[i] = p\n\treturn t\n}", "func (c *Variable) Set(tmpl, ts string, e ...Entry) error {\n var err error\n\n if len(e) == 0 {\n return nil\n } else if tmpl == \"\" && ts == \"\" {\n return fmt.Errorf(\"tmpl or ts must be specified\")\n }\n\n _, fn := c.versioning()\n names := make([]string, len(e))\n\n // Build up the struct with the given configs.\n d := util.BulkElement{XMLName: xml.Name{Local: \"variable\"}}\n for i := range e {\n d.Data = append(d.Data, fn(e[i]))\n names[i] = e[i].Name\n }\n c.con.LogAction(\"(set) template variables: %v\", names)\n\n // Set xpath.\n path := c.xpath(tmpl, ts, names)\n if len(e) == 1 {\n path = path[:len(path) - 1]\n } else {\n path = path[:len(path) - 2]\n }\n\n // Create the template variables.\n _, err = c.con.Set(path, d.Config(), nil, nil)\n return err\n}", "func (fs *FS) newInvalSet() *invalSet { return &invalSet{server: fs.server} }", "func (ts *TagSet) has(t ident.Ident) bool {\n\t// TODO: this could be much, much faster! Maybe build a set on\n\t// each TagSet as required (with sync.Once)?\n\n\tfor _, t2 := range ts.tags {\n\t\tif t2.Equals(t) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (v *TimestampNano) Set(x interface{}) (err error) {\n\treturn v.Scan(x)\n}", "func (sm *DefaultIDSetMap) getOrCreate(key int) *IDSet {\n\tif sm.key == key {\n\t\treturn sm.value\n\t}\n\tif sm.m != nil {\n\t\tif s, ok := sm.m[key]; ok {\n\t\t\treturn s\n\t\t}\n\t}\n\n\tif sm.m != nil {\n\t\t// Already a map, just add a new key\n\t\ts := NewIDSet()\n\t\tsm.m[key] = s\n\t\treturn s\n\t}\n\n\tif sm.key == 0 {\n\t\t// Populating a singleton set\n\t\ts := NewIDSet()\n\t\tsm.key = key\n\t\tsm.value = s\n\t\treturn s\n\t}\n\n\t// Adding to a singleton set. 
Create a new map and add the\n\t// new value along with the old singleton value to the it\n\ts := NewIDSet()\n\tsm.m = make(map[int]*IDSet, 2)\n\tsm.m[key] = s\n\tsm.m[sm.key] = sm.value\n\tsm.key = 0\n\tsm.value = nil\n\treturn s\n}", "func (s FeatureSet) Add(tag FeatureTag, values ...FeatureTagValue) {\n\tif !s.Contains(tag) {\n\t\ts[tag] = values\n\t\treturn\n\t}\n\tfor t, values := range s {\n\t\tif t.Equals(tag) {\n\t\t\ts[t] = append(s[t], values...)\n\t\t}\n\t}\n}", "func New() *Set {\n\treturn &Set{make(Record)}\n}", "func NewSet(name string) *Set {\n\ts := &Set{name: name}\n\ts.set = make(map[string]struct{})\n\treturn s\n}", "func (s *tagStore) CreateOrUpdate(ctx context.Context, t *models.Tag) error {\n\tdefer metrics.InstrumentQuery(\"tag_create_or_update\")()\n\tq := `INSERT INTO tags (top_level_namespace_id, repository_id, manifest_id, name)\n\t\t VALUES ($1, $2, $3, $4)\n\t ON CONFLICT (top_level_namespace_id, repository_id, name)\n\t\t DO UPDATE SET\n\t\t\t manifest_id = EXCLUDED.manifest_id, updated_at = now()\n\t\t WHERE\n\t\t\t tags.manifest_id <> excluded.manifest_id\n\t RETURNING\n\t\t id, created_at, updated_at`\n\n\trow := s.db.QueryRowContext(ctx, q, t.NamespaceID, t.RepositoryID, t.ManifestID, t.Name)\n\tif err := row.Scan(&t.ID, &t.CreatedAt, &t.UpdatedAt); err != nil && err != sql.ErrNoRows {\n\t\tvar pgErr *pgconn.PgError\n\t\t// this can happen if the manifest is deleted by the online GC while attempting to tag an untagged manifest\n\t\tif errors.As(err, &pgErr) && pgErr.Code == pgerrcode.ForeignKeyViolation {\n\t\t\treturn ErrManifestNotFound\n\t\t}\n\t\treturn fmt.Errorf(\"creating tag: %w\", err)\n\t}\n\n\treturn nil\n}", "func GetOrCreateDataset(sessionId string) (*object.Session, bool) {\n\tif val, ok := datasets[sessionId]; ok {\n\t\treturn val, false\n\t}\n\n\tdatasets[sessionId] = object.NewSession(sessionId)\n\treturn datasets[sessionId], true\n}", "func NewSet() *Set {\n\treturn &Set{\n\t\tm: make(map[string]*namedMetric),\n\t}\n}", "func PushKeyValTSDS(key string, val string, timestamp goltime.Timestamp) bool {\n\tif levigoTSDS.PushTSDS(key, val, timestamp.Time(), db) == \"\" {\n\t\treturn false\n\t}\n\treturn true\n}", "func typicalGetSet(t *testing.T, newCache cacheFactory) {\n\tvar err error\n\tcache := newCache(t, time.Hour)\n\n\tvalue := \"foo\"\n\tif err = cache.Set(\"value\", value, testExpiryTime); err != nil {\n\t\tt.Errorf(\"Error setting a value: %s\", err)\n\t}\n\n\tvalue = \"\"\n\terr = cache.Get(\"value\", &value)\n\tif err != nil {\n\t\tt.Errorf(\"Error getting a value: %s\", err)\n\t}\n\tif value != \"foo\" {\n\t\tt.Errorf(\"Expected to get foo back, got %s\", value)\n\t}\n}", "func NewSetQuery() filters.Spec { return &modQuery{behavior: set} }", "func NewSet() *Set {\n\treturn &Set{\n\t\tset: make(map[string]bool),\n\t}\n}", "func (s *DatabaseIndex) createSeriesIndexIfNotExists(measurementName string, series *Series) *Series {\n\t// if there is a measurement for this id, it's already been added\n\tss := s.series[series.Key]\n\tif ss != nil {\n\t\treturn ss\n\t}\n\n\t// get or create the measurement index\n\tm := s.createMeasurementIndexIfNotExists(measurementName)\n\n\t// set the in memory ID for query processing on this shard\n\tseries.id = s.lastID + 1\n\ts.lastID += 1\n\n\tseries.measurement = m\n\ts.series[series.Key] = series\n\n\tm.AddSeries(series)\n\n\treturn series\n}", "func (s *DatabaseIndex) createMeasurementIndexIfNotExists(name string) *Measurement {\n\tname = unescapeString(name)\n\tm := s.measurements[name]\n\tif m == 
nil {\n\t\tm = NewMeasurement(name, s)\n\t\ts.measurements[name] = m\n\t\ts.names = append(s.names, name)\n\t\tsort.Strings(s.names)\n\t}\n\treturn m\n}", "func (st *buildStatus) newTestSet(testStats *buildstats.TestStats, names []distTestName) (*testSet, error) {\n\tset := &testSet{\n\t\tst: st,\n\t\ttestStats: testStats,\n\t}\n\tfor _, name := range names {\n\t\tset.items = append(set.items, &testItem{\n\t\t\tset: set,\n\t\t\tname: name,\n\t\t\tduration: testStats.Duration(st.BuilderRev.Name, name.Old),\n\t\t\ttake: make(chan token, 1),\n\t\t\tdone: make(chan token),\n\t\t})\n\t}\n\treturn set, nil\n}", "func (c *Conn) NewSet(table, name string) (*Set, error) {\n\ttabs, err := c.ListTables()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, tab := range tabs {\n\t\tif tab.Name == table || table == \"\" {\n\t\t\ts, err := c.GetSetByName(tab, name)\n\t\t\tif s == nil {\n\t\t\t\t// TODO: how to inspect netlink errors? Seems we're getting back only fmt.wrapErr\n\t\t\t\tif table == \"\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn &Set{\n\t\t\t\tConn: c,\n\t\t\t\tSet: s,\n\t\t\t\tMap: make(map[string][]byte),\n\t\t\t}, nil\n\t\t}\n\t}\n\treturn nil, errors.New(\"table or set not found\")\n}", "func setTag(svc *ec2.EC2, tagKey string, tagValue string, volumeID string) bool {\n\ttags := &ec2.CreateTagsInput{\n\t\tResources: []*string{\n\t\t\taws.String(volumeID),\n\t\t},\n\t\tTags: []*ec2.Tag{\n\t\t\t{\n\t\t\t\tKey: aws.String(tagKey),\n\t\t\t\tValue: aws.String(tagValue),\n\t\t\t},\n\t\t},\n\t}\n\tret, err := svc.CreateTags(tags)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn false\n\t}\n\tif verbose {\n\t\tlog.Println(ret)\n\t}\n\treturn true\n}", "func NewSet()(*Set) {\n m := &Set{\n Entity: *iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.NewEntity(),\n }\n return m\n}", "func (s *Set) Set(e interface{}, t time.Time) {\n\ts.Lock()\n\tif val, ok := s.members[e]; !ok || t.UnixNano() > val.UnixNano() {\n\t\ts.members[e] = t\n\t}\n\ts.Unlock()\n}", "func NewSet(strings ...string) *Set {\n\tset := &Set{\n\t\tm: map[string]struct{}{},\n\t}\n\tfor _, s := range strings {\n\t\t_ = set.Add(s)\n\t}\n\treturn set\n}", "func MakeSet(s map[interface{}]bool) (*skylark.Set, error) {\n\tset := skylark.Set{}\n\tfor k := range s {\n\t\tkey, err := ToValue(k)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif err := set.Insert(key); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn &set, nil\n}", "func hasTag(tags []*ec2.Tag, Key string, value string) bool {\n\tfor i := range tags {\n\t\tif *tags[i].Key == Key && *tags[i].Value == value {\n\t\t\tlog.Printf(\"\\t\\tTag %s already set with value %s\\n\",\n\t\t\t\t*tags[i].Key,\n\t\t\t\t*tags[i].Value)\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func makeSet() *customSet {\n\treturn &customSet{\n\t\tcontainer: make(map[string]struct{}),\n\t}\n}", "func NewSetInit(k string) (ns Set) {\n\tns = NewSet()\n\tns.add(k)\n\treturn ns\n}", "func NewSubtractionSet(store Store, cacheTime time.Duration, notAvailableTTL time.Duration, set1 ComposedSet, set2 ComposedSet) ComposedSet {\n\treturn ComposeIDs(ComposeWarmup(NewSubtractionSetImp(store, cacheTime, notAvailableTTL, set1, set2), store), store)\n}", "func (sf *StructField) TagSettingsSet(key, val string) {\n sf.tagSettingsLock.Lock()\n defer sf.tagSettingsLock.Unlock()\n sf.TagSettings[key] = val\n}", "func (dao *sqlimpl) Set(meta *idm.UserMeta) (*idm.UserMeta, bool, error) {\n\tvar (\n\t\tupdate bool\n\t\tmetaId 
string\n\t)\n\n\towner := dao.extractOwner(meta.Policies)\n\n\tif stmt := dao.GetStmt(\"Exists\"); stmt != nil {\n\t\tdefer stmt.Close()\n\n\t\texists := stmt.QueryRow(meta.NodeUuid, meta.Namespace, owner)\n\t\tif err := exists.Scan(&metaId); err == nil && metaId != \"\" {\n\t\t\tupdate = true\n\t\t} else {\n\t\t\tmetaId = uuid.NewUUID().String()\n\t\t}\n\t}\n\n\tvar err error\n\tif update {\n\t\tstmt := dao.GetStmt(\"UpdateMeta\")\n\t\tif stmt == nil {\n\t\t\treturn nil, false, fmt.Errorf(\"Unknown statement\")\n\t\t}\n\t\tdefer stmt.Close()\n\n\t\tif _, err := stmt.Exec(\n\t\t\tmeta.NodeUuid,\n\t\t\tmeta.Namespace,\n\t\t\towner,\n\t\t\tint32(time.Now().Unix()),\n\t\t\t\"json\",\n\t\t\tmeta.JsonValue,\n\t\t\t&metaId,\n\t\t); err != nil {\n\t\t\treturn meta, update, err\n\t\t}\n\t} else {\n\t\tstmt := dao.GetStmt(\"AddMeta\")\n\t\tif stmt == nil {\n\t\t\treturn nil, false, fmt.Errorf(\"Unknown statement\")\n\t\t}\n\t\tdefer stmt.Close()\n\n\t\tif _, err := stmt.Exec(\n\t\t\tmetaId,\n\t\t\tmeta.NodeUuid,\n\t\t\tmeta.Namespace,\n\t\t\towner,\n\t\t\ttime.Now().Unix(),\n\t\t\t\"json\",\n\t\t\tmeta.JsonValue,\n\t\t); err != nil {\n\t\t\treturn meta, update, err\n\t\t}\n\n\t\tmeta.Uuid = metaId\n\t}\n\n\tif err == nil && len(meta.Policies) > 0 {\n\t\tfor _, p := range meta.Policies {\n\t\t\tp.Resource = meta.Uuid\n\t\t}\n\t\terr = dao.AddPolicies(update, meta.Uuid, meta.Policies)\n\t}\n\n\treturn meta, update, err\n}", "func (e *RawExecutor) limitTagSet(tagset string) {\n\te.limitedTagSets[tagset] = struct{}{}\n}", "func NewSet() Set {\n\treturn make(Set)\n}", "func (c *Controller) createOrUpdateStatefulSetResource(chi *chop.ClickHouseInstallation, newStatefulSet *apps.StatefulSet) error {\n\t// Check whether object with such name already exists in k8s\n\toldStatefulSet, err := c.statefulSetLister.StatefulSets(chi.Namespace).Get(newStatefulSet.Name)\n\n\tif oldStatefulSet != nil {\n\t\t// StatefulSet already exists - update it\n\t\tnewStatefulSet.Namespace = oldStatefulSet.Namespace\n\t\treturn c.updateStatefulSet(oldStatefulSet, newStatefulSet)\n\t}\n\n\tif apierrors.IsNotFound(err) {\n\t\t// StatefulSet with such name not found - create StatefulSet\n\t\treturn c.createStatefulSet(chi, newStatefulSet)\n\t}\n\n\t// Error has happened with .Get()\n\treturn err\n}", "func (s StringSet) Add(x string) { s[x] = struct{}{} }", "func (o SetOptions) New(elements ...interface{}) (set Set) {\n\tif o.Unsafe {\n\t\tnewSet := o.newThreadUnsafeSet()\n\t\tset = &newSet\n\t} else {\n\t\tnewSet := o.newThreadSafeSet()\n\t\tset = &newSet\n\t}\n\tset.Add(elements...)\n\treturn\n}", "func (dal *DataAccessLayer) GetTimeSeriesDatumByTenantIdAndCreatedAt(tenantId int64, timestamp int64) (*TimeSeriesDatum, error) {\n thing := TimeSeriesDatum{} // The struct which will be populated from the database.\n\n // DEVELOPERS NOTE:\n // (1) Lookup the thing based on the id.\n // (2) PostgreSQL uses an enumerated $1, $2, etc bindvar syntax\n err := dal.db.Get(&thing, \"SELECT * FROM data WHERE timestamp = $1 AND tenant_id = $2\", timestamp, tenantId)\n\n // Handling non existing item\n if err == sql.ErrNoRows {\n return nil, nil\n } else if err != nil {\n return nil, err\n }\n\n return &thing, nil\n}", "func ExampleIntSet_AddIfNotExist() {\n\tintSet := gset.NewIntSetFrom([]int{1, 2, 3})\n\tintSet.Add(1)\n\tfmt.Println(intSet.Slice())\n\tfmt.Println(intSet.AddIfNotExist(1))\n\n\t// Mya Output:\n\t// [1 2 3]\n\t// false\n}", "func (m *MongoDB) CreateMonitorTimeSeriesIfNotExist(collTime time.Time) error {\n\treturn 
m.CreateTimeSeriesIfNotExist(m.DBName, m.getMonitorCollectionName(collTime))\n}", "func createVoteSet(k1, k2 []key.ConsensusKeys, hash []byte, size int, round uint64, step uint8) (events []consensus.Event) {\n\t// We can not have duplicates in the vote set.\n\tduplicates := make(map[string]struct{})\n\t// We need 75% of the committee size worth of events to reach quorum.\n\tfor j := 0; j < int(float64(size)*0.75); j++ {\n\t\tif _, ok := duplicates[string(k1[j].BLSPubKeyBytes)]; !ok {\n\t\t\tev := mockReduction(hash, round, step-2, k1, j)\n\t\t\tevents = append(events, ev)\n\t\t\tduplicates[string(k1[j].BLSPubKeyBytes)] = struct{}{}\n\t\t}\n\t}\n\n\t// Clear the duplicates map, since we will most likely have identical keys in each array\n\tfor k := range duplicates {\n\t\tdelete(duplicates, k)\n\t}\n\n\tfor j := 0; j < int(float64(size)*0.75); j++ {\n\t\tif _, ok := duplicates[string(k2[j].BLSPubKeyBytes)]; !ok {\n\t\t\tev := mockReduction(hash, round, step-1, k2, j)\n\t\t\tevents = append(events, ev)\n\t\t\tduplicates[string(k2[j].BLSPubKeyBytes)] = struct{}{}\n\t\t}\n\t}\n\n\treturn events\n}", "func FromTaggedTimeSeriesSets(\n\tseries []*tsdb.TaggedTimeSeriesSet,\n\tcolNames [][]string,\n\tpqs []tsdbjson.ParsedQuery,\n\tepochConversion func(ts int64) int64) *client.Response {\n\tif len(series) != len(pqs) {\n\t\tpanic(\"Slices must be of equal length\")\n\t}\n\tif len(series) != len(colNames) {\n\t\tpanic(\"Slices must be of equal length\")\n\t}\n\treturn fromTaggedTimeSeriesSets(series, colNames, pqs, epochConversion)\n}", "func (t Tags) Set(name string, v any) {\n\tt[name] = toString(v)\n}", "func NewSet() *Set {\n\treturn &Set{set: make(map[cid.Cid]struct{}), lk: sync.Mutex{}}\n}", "func NewMetricSetSafe(namespace string, tickTime time.Duration) *MetricSet {\n\tnsp := namespace\n\tk := 0\n\tvar res *MetricSet\n\tfor res == nil {\n\t\tfunc() {\n\t\t\tdefer func() {\n\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\tk++\n\t\t\t\t\tnsp = fmt.Sprintf(\"%s.%d\", namespace, k)\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tres = NewMetricSet(nsp, tickTime)\n\t\t}()\n\t}\n\treturn res\n}", "func NewSetValueMust(elementType attr.Type, elements []attr.Value) SetValue {\n\tset, diags := NewSetValue(elementType, elements)\n\n\tif diags.HasError() {\n\t\t// This could potentially be added to the diag package.\n\t\tdiagsStrings := make([]string, 0, len(diags))\n\n\t\tfor _, diagnostic := range diags {\n\t\t\tdiagsStrings = append(diagsStrings, fmt.Sprintf(\n\t\t\t\t\"%s | %s | %s\",\n\t\t\t\tdiagnostic.Severity(),\n\t\t\t\tdiagnostic.Summary(),\n\t\t\t\tdiagnostic.Detail()))\n\t\t}\n\n\t\tpanic(\"SetValueMust received error(s): \" + strings.Join(diagsStrings, \"\\n\"))\n\t}\n\n\treturn set\n}", "func NewSet() *Set {\n\treturn newSet()\n}", "func New(keys ...string) Set {\n\tset := Empty()\n\tfor _, k := range keys {\n\t\tset.Add(k)\n\t}\n\treturn set\n}", "func (s *Set) addWithTransactionNoCheck(ctx context.Context, transaction *sql.Tx, value string) error {\n\tif !s.host.rawUTF8 {\n\t\tEncode(&value)\n\t}\n\t_, err := transaction.Exec(fmt.Sprintf(\"INSERT INTO %s (%s) VALUES ($1)\", s.table, setCol), value)\n\treturn err\n}", "func (s *set) insert(x *Term) {\n\thash := x.Hash()\n\tinsertHash := hash\n\t// This `equal` utility is duplicated and manually inlined a number of\n\t// time in this file. 
Inlining it avoids heap allocations, so it makes\n\t// a big performance difference: some operations like lookup become twice\n\t// as slow without it.\n\tvar equal func(v Value) bool\n\n\tswitch x := x.Value.(type) {\n\tcase Null, Boolean, String, Var:\n\t\tequal = func(y Value) bool { return x == y }\n\tcase Number:\n\t\tif xi, err := json.Number(x).Int64(); err == nil {\n\t\t\tequal = func(y Value) bool {\n\t\t\t\tif y, ok := y.(Number); ok {\n\t\t\t\t\tif yi, err := json.Number(y).Int64(); err == nil {\n\t\t\t\t\t\treturn xi == yi\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\t// We use big.Rat for comparing big numbers.\n\t\t// It replaces big.Float due to following reason:\n\t\t// big.Float comes with a default precision of 64, and setting a\n\t\t// larger precision results in more memory being allocated\n\t\t// (regardless of the actual number we are parsing with SetString).\n\t\t//\n\t\t// Note: If we're so close to zero that big.Float says we are zero, do\n\t\t// *not* big.Rat).SetString on the original string it'll potentially\n\t\t// take very long.\n\t\tvar a *big.Rat\n\t\tfa, ok := new(big.Float).SetString(string(x))\n\t\tif !ok {\n\t\t\tpanic(\"illegal value\")\n\t\t}\n\t\tif fa.IsInt() {\n\t\t\tif i, _ := fa.Int64(); i == 0 {\n\t\t\t\ta = new(big.Rat).SetInt64(0)\n\t\t\t}\n\t\t}\n\t\tif a == nil {\n\t\t\ta, ok = new(big.Rat).SetString(string(x))\n\t\t\tif !ok {\n\t\t\t\tpanic(\"illegal value\")\n\t\t\t}\n\t\t}\n\n\t\tequal = func(b Value) bool {\n\t\t\tif bNum, ok := b.(Number); ok {\n\t\t\t\tvar b *big.Rat\n\t\t\t\tfb, ok := new(big.Float).SetString(string(bNum))\n\t\t\t\tif !ok {\n\t\t\t\t\tpanic(\"illegal value\")\n\t\t\t\t}\n\t\t\t\tif fb.IsInt() {\n\t\t\t\t\tif i, _ := fb.Int64(); i == 0 {\n\t\t\t\t\t\tb = new(big.Rat).SetInt64(0)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif b == nil {\n\t\t\t\t\tb, ok = new(big.Rat).SetString(string(bNum))\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tpanic(\"illegal value\")\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn a.Cmp(b) == 0\n\t\t\t}\n\n\t\t\treturn false\n\t\t}\n\tdefault:\n\t\tequal = func(y Value) bool { return Compare(x, y) == 0 }\n\t}\n\n\tfor curr, ok := s.elems[insertHash]; ok; {\n\t\tif equal(curr.Value) {\n\t\t\treturn\n\t\t}\n\n\t\tinsertHash++\n\t\tcurr, ok = s.elems[insertHash]\n\t}\n\n\ts.elems[insertHash] = x\n\t// O(1) insertion, but we'll have to re-sort the keys later.\n\ts.keys = append(s.keys, x)\n\t// Reset the sync.Once instance.\n\t// See https://github.com/golang/go/issues/25955 for why we do it this way.\n\ts.sortGuard = new(sync.Once)\n\n\ts.hash += hash\n\ts.ground = s.ground && x.IsGround()\n}", "func newTrySet(key tryKey) (*trySet, error) {\n\tgoHead := getRepoHead(\"go\")\n\tif key.Repo != \"go\" && goHead == \"\" {\n\t\t// We don't know the go HEAD yet (but we will)\n\t\t// so don't create this trySet yet as we don't\n\t\t// know which Go revision to build against.\n\t\treturn nil, errHeadUnknown\n\t}\n\n\tbuilders := tryBuilders\n\tif key.Repo != \"go\" {\n\t\tbuilders = subTryBuilders\n\t}\n\n\tlog.Printf(\"Starting new trybot set for %v\", key)\n\tts := &trySet{\n\t\ttryKey: key,\n\t\ttryID: \"T\" + randHex(9),\n\t\ttrySetState: trySetState{\n\t\t\tremain: len(builders),\n\t\t\tbuilds: make([]*buildStatus, len(builders)),\n\t\t},\n\t}\n\n\tgo ts.notifyStarting()\n\tfor i, bconf := range builders {\n\t\tbrev := tryKeyToBuilderRev(bconf.Name, key)\n\t\tbs, err := newBuild(brev)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"can't create build for %q: %v\", brev, 
err)\n\t\t\tcontinue\n\t\t}\n\t\tbs.trySet = ts\n\t\tstatus[brev] = bs\n\t\tts.builds[i] = bs\n\t\tgo bs.start() // acquires statusMu itself, so in a goroutine\n\t\tgo ts.awaitTryBuild(i, bconf, bs)\n\t}\n\treturn ts, nil\n}", "func (s int64set) has(value int64) bool {\n\t_, ok := s[value]\n\treturn ok\n}", "func NewSet() *Set {\n\treturn &Set{}\n}", "func (ipset *IPSet) Create(setName string, createOptions ...string) (*Set, error) {\n\t// Populate Set map if needed\n\tif ipset.Get(setName) == nil {\n\t\tipset.Sets[setName] = &Set{\n\t\t\tName: setName,\n\t\t\tOptions: createOptions,\n\t\t\tParent: ipset,\n\t\t}\n\t}\n\n\t// Determine if set with the same name is already active on the system\n\tsetIsActive, err := ipset.Sets[setName].IsActive()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to determine if ipset set %s exists: %s\",\n\t\t\tsetName, err)\n\t}\n\n\t// Create set if missing from the system\n\tif !setIsActive {\n\t\tif ipset.isIpv6 {\n\t\t\t// Add \"family inet6\" option and a \"inet6:\" prefix for IPv6 sets.\n\t\t\targs := []string{\"create\", \"-exist\", ipset.Sets[setName].name()}\n\t\t\targs = append(args, createOptions...)\n\t\t\targs = append(args, \"family\", \"inet6\")\n\t\t\tif _, err := ipset.run(args...); err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to create ipset set on system: %s\", err)\n\t\t\t}\n\t\t} else {\n\t\t\t_, err := ipset.run(append([]string{\"create\", \"-exist\", setName},\n\t\t\t\tcreateOptions...)...)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to create ipset set on system: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n\treturn ipset.Sets[setName], nil\n}", "func NewSet(members ...uint) Set {\n\ts := Set{}\n\tfor _, member := range members {\n\t\ts[member] = true\n\t}\n\treturn s\n}", "func NewSet() *Set {\n\tcomparator := func(left, right interface{}) bool {\n\t\treturn left.(Ordered).LessThan(right.(Ordered))\n\t}\n\treturn NewCustomSet(comparator)\n}" ]
[ "0.70187044", "0.5677267", "0.5523787", "0.54961294", "0.5219592", "0.51507616", "0.514083", "0.51023275", "0.5094932", "0.5091561", "0.5059189", "0.5022734", "0.49620268", "0.49563345", "0.49500543", "0.49257427", "0.4912637", "0.49085042", "0.4883463", "0.48798203", "0.48776954", "0.48573336", "0.48514235", "0.4831704", "0.48063657", "0.47952738", "0.47843203", "0.4774203", "0.4772104", "0.4768274", "0.47613758", "0.4752682", "0.47464064", "0.4736536", "0.4719049", "0.47159487", "0.4711793", "0.47057566", "0.4705247", "0.4702158", "0.46806803", "0.46706945", "0.46667263", "0.46524173", "0.46414408", "0.4617434", "0.46137467", "0.4606809", "0.46038112", "0.4588181", "0.45720318", "0.45625913", "0.45557576", "0.45505914", "0.45479658", "0.45338774", "0.45315364", "0.4531158", "0.45221218", "0.4521426", "0.45176613", "0.4515069", "0.4510496", "0.44975606", "0.4497281", "0.44750524", "0.4469725", "0.4461717", "0.44615975", "0.44573748", "0.44527912", "0.44520557", "0.44471464", "0.4438479", "0.44334835", "0.44334233", "0.44278485", "0.44157237", "0.44150808", "0.4414631", "0.4403897", "0.44038254", "0.43948153", "0.43923172", "0.43794623", "0.4379361", "0.43778247", "0.43706724", "0.4369697", "0.4350374", "0.43428856", "0.43377677", "0.4336613", "0.43352997", "0.43268773", "0.4320883", "0.4319245", "0.4313831", "0.43133613", "0.4310949" ]
0.5942259
1
dimensionKeys returns a list of tag key names for the dimensions. Each dimension must be a VarRef.
func dimensionKeys(dimensions Dimensions) (a []string) {
	for _, d := range dimensions {
		a = append(a, d.Expr.(*VarRef).Val)
	}
	return
}
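Since each dimension must wrap a *VarRef, the type assertion in the loop panics on anything else. The standalone sketch below uses hypothetical stand-ins for the Dimensions, Dimension, and VarRef types (the real ones live in the influxql AST) purely to show the call shape and output.

package main

import "fmt"

// Hypothetical stand-ins for the influxql AST types; the real Expr field
// is an expression interface that may hold a *VarRef among other nodes.
type VarRef struct{ Val string }

type Dimension struct{ Expr interface{} }

type Dimensions []*Dimension

// dimensionKeys as defined above: extract the tag key name from each
// dimension, panicking if a dimension's expression is not a *VarRef.
func dimensionKeys(dimensions Dimensions) (a []string) {
	for _, d := range dimensions {
		a = append(a, d.Expr.(*VarRef).Val)
	}
	return
}

func main() {
	dims := Dimensions{
		{Expr: &VarRef{Val: "host"}},
		{Expr: &VarRef{Val: "region"}},
	}
	fmt.Println(dimensionKeys(dims)) // [host region]
}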
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *Measurement) TagKeys() []string {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\tkeys := make([]string, 0, len(m.seriesByTagKeyValue))\n\tfor k := range m.seriesByTagKeyValue {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\treturn keys\n}", "func (target *Target) TagKeys() []string {\n\n\tkeys := make([]string, len(target.Tags))\n\n\tfor i, tag := range target.Tags {\n\t\tkeys[i] = tag.Key\n\t\ti++\n\t}\n\n\treturn keys\n}", "func (o *FiltersNatService) GetTagKeys() []string {\n\tif o == nil || o.TagKeys == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn *o.TagKeys\n}", "func (m *logMeasurement) keys() []string {\n\ta := make([]string, 0, len(m.tagSet))\n\tfor k := range m.tagSet {\n\t\ta = append(a, k)\n\t}\n\tsort.Strings(a)\n\treturn a\n}", "func (o *FiltersVmGroup) GetTagKeys() []string {\n\tif o == nil || o.TagKeys == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn *o.TagKeys\n}", "func (m *varMap) Keys() []string {\n\treturn m.keys\n}", "func (o *FiltersNet) GetTagKeys() []string {\n\tif o == nil || o.TagKeys == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn *o.TagKeys\n}", "func (o *FiltersVirtualGateway) GetTagKeys() []string {\n\tif o == nil || o.TagKeys == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn *o.TagKeys\n}", "func (r *Row) tagsKeys() []string {\n\ta := make([]string, len(r.Tags))\n\tfor k := range r.Tags {\n\t\ta = append(a, k)\n\t}\n\tsort.Strings(a)\n\treturn a\n}", "func (d *Descriptor) Keys() []core.Key {\n\treturn d.keys\n}", "func (o *FiltersSecurityGroup) GetTagKeys() []string {\n\tif o == nil || o.TagKeys == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn *o.TagKeys\n}", "func (d *MetadataAsDictionary) KeySet() []string {\n\tif d.metadata == nil {\n\t\td.Init()\n\t}\n\t// TODO: pre-allocate res\n\tvar res []string\n\tfor k := range d.metadata {\n\t\tres = append(res, k)\n\t}\n\treturn res\n}", "func (d *Data) GetKeys() []string {\n\tvar keys []string\n\tfor k := range d.Values {\n\t\tkeys = append(keys, k)\n\t}\n\treturn keys\n}", "func (g *Graph) listOfKeys() []string {\n\tkeys := make([]string, len(g.Nodes))\n\ti := 0\n\n\tfor k := range g.Nodes {\n\t\tkeys[i] = k\n\t\ti++\n\t}\n\n\treturn keys\n}", "func (p *Partitions) Keys() []string {\n\tvar result = make([]string, 0)\n\tfor k := range p.index {\n\t\tresult = append(result, k)\n\t}\n\treturn result\n}", "func (m *Measurement) SeriesKeys() []string {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\tvar keys []string\n\tfor _, s := range m.seriesByID {\n\t\tkeys = append(keys, s.Key)\n\t}\n\treturn keys\n}", "func Keys(i interface{}) (keys []string) {\n\tv := reflect.ValueOf(i)\n\n\tif v.Kind() != reflect.Map {\n\t\tfmt.Fprintf(os.Stderr, \"Input type is not a map type: %v\", v)\n\t\treturn nil\n\t}\n\n\tfor _,key := range v.MapKeys() {\n\t\tkeys = append(keys, key.Interface().(string))\n\t}\n\n\treturn keys\n}", "func (self *Map) Keys(tagName ...string) []interface{} {\n\treturn Keys(self.MapNative(tagName...))\n}", "func (a *Aliases) Keys() []string {\n\ta.mx.RLock()\n\tdefer a.mx.RUnlock()\n\n\tss := make([]string, 0, len(a.Alias))\n\tfor k := range a.Alias {\n\t\tss = append(ss, k)\n\t}\n\treturn ss\n}", "func (l *Labels) Keys() []string {\n\treturn l.keys\n}", "func (dict *Dictionary) GetKeys() []DictKey {\n\tdict.lock.RLock()\n\tdefer dict.lock.RUnlock()\n\tvar dictKeys []DictKey\n\tdictKeys = []DictKey{}\n\tvar key DictKey\n\tfor key = range dict.elements {\n\t\tdictKeys = append(dictKeys, key)\n\t}\n\treturn dictKeys\n}", "func 
getDimensionHash(dimensions map[string]string) string {\n\tvar keys []string\n\tfor k := range dimensions {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\n\th := fnv.New32a()\n\tfor _, key := range keys {\n\t\t_, _ = io.WriteString(h, key+\"\\t\"+dimensions[key]+\"\\n\")\n\t}\n\treturn strconv.Itoa(int(h.Sum32()))\n}", "func keys(m map[string]int32) []string {\n\tkeys := make([]string, len(m))\n\ti := 0\n\tfor k := range m {\n\t\tkeys[i] = k\n\t\ti++\n\t}\n\treturn keys\n}", "func (p *AgentPool) Keys() []string {\n\tp.Lock()\n\tdefer p.Unlock()\n\tresult := make([]string, 0, len(p.data))\n\tfor k := range p.data {\n\t\tresult = append(result, k)\n\t}\n\treturn result\n}", "func Keys(m map[string]interface{}) []string {\n\tkeys := make([]string, len(m))\n\tvar i int\n\tfor k := range m {\n\t\tkeys[i] = k\n\t\ti++\n\t}\n\treturn keys\n}", "func (p Problem) ExtensionKeys() []string {\n\treturn p.extensionKeys\n}", "func (c Collector) Keys() []string {\n\tvar keys []string\n\tc.Each(func(_ interface{}, k string, _ func()) {\n\t\tkeys = append(keys, k)\n\t})\n\treturn keys\n}", "func (ilos IndexedListOfStrings) Keys() []uint {\n\tlength := len(ilos)\n\tif length <= 0 {\n\t\treturn []uint{}\n\t}\n\n\tkeys := make([]uint, 0, length)\n\tfor k := range ilos {\n\t\tkeys = append(keys, k)\n\t}\n\treturn keys\n}", "func (i IntHashMap[T, V]) Keys() []T {\n\tresult := make([]T, 0, len(i.hashToKey))\n\tfor _, key := range i.hashToKey {\n\t\tresult = append(result, key)\n\t}\n\treturn result\n}", "func filterDimensions(dimensions []*sfxpb.Dimension, dimensionsKeys []string) []*sfxpb.Dimension {\n\tif len(dimensions) == 0 || len(dimensionsKeys) == 0 {\n\t\treturn nil\n\t}\n\tresult := make([]*sfxpb.Dimension, 0, len(dimensionsKeys))\n\tfor _, dk := range dimensionsKeys {\n\t\tfor _, d := range dimensions {\n\t\t\tif d.Key == dk {\n\t\t\t\tresult = append(result, d)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n\treturn result\n}", "func (s *store) Keys() []string {\n\ts.worldMu.Lock()\n\tdefer s.worldMu.Unlock()\n\treturn s.hashMap.Keys()\n}", "func (m *MultiMap) Keys() []interface{} {\n\tkeys := make([]interface{}, m.Size())\n\tcount := 0\n\tfor key, value := range m.m {\n\t\tfor range value {\n\t\t\tkeys[count] = key\n\t\t\tcount++\n\t\t}\n\t}\n\treturn keys\n}", "func (ds *DataStore) GetAllKeys() []uint64 {\n\tds.dataStoreLock.RLock()\n\tdefer ds.dataStoreLock.RUnlock()\n\tkeys := make([]uint64, 0)\n\tfor k := range ds.kvSet {\n\t\tkeys = append(keys, k)\n\t}\n\treturn keys\n}", "func (c *AdapterMemory) Keys(ctx context.Context) ([]interface{}, error) {\n\treturn c.data.Keys()\n}", "func (s *Store) Keys() []string {\n\ts.access.RLock()\n\tdefer s.access.RUnlock()\n\n\tcpy := make([]string, len(s.data))\n\ti := 0\n\tfor key, _ := range s.data {\n\t\tcpy[i] = key\n\t\ti++\n\t}\n\treturn cpy\n}", "func GetKeys(document bson.D, _range ...bool) []string {\n\tfilter := document.Map()\n\tvar arr []string\n\tfor key, val := range filter {\n\t\tif key == \"$or\" || key == \"$and\" {\n\t\t\tfor _, elem := range val.(primitive.A) {\n\t\t\t\tfor k, v := range elem.(bson.D).Map() {\n\t\t\t\t\tif len(k) > 0 && k[0] != '$' {\n\t\t\t\t\t\tif len(_range) == 0 || _range[0] == isRange(v) {\n\t\t\t\t\t\t\tarr = append(arr, getKey(k, v)...)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else if len(key) > 0 && key[0] != '$' {\n\t\t\tif len(_range) == 0 || _range[0] == isRange(val) {\n\t\t\t\tarr = append(arr, getKey(key, val)...)\n\t\t\t}\n\t\t}\n\t}\n\treturn arr\n}", "func (m pbMetricMap) Keys() []string 
{\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\treturn keys\n}", "func (s *metadataSupplier) Keys() []string {\n\tkeys := make([]string, 0, s.metadata.Len())\n\tmd := *s.metadata\n\tfor k := range md {\n\t\tkeys = append(keys, k)\n\t}\n\treturn keys\n}", "func (p *Properties) Keys() []string {\r\n\tkeys := []string{}\r\n\tfor k := range p.dict {\r\n\t\tkeys = append(keys, k)\r\n\t}\r\n\r\n\treturn keys\r\n}", "func PartitionKeyType_Values() []string {\n\treturn []string{\n\t\tPartitionKeyTypeDimension,\n\t\tPartitionKeyTypeMeasure,\n\t}\n}", "func getKeys(dataMap map[string][]ServerEvent) []string {\n\tkeys := make([]string, len(dataMap))\n\n\ti := 0\n\tfor k := range dataMap {\n\t\tkeys[i] = k\n\t\ti++\n\t}\n\treturn keys\n}", "func (self *Map) StringKeys(tagName ...string) []string {\n\treturn sliceutil.Stringify(self.Keys(tagName...))\n}", "func (v Var) Dims() (dims []Dim, err error) {\n\tvar ndims C.int\n\terr = newError(C.nc_inq_varndims(C.int(v.ds), C.int(v.id), &ndims))\n\tif err != nil {\n\t\treturn\n\t}\n\tif ndims == 0 {\n\t\treturn\n\t}\n\tdimids := make([]C.int, ndims)\n\terr = newError(C.nc_inq_vardimid(C.int(v.ds), C.int(v.id), &dimids[0]))\n\tif err != nil {\n\t\treturn\n\t}\n\tdims = make([]Dim, ndims)\n\tfor i, id := range dimids {\n\t\tdims[i] = Dim{v.ds, id}\n\t}\n\treturn\n}", "func (m QuantileMap) Keys() []float64 {\n\tresult := make([]float64, 0, len(m))\n\tfor q := range m {\n\t\tresult = append(result, q)\n\t}\n\tsort.Float64s(result)\n\treturn result\n}", "func (c *SyncCollector) Keys() []string {\n\tvar keys []string\n\tc.Each(func(_ interface{}, k string, _ func()) {\n\t\tkeys = append(keys, k)\n\t})\n\treturn keys\n}", "func (keyRing *KeyRing) KeyIds() []uint64 {\n\tvar res []uint64\n\tfor _, e := range keyRing.entities {\n\t\tres = append(res, e.PrimaryKey.KeyId)\n\t}\n\treturn res\n}", "func (mc MetaDataCarrier) Keys() []string {\n\tkeys := make([]string, 0, len(mc))\n\tfor k := range mc {\n\t\tkeys = append(keys, k)\n\t}\n\treturn keys\n}", "func (m *OrderedIntMap) Keys() []int { return m.keys }", "func (s *Set) Keys() []cid.Cid {\n\ts.lk.Lock()\n\tdefer s.lk.Unlock()\n\tout := make([]cid.Cid, 0, len(s.set))\n\tfor k := range s.set {\n\t\tout = append(out, k)\n\t}\n\treturn out\n}", "func (o MongoIndexKeysPtrOutput) Keys() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *MongoIndexKeys) []string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Keys\n\t}).(pulumi.StringArrayOutput)\n}", "func (mm Uint64Uint64Map) Keys() Uint64List {\n\tif mm == nil {\n\t\treturn nil\n\t}\n\n\ts := make(Uint64List, 0, len(mm))\n\tfor k := range mm {\n\t\ts = append(s, k)\n\t}\n\treturn s\n}", "func Keys(i interface{}) (keys []string, ok bool) {\n\tkeys = make([]string, 0)\n\tv, k := preprocess(i)\n\tswitch k {\n\tcase reflect.Map:\n\t\tk := v.MapKeys()\n\t\tfor i := range k {\n\t\t\ts, ok := k[i].Interface().(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tkeys = append(keys, s)\n\t\t}\n\t\treturn keys, true\n\tcase reflect.Struct:\n\t\tt := v.Type()\n\t\tfor i := 0; i < v.NumField(); i++ {\n\t\t\tt2 := t.Field(i)\n\t\t\tfname := t2.Tag.Get(\"duck\")\n\t\t\tif fname != \"-\" {\n\t\t\t\tif fname == \"\" {\n\t\t\t\t\tfname = t2.Name\n\t\t\t\t}\n\t\t\t\tkeys = append(keys, fname)\n\t\t\t}\n\t\t}\n\t\treturn keys, true\n\t}\n\treturn nil, false\n}", "func (m LabelMap) Keys() []string {\n\tresult := make([]string, 0, len(m))\n\tfor label := range m {\n\t\tresult = append(result, 
label)\n\t}\n\tsort.Strings(result)\n\treturn result\n}", "func (b bindingContainer) InterfaceNames() (keys []string) {\n\t//TODO: use Interfaces() here.\n\tkeys = make([]string, len(b))\n\ti := 0\n\tfor k, _ := range b {\n\t\tkeys[i] = k\n\t\ti++\n\t}\n\treturn\n}", "func (ns *Namespace) ListKeys() []string {\n\tkeys := make([]string, len(ns.ns))\n\tidx := 0\n\tfor k, _ := range ns.ns {\n\t\tkeys[idx] = k\n\t\tidx += 1\n\t}\n\treturn keys\n}", "func (m Points) Keys() []string {\n\tvar keys []string\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\treturn keys\n}", "func (p MapStringToFloat64Ptrs) Keys() []string {\n\tkeys := make([]string, p.Len())[0:0]\n\tfor key := range p {\n\t\tkeys = append(keys, key)\n\t}\n\treturn keys\n}", "func (m Packages) Keys() (keys []string) {\n\tfor name := range m {\n\t\tkeys = append(keys, name)\n\t}\n\n\treturn\n}", "func GetKeys(collection string) ([]string, error) {\n\treturn getStorage().GetKeys(collection)\n}", "func (n *nasdb) GetKeys() []int {\n\tvar keys []int\n\tn.mut.RLock()\n\tfor k := range n.data {\n\t\tkeys = append(keys, k)\n\t}\n\tn.mut.RUnlock()\n\treturn keys\n}", "func (m *SnmpMetricCfg) GetUsedVarNames() ([]string, error) {\n\tif m.DataSrcType != \"STRINGEVAL\" {\n\t\treturn nil, nil\n\t}\n\texpression, err := govaluate.NewEvaluableExpression(m.ExtraData)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn expression.Vars(), nil\n}", "func (*ttlCache) metricDimensionsToMapKey(name string, tags []string) string {\n\tvar metricKeyBuilder strings.Builder\n\n\tdimensions := make([]string, len(tags))\n\tcopy(dimensions, tags)\n\n\tdimensions = append(dimensions, name)\n\tsort.Strings(dimensions)\n\n\tfor _, dim := range dimensions {\n\t\tconcatDimensionValue(&metricKeyBuilder, dim)\n\t}\n\treturn metricKeyBuilder.String()\n}", "func (o MongoIndexKeysOutput) Keys() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v MongoIndexKeys) []string { return v.Keys }).(pulumi.StringArrayOutput)\n}", "func (that *StrAnyMap) Keys() []string {\n\tthat.mu.RLock()\n\tvar (\n\t\tkeys = make([]string, len(that.data))\n\t\tindex = 0\n\t)\n\tfor key := range that.data {\n\t\tkeys[index] = key\n\t\tindex++\n\t}\n\tthat.mu.RUnlock()\n\treturn keys\n}", "func (i StringHashMap[T, V]) Keys() []T {\n\tresult := make([]T, 0, len(i.hashToKey))\n\tfor _, key := range i.hashToKey {\n\t\tresult = append(result, key)\n\t}\n\treturn result\n}", "func Keys(mp any) (keys []string) {\n\trftVal := reflect.Indirect(reflect.ValueOf(mp))\n\tif rftVal.Kind() != reflect.Map {\n\t\treturn\n\t}\n\n\tkeys = make([]string, 0, rftVal.Len())\n\tfor _, key := range rftVal.MapKeys() {\n\t\tkeys = append(keys, key.String())\n\t}\n\treturn\n}", "func (m *MultiMap) KeySet() []interface{} {\n\tkeys := make([]interface{}, len(m.m))\n\tcount := 0\n\tfor key := range m.m {\n\t\tkeys[count] = key\n\t\tcount++\n\t}\n\treturn keys\n}", "func (tree *Tree) Keys() []interface{} {\n\tkeys := make([]interface{}, tree.size)\n\tit := tree.Iterator()\n\tfor i := 0; it.Next(); i++ {\n\t\tkeys[i] = it.Key()\n\t}\n\treturn keys\n}", "func (descriptor *EntityDescriptor) GetKeyValues(entity interface{}) []interface{} {\n\tif entity == nil {\n\t\tpanic(\"not able to get key value for nil entity\")\n\t}\n\n\tif (descriptor.key == nil || descriptor.key.field == nil) && len(descriptor.multiKeys) == 0 {\n\t\treturn nil\n\t}\n\n\tvalue := reflect.ValueOf(entity)\n\tif value.Kind() == reflect.Ptr {\n\t\tvalue = value.Elem()\n\t}\n\n\tif descriptor.key != nil {\n\t\tkeyValue := 
value.FieldByIndex(descriptor.key.field.field.Index).Interface()\n\t\treturn []interface{}{keyValue}\n\t}\n\n\tkeyValues := make([]interface{}, 0, len(descriptor.multiKeys))\n\tfor _, k := range descriptor.multiKeys {\n\t\tkeyValues = append(keyValues, value.FieldByIndex(k.field.Index).Interface())\n\t}\n\n\treturn keyValues\n}", "func (keyRing *KeyRing) GetKeyIDs() []uint64 {\n\tvar res = make([]uint64, len(keyRing.entities))\n\tfor id, e := range keyRing.entities {\n\t\tres[id] = e.PrimaryKey.KeyId\n\t}\n\treturn res\n}", "func Keys(m Map) []Key {\n\treturn m.keys()\n}", "func (instance *DSInstance) GetKeys(ctx context.Context, query *datastore.Query) ([]*datastore.Key, error) {\n\t// attempt to populate results from Datastore client\n\treturn instance.client.GetAll(ctx, query, nil)\n}", "func getKeys(data map[string]interface{}) []string {\n\tvar list []string\n\tfor key := range data {\n\t\tlist = append(list, key)\n\t}\n\treturn list\n}", "func (ctx context) Keyvals() []interface{} {\n\treturn ctx.keyvals\n}", "func (m ConcurrentMap[T]) Keys() []string {\n\tkeys := make([]string, 0)\n\tfor _, shard := range m.shards {\n\t\tshard.RLock()\n\t\tfor key := range shard.items {\n\t\t\tkeys = append(keys, key)\n\t\t}\n\t\tshard.RUnlock()\n\t}\n\treturn keys\n}", "func (md *MetaData) Keys() []Key {\n\treturn md.keys\n}", "func (c *StringValueMap) Keys() []string {\n\tkeys := []string{}\n\tfor key := range c.value {\n\t\tkeys = append(keys, key)\n\t}\n\treturn keys\n}", "func keys(m map[int]struct{}) []int {\n\ts := make([]int, 0, len(m))\n\tfor id := range m {\n\t\ts = append(s, id)\n\t}\n\treturn s\n}", "func GetMapKeys(slices map[string][]string) (res []string) {\n\tfor key := range slices {\n\t\tres = append(res, key)\n\t}\n\treturn\n}", "func EnumerateDebugRenderableKeys() []string {\n\tkeys := []string{}\n\tdebugMap.Range(func(k, v interface{}) bool {\n\t\tkey, ok := k.(string)\n\t\tif ok {\n\t\t\tkeys = append(keys, key)\n\t\t}\n\t\treturn true\n\t})\n\treturn keys\n}", "func (m *OrderedMap) Keys() []string {\n\tkeys := []string{}\n\tfor k, _ := range m.Map() {\n\t\tkeys = append(keys, k)\n\t}\n\treturn keys\n}", "func (keyRing *KeyRing) GetKeys() []*Key {\n\tkeys := make([]*Key, keyRing.CountEntities())\n\tfor i, entity := range keyRing.entities {\n\t\tkeys[i] = &Key{entity}\n\t}\n\treturn keys\n}", "func Keys[K comparable, V any](in map[K]V) []K {\n\tresult := make([]K, 0, len(in))\n\n\tfor k := range in {\n\t\tresult = append(result, k)\n\t}\n\n\treturn result\n}", "func keys(m map[string][]string) []string {\n\tres := make([]string, len(m))\n\tfor k := range m {\n\t\tres = append(res, string(k))\n\t}\n\treturn res\n}", "func (m *OrderedMap) Keys() []string { return m.keys }", "func (tn *TreeNode) Keys() []string {\n\tkeys := make([]string, len(tn.Children))\n\n\ti := 0\n\tfor k := range tn.Children {\n\t\tkeys[i] = k\n\t\ti++\n\t}\n\tsort.StringSlice.Sort(keys)\n\treturn keys\n}", "func (c *Cache) Keys() []string {\n\tc.RLock()\n\tdefer c.RUnlock()\n\n\tresult := make([]string, 0, len(c.data))\n\tfor key := range c.data {\n\t\tresult = append(result, key)\n\t}\n\n\treturn result\n}", "func (ms *MemStore) Keys() []string {\n\tms.mu.RLock()\n\ts := make([]string, 0, len(ms.data))\n\tfor key := range ms.data {\n\t\ts = append(s, key)\n\t}\n\tms.mu.RUnlock()\n\tsort.Strings(s)\n\treturn s\n}", "func (this *KeyspaceTerm) Keys() expression.Expression {\n\treturn this.keys\n}", "func TimeDimension_Values() []string {\n\treturn 
[]string{\n\t\tTimeDimensionHours,\n\t\tTimeDimensionDays,\n\t\tTimeDimensionWeeks,\n\t}\n}", "func KeySet(m map[string]interface{}) []string {\n\tkeys := make([]string, len(m))\n\ti := 0\n\tfor k := range m {\n\t\tkeys[i] = k\n\t\ti++\n\t}\n\treturn keys\n}", "func keys(m map[string]string) []string {\n\tvar keys []string\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\treturn keys\n}", "func (division TODivision) GetKeys() (map[string]interface{}, bool) {\n\tif division.ID == nil {\n\t\treturn map[string]interface{}{\"id\": 0}, false\n\t}\n\treturn map[string]interface{}{\"id\": *division.ID}, true\n}", "func GetLogKeys() map[string]bool {\n\tconsoleLogKeys := ConsoleLogKey().EnabledLogKeys()\n\tlogKeys := make(map[string]bool, len(consoleLogKeys))\n\tfor _, v := range consoleLogKeys {\n\t\tlogKeys[v] = true\n\t}\n\treturn logKeys\n}", "func (k *KeyValue) GetKeys() (keys []string) {\n\tfor key := range k.Raw {\n\t\tkeys = append(keys, key)\n\t}\n\treturn\n}", "func (klgb *K8sLabelGroupBy) IntsX(ctx context.Context) []int {\n\tv, err := klgb.Ints(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn v\n}", "func (c *LRU) Keys() []interface{} {\n\tkeys := make([]interface{}, c.evictList.Len())\n\ti := 0\n\tfor ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {\n\t\tkeys[i] = ent.Value.(*utils.Entry).Aliases\n\t\ti++\n\t}\n\treturn keys\n}", "func XPriDimsByProdID(db XODB, prodID uint) ([]*XPriDim, error) {\n\tvar err error\n\n\t// sql query\n\tconst sqlstr = `SELECT ` +\n\t\t`id, value, prod_id, pri_dd_id, created_by, updated_by, created_at, updated_at ` +\n\t\t`FROM x_showroom.x_pri_dim ` +\n\t\t`WHERE prod_id = ?`\n\n\t// run query\n\tXOLog(sqlstr, prodID)\n\tq, err := db.Query(sqlstr, prodID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer q.Close()\n\n\t// load results\n\tres := []*XPriDim{}\n\tfor q.Next() {\n\t\txpd := XPriDim{\n\t\t\t_exists: true,\n\t\t}\n\n\t\t// scan\n\t\terr = q.Scan(&xpd.ID, &xpd.Value, &xpd.ProdID, &xpd.PriDdID, &xpd.CreatedBy, &xpd.UpdatedBy, &xpd.CreatedAt, &xpd.UpdatedAt)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tres = append(res, &xpd)\n\t}\n\n\treturn res, nil\n}", "func (sf SearchFilter) GetKeys() []string {\n\tret := funk.Keys(sf.Keys).([]string)\n\tsort.Strings(ret)\n\treturn ret\n}", "func (m AggregationRecord) GetKeys() map[string]string {\n\treturn m.Keys\n}" ]
[ "0.64260674", "0.59128034", "0.5869726", "0.5851657", "0.5818404", "0.57840663", "0.5758748", "0.56994116", "0.5590301", "0.5536994", "0.54531986", "0.544049", "0.54329765", "0.54284394", "0.5335913", "0.5327702", "0.5310732", "0.53080714", "0.5301138", "0.52743703", "0.5270603", "0.5252172", "0.52398014", "0.5238167", "0.5201266", "0.51981974", "0.5185251", "0.5183213", "0.51822895", "0.5169827", "0.5163352", "0.5150785", "0.5146297", "0.51429605", "0.51391834", "0.51311797", "0.5124061", "0.5122564", "0.5110459", "0.5101905", "0.50995106", "0.5092728", "0.5074202", "0.50672615", "0.5064236", "0.50592744", "0.50579673", "0.50488496", "0.504851", "0.50350237", "0.502311", "0.5011669", "0.50052327", "0.5005113", "0.5004706", "0.4996948", "0.4979056", "0.49710602", "0.4968751", "0.49681163", "0.49535438", "0.49477863", "0.49346995", "0.49337506", "0.49318123", "0.4931721", "0.4926297", "0.49158642", "0.4915317", "0.49030733", "0.49001205", "0.48662025", "0.48594233", "0.4856287", "0.48545855", "0.48519608", "0.48491704", "0.48490322", "0.48421916", "0.48306957", "0.48293227", "0.48148054", "0.4810329", "0.48074132", "0.48064864", "0.47931102", "0.4791057", "0.4783006", "0.47790504", "0.47748873", "0.47621682", "0.47619897", "0.47534654", "0.47519112", "0.47404405", "0.47396296", "0.47271085", "0.47248825", "0.47222477", "0.4719125" ]
0.8072088
0
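Annotation (not part of the dataset row above): most of the negatives in this record share one canonical Go pattern — collecting a map's keys into a slice and sorting them (see the `TagKeys`, `Keys`, and `pbMetricMap.Keys` snippets). A minimal self-contained sketch of that shared pattern; the `sortedKeys` helper name and the `map[string]int` element type are my own illustration choices, not taken from any snippet in the row:

```go
package main

import (
	"fmt"
	"sort"
)

// sortedKeys returns the keys of m in ascending order. This mirrors the
// key-extraction helpers that recur throughout the negatives above:
// preallocate, range over the map, then sort for deterministic output.
func sortedKeys(m map[string]int) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

func main() {
	fmt.Println(sortedKeys(map[string]int{"b": 2, "a": 1})) // prints: [a b]
}
```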
newMapper returns a new instance of mapper.
func newMapper(e *Executor, seriesID uint32, fieldID uint8, typ DataType) *mapper { return &mapper{ executor: e, seriesID: seriesID, fieldID: fieldID, typ: typ, c: make(chan map[string]interface{}, 0), done: make(chan chan struct{}, 0), } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewMapper(config MapperConfig) *Mapper {\n\tm := Mapper{\n\t\tconfig: config,\n\t\tregexes: make([]*regexp.Regexp, len(config.Mappings)),\n\t\tvalidate: validator.New(),\n\t}\n\n\tm.Initialize()\n\n\treturn &m\n}", "func NewMapper(isBiFlow bool) *Mapper {\n\treturn &Mapper{\n\t\tmmap: make(map[string]*[]*gopacket.Packet, 0),\n\t\tisBiFlow: isBiFlow,\n\t}\n}", "func NewMapper(m func(srcPtr interface{}, destPtr interface{}) error) Mapper {\n return funcMapper(m)\n}", "func New(mappingMode MappingMode, panicOnMissingFields bool) StructMapper {\n\treturn StructMapper{make(map[string]interface{}), mappingMode, panicOnMissingFields}\n}", "func NewMapper(content string) *Mapper {\n\tm := &Mapper{\n\t\tcontent: content,\n\t}\n\n\t// Precompute line offsets, for easy deduction of the line number for a global offset\n\tm.offsets = make([]int, 0, 32)\n\tm.offsets = append(m.offsets, 0) // First line starts at offset 0.\n\tfor offset, r := range content {\n\t\tif r == '\\n' {\n\t\t\tm.offsets = append(m.offsets, offset+1)\n\t\t}\n\t}\n\n\t// Introduce an artificial last line.\n\tm.offsets = append(m.offsets, len(content))\n\n\treturn m\n}", "func newAzureNetworkMapper() *AzureNetworkMapper {\n\treturn &AzureNetworkMapper{}\n}", "func NewMapFunc(t mockConstructorTestingTNewMapFunc) *MapFunc {\n\tmock := &MapFunc{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewFlowMapper(args *Args) *FlowMapper {\n\treturn &FlowMapper{workdir: args.WorkDir, mapfile: args.MapFile}\n}", "func New(log zerolog.Logger, chain Chain, feed Feeder, index index.Writer, options ...func(*MapperConfig)) (*Mapper, error) {\n\n\t// We don't use a checkpoint by default. The options can set one, in which\n\t// case we will add the checkpoint as a finalized state commitment in our\n\t// trie registry.\n\tcfg := MapperConfig{\n\t\tCheckpointFile: \"\",\n\t\tPostProcessing: PostNoop,\n\t}\n\tfor _, option := range options {\n\t\toption(&cfg)\n\t}\n\n\t// Check if the checkpoint file exists.\n\tif cfg.CheckpointFile != \"\" {\n\t\tstat, err := os.Stat(cfg.CheckpointFile)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid checkpoint file: %w\", err)\n\t\t}\n\t\tif stat.IsDir() {\n\t\t\treturn nil, fmt.Errorf(\"invalid checkpoint file: directory\")\n\t\t}\n\t}\n\n\ti := Mapper{\n\t\tlog: log,\n\t\tchain: chain,\n\t\tfeed: feed,\n\t\tindex: index,\n\t\tcheckpoint: cfg.CheckpointFile,\n\t\tpost: cfg.PostProcessing,\n\t\twg: &sync.WaitGroup{},\n\t\tstop: make(chan struct{}),\n\t}\n\n\treturn &i, nil\n}", "func NewMap(r *goja.Runtime) *Map {\n\treturn &Map{runtime: r}\n}", "func NewCMapper() (m *CMapper) {\n\tm = new(CMapper)\n\tm.charToStr = make(map[int]string)\n\tm.strToChar = make(map[string]int)\n\treturn\n}", "func New(options *Options) (*MPD, error) {\n\tif len(options.MapInfos) == 0 {\n\t\treturn nil, errors.New(\"there are not any maport info\")\n\t}\n\tmpd := make(map[string]*Mapper)\n\tlogger := log.NewLogger(\"main\")\n\tlogger.Info(\"maportd create\")\n\tfor _, info := range options.MapInfos {\n\t\tmapper, err := NewMapper(info.Port, info.DestAddr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif _, ok := mpd[mapper.name]; ok {\n\t\t\treturn nil, fmt.Errorf(\"the mapper %s is duplicate\", mapper.name)\n\t\t}\n\t\tmpd[mapper.name] = mapper\n\t}\n\treturn &MPD{mpd, logger}, nil\n}", "func New() *Map { return new(Map).Init() }", "func NewResourceMapper(resolver Resolver) ResourceMapper {\n\treturn &resourceMapper{\n\t\tres: resolver,\n\t}\n}", "func 
NewMap(less func(a, b interface{}) bool) *Map {\n\treturn &Map{\n\t\tless: less,\n\t}\n}", "func NewURLMapper(reader io.Reader) (Mapper, error) {\n\tu := &URLMapper{}\n\tif err := u.loadRules(reader); err != nil {\n\t\treturn u, err\n\t}\n\treturn u, nil\n}", "func newLockMap() *lockMap {\n\treturn &lockMap{\n\t\tmutexMap: make(map[string]*sync.Mutex),\n\t}\n}", "func NewTagMapper(tags ...string) *TagMapper {\n\treturn &TagMapper{\n\t\ttags: tags,\n\t\ttypes: map[reflect.Type]KeyIndexes{},\n\t}\n}", "func newProgramMap() *programMap {\n\treturn &programMap{\n\t\tp: make(map[uint32]uint16),\n\t}\n}", "func (a *ByAttribute) newMap(data interface{}, reflectItems []*modelsNormalization.ReflectStructItem) (reflect.Value, error) {\n\tif len(reflectItems) == 0 {\n\t\treturn reflect.ValueOf(map[string]interface{}{}), nil\n\t}\n\n\tif mapElemType, equal := modelsNormalization.EqualStructItemType(reflectItems); equal {\n\t\tmapKeyType := reflect.TypeOf(string(\"\"))\n\t\tnewMap := reflect.MakeMap(reflect.MapOf(mapKeyType, mapElemType))\n\t\tfor _, item := range reflectItems {\n\t\t\tnewMap.SetMapIndex(reflect.ValueOf(item.MapKeyName), item.Value)\n\t\t}\n\t\treturn newMap, nil\n\t} else {\n\t\treturn a.newStruct(data, reflectItems)\n\t}\n}", "func NewMap() Map {\n\treturn Map{NewSet()}\n}", "func newSimpleMapping(name string, m map[byte]rune) *simpleMapping {\n\treturn &simpleMapping{\n\t\tbaseName: name,\n\t\tdecode: m,\n\t}\n}", "func NewDevMapper() DevMapper {\n\tm := &mapper{\n\t\tcache: make(map[string]string),\n\t}\n\t// loop through logical drives and query the DOS device name\n\tfor _, drive := range sys.GetLogicalDrives() {\n\t\tdevice, err := sys.QueryDosDevice(drive)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tm.cache[device] = drive\n\t}\n\treturn m\n}", "func newMapWalker(r proto.Reader) (*mapWalker, int, error) {\n\tt, err := r.ReadByte()\n\tif err != nil {\n\t\treturn nil, BadProtocol, err\n\t}\n\tif types.DbType(t) != types.Map {\n\t\tif t == byte(UnsupportedProtocol) || t == byte(BadProtocol) {\n\t\t\treturn nil, UnsupportedProtocol, fmt.Errorf(\"unsupported protocol\")\n\t\t}\n\t\treturn nil, BadProtocol, fmt.Errorf(\"stream must point to a MAP, it points to %v of type %[1]T\", t)\n\t}\n\t_, err = r.ReadInt() // total length of map in bytes\n\tnumElements, err := r.ReadInt()\n\tif err != nil {\n\t\treturn nil, BadProtocol, err\n\t}\n\tif numElements < 0 || numElements > maxNumElements {\n\t\treturn nil, BadProtocol, fmt.Errorf(\"invalid number of map elements: %d\", numElements)\n\t}\n\treturn &mapWalker{r, numElements, \"\", 0}, 0, nil\n}", "func NewRawMapper(sh *Shard, stmt *influxql.SelectStatement) *RawMapper {\n\treturn &RawMapper{\n\t\tshard: sh,\n\t\tstmt: stmt,\n\t}\n}", "func NewMap(items ...MapItem) *Map {\n\treturn newMap(items)\n}", "func execNewMap(_ int, p *gop.Context) {\n\targs := p.GetArgs(2)\n\tret := types.NewMap(args[0].(types.Type), args[1].(types.Type))\n\tp.Ret(2, ret)\n}", "func newConfigMap(configMapName, namespace string, labels map[string]string,\n\tkibanaIndexMode, esUnicastHost, rootLogger, nodeQuorum, recoverExpectedShards, primaryShardsCount, replicaShardsCount string) *v1.ConfigMap {\n\n\terr, data := renderData(kibanaIndexMode, esUnicastHost, nodeQuorum, recoverExpectedShards, primaryShardsCount, replicaShardsCount, rootLogger)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\treturn &v1.ConfigMap{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"ConfigMap\",\n\t\t\tAPIVersion: v1.SchemeGroupVersion.String(),\n\t\t},\n\t\tObjectMeta: 
metav1.ObjectMeta{\n\t\t\tName: configMapName,\n\t\t\tNamespace: namespace,\n\t\t\tLabels: labels,\n\t\t},\n\t\tData: data,\n\t}\n}", "func New(dir, name string) (mp *MapDB, err error) {\n\tvar m MapDB\n\t// Initialize map\n\tm.m = make(map[string]string)\n\n\t// Encryption middleware\n\tcmw := middleware.NewCryptyMW([]byte(\" encryption key \"), make([]byte, 16))\n\tif cmw == nil {\n\n\t}\n\n\t// Create a new instance of mrT\n\tif m.mrT, err = mrT.New(dir, name); err != nil {\n\t\treturn\n\t}\n\n\tif err = m.mrT.ForEach(m.load); err != nil {\n\t\treturn\n\t}\n\n\t// Assign pointer to our MapDB\n\tmp = &m\n\treturn\n}", "func New() Map {\n\treturn empty\n}", "func (b *Batis) Mapper(binding string) *mapper {\n\tmp, have := b.mappers[binding]\n\tif !have {\n\t\tb.Logger.Panicf(\"mapper: no binding [%v]\", binding)\n\t}\n\t_, mds := b.MultiDS.defaultDS()\n\tmp.currentDS = mds\n\treturn mp\n}", "func NewMap(csvData io.Reader) (m *MapMapper, err error) {\n\tm = &MapMapper{}\n\tr := csv.NewReader(csvData)\n\tr.LazyQuotes = true\n\tr.FieldsPerRecord = 2\n\tvar row []string\n\trow, err = r.Read()\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tm.From = row[0]\n\tm.To = row[1]\n\tm.m = make(map[string]string)\n\ti := 0\n\tfor {\n\t\ti++\n\t\trow, err = r.Read()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tm.m[row[0]] = row[1]\n\t}\n\treturn\n}", "func NewMap(key, val Type) Type {\n\treturn &Map{\n\t\tnewBaseType(),\n\t\tkey,\n\t\tval,\n\t}\n}", "func (db *DB) MapperFunc(mf func(string) string) {\n db.Mapper = reflectx.NewMapperFunc(\"db\", mf)\n}", "func NewMap(in io.Reader) (*Map, error) {\n\tvar mapper Map\n\tif err := json.NewDecoder(in).Decode(&mapper); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &mapper, nil\n}", "func NewConfigurationMapper(configs []interfaces.ConfigurationInterface) *ConfigurationMapper {\n\tresult := ConfigurationMapper{\n\t\tActionMap: make(map[string][]interfaces.ConfigurationInterface),\n\t}\n\tfor _, config := range configs {\n\t\tfor _, action := range config.GetActions() {\n\t\t\tlist := result.ActionMap[action]\n\t\t\tlist = append(list, config)\n\t\t\tresult.ActionMap[action] = list\n\t\t}\n\t}\n\treturn &result\n}", "func NewMap(list *MapItem) *Map {\n\tcmap := &Map{}\n\tcmap.InitMap(list)\n\treturn cmap\n}", "func newMetricIDMapping(metricID, sequence uint32) MetricIDMapping {\n\treturn &metricIDMapping{\n\t\tmetricID: metricID,\n\t\thash2SeriesID: make(map[uint64]uint32),\n\t\tidSequence: *atomic.NewUint32(sequence), // first value is 1\n\t\tmaxSeriesIDsLimit: *atomic.NewUint32(constants.DefaultMaxSeriesIDsCount),\n\t}\n}", "func (e *exprHelper) NewMap(entries ...ast.EntryExpr) ast.Expr {\n\treturn e.exprFactory.NewMap(e.nextMacroID(), entries)\n}", "func newMappingEvent() *mappingEvent {\n\treturn &mappingEvent{\n\t\tevents: make(map[uint32]*metricEvent),\n\t}\n}", "func New(behavior Func) structmap.Behavior {\n\treturn behavior\n}", "func newPager() *pager {\n\treturn &pager{\n\t\tpageMap: make(map[int]page),\n\t\tpagesize: 500,\n\t}\n}", "func NewListMapper() ListMapper {\n\treturn ListMapper{}\n}", "func NewMapHasher(baseHasher hashers.MapHasher) hashers.MapHasher {\n\treturn &objmaphasher{\n\t\tMapHasher: baseHasher,\n\t}\n}", "func newConfigmap(customConfigmap *customConfigMapv1alpha1.CustomConfigMap) *corev1.ConfigMap {\n\tlabels := map[string]string{\n\t\t\"name\": customConfigmap.Spec.ConfigMapName,\n\t\t\"customConfigName\": customConfigmap.Name,\n\t\t\"latest\": \"true\",\n\t}\n\tname := 
fmt.Sprintf(\"%s-%s\", customConfigmap.Spec.ConfigMapName, RandomSequence(5))\n\tconfigName := NameValidation(name)\n\treturn &corev1.ConfigMap{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: configName,\n\t\t\tNamespace: customConfigmap.Namespace,\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t*metav1.NewControllerRef(customConfigmap, customConfigMapv1alpha1.SchemeGroupVersion.WithKind(\"CustomConfigMap\")),\n\t\t\t},\n\t\t\tLabels: labels,\n\t\t},\n\t\tData: customConfigmap.Spec.Data,\n\t\tBinaryData: customConfigmap.Spec.BinaryData,\n\t}\n}", "func newBlockMapFile(blkSize uint64, regFile regularFile) (*blockMapFile, error) {\n\tfile := &blockMapFile{regFile: regFile}\n\tfile.regFile.impl = file\n\n\ttoFill := uint64(12)\n\tblksUsed := regFile.blksUsed(blkSize)\n\tif blksUsed < toFill {\n\t\ttoFill = blksUsed\n\t}\n\n\tblkMap := regFile.inode.diskInode.Data()\n\tfile.fileToPhysBlks = make([]uint32, toFill)\n\tfor i := uint64(0); i < toFill; i++ {\n\t\tbinary.Unmarshal(blkMap[i*4:(i+1)*4], binary.LittleEndian, &file.fileToPhysBlks[i])\n\t}\n\treturn file, nil\n}", "func NewVmwareMapper(vm *object.VirtualMachine, vmProperties *mo.VirtualMachine, hostProperties *mo.HostSystem, credentials *DataVolumeCredentials, mappings *v1beta1.VmwareMappings, instanceUID string, namespace string, osFinder vos.OSFinder) *VmwareMapper {\n\treturn &VmwareMapper{\n\t\tcredentials: credentials,\n\t\thostProperties: hostProperties,\n\t\tinstanceUID: instanceUID,\n\t\tmappings: mappings,\n\t\tnamespace: namespace,\n\t\tosFinder: osFinder,\n\t\tvm: vm,\n\t\tvmProperties: vmProperties,\n\t}\n}", "func New(sources ...Source) (*Map, proc.Runner) {\n\tif len(sources) == 0 {\n\t\tsources = append(sources, Database())\n\t}\n\n\tm := &Map{\n\t\tsource: Multi(sources...),\n\t\tready: make(chan struct{}),\n\t}\n\n\treturn m, m.runner\n}", "func NewMap(inner map[Value]Value) Map {\n\treturn Map{&inner}\n}", "func NewMap() Map {\n\treturn &sortedMap{}\n}", "func NewMap(ctx *pulumi.Context,\n\tname string, args *MapArgs, opts ...pulumi.ResourceOption) (*Map, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Configuration == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Configuration'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Map\n\terr := ctx.RegisterResource(\"aws-native:location:Map\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func newUserIdentityMappings(c *Client) *userIdentityMappings {\n\treturn &userIdentityMappings{\n\t\tr: c,\n\t}\n}", "func NewMutMap(onNewTeam, onNewLeader chan string) *MutMap {\n\tmm := MutMap{\n\t\tonNewTeam: onNewTeam,\n\t\tonNewLeader: onNewLeader,\n\t}\n\tmm.teams = make(map[string]server.Team)\n\treturn &mm\n}", "func NewMapping(address common.Address, backend bind.ContractBackend) (*Mapping, error) {\n\tcontract, err := bindMapping(address, backend, backend, backend)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Mapping{MappingCaller: MappingCaller{contract: contract}, MappingTransactor: MappingTransactor{contract: contract}, MappingFilterer: MappingFilterer{contract: contract}}, nil\n}", "func GetNewRelationsMapper(s *runtime.Scheme) *ObjRelMapper {\n\tret := &ObjRelMapper{}\n\tret.init(s, gen.ObjRelations)\n\treturn ret\n}", "func NewMmap(filename string, mode int) (*Mmap, error) {\n\tm := &Mmap{\n\t\tMmapBytes: make([]byte, 0),\n\t\tFileName: filename,\n\t}\n\n\tfileMode := 
os.O_RDWR\n\tfileCreateMode := os.O_RDWR | os.O_CREATE | os.O_TRUNC\n\tif mode == ModeCreate {\n\t\tfileMode = fileCreateMode\n\t}\n\tfile, err := os.OpenFile(filename, fileMode, 0664)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfileInfo, err := file.Stat()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tm.FileLen = fileInfo.Size()\n\tif mode == ModeCreate || m.FileLen == 0 {\n\t\tsyscall.Ftruncate(int(file.Fd()), m.FileLen+preAllocatedSpace)\n\t\tm.FileLen = m.FileLen + preAllocatedSpace\n\t}\n\tm.MmapBytes, err = syscall.Mmap(int(file.Fd()), 0, int(m.FileLen), syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"mapping file error\")\n\t}\n\tm.FileHandler = file\n\treturn m, nil\n}", "func (n *Ncd) AddMapper(mapper eventmappers.Mapper) *Ncd {\n\tn._mappers = append(n._mappers, &mapper)\n\treturn n\n}", "func NewMetricMapper(configProfiles []config.MappingProfile, cacheSize int) (*MetricMapper, error) {\n\tprofiles := make([]MappingProfile, 0, len(configProfiles))\n\tfor profileIndex, configProfile := range configProfiles {\n\t\tif configProfile.Name == \"\" {\n\t\t\treturn nil, fmt.Errorf(\"missing profile name %d\", profileIndex)\n\t\t}\n\t\tif configProfile.Prefix == \"\" {\n\t\t\treturn nil, fmt.Errorf(\"missing prefix for profile: %s\", configProfile.Name)\n\t\t}\n\t\tprofile := MappingProfile{\n\t\t\tName: configProfile.Name,\n\t\t\tPrefix: configProfile.Prefix,\n\t\t\tMappings: make([]*MetricMapping, 0, len(configProfile.Mappings)),\n\t\t}\n\t\tfor i, currentMapping := range configProfile.Mappings {\n\t\t\tmatchType := currentMapping.MatchType\n\t\t\tif matchType == \"\" {\n\t\t\t\tmatchType = matchTypeWildcard\n\t\t\t}\n\t\t\tif matchType != matchTypeWildcard && matchType != matchTypeRegex {\n\t\t\t\treturn nil, fmt.Errorf(\"profile: %s, mapping num %d: invalid match type, must be `wildcard` or `regex`\", profile.Name, i)\n\t\t\t}\n\t\t\tif currentMapping.Name == \"\" {\n\t\t\t\treturn nil, fmt.Errorf(\"profile: %s, mapping num %d: name is required\", profile.Name, i)\n\t\t\t}\n\t\t\tif currentMapping.Match == \"\" {\n\t\t\t\treturn nil, fmt.Errorf(\"profile: %s, mapping num %d: match is required\", profile.Name, i)\n\t\t\t}\n\t\t\tregex, err := buildRegex(currentMapping.Match, matchType)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tprofile.Mappings = append(profile.Mappings, &MetricMapping{name: currentMapping.Name, tags: currentMapping.Tags, regex: regex})\n\t\t}\n\t\tprofiles = append(profiles, profile)\n\t}\n\tcache, err := newMapperCache(cacheSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &MetricMapper{Profiles: profiles, cache: cache}, nil\n}", "func New() *Mock {\n\treturn &Mock{\n\t\tm: mockMap{},\n\t\toldTransport: http.DefaultTransport,\n\t}\n}", "func NewMap(size int) *Map {\n\tif size <= 0 {\n\t\tsize = runtime.GOMAXPROCS(0)\n\t}\n\tsplits := make([]Split, size)\n\tfor i := range splits {\n\t\tsplits[i].Map = make(map[interface{}]interface{})\n\t}\n\treturn &Map{splits}\n}", "func newMapReact() *mapReact {\n\tma := mapReact{ma: make(map[SenderDetachCloser]bool)}\n\treturn &ma\n}", "func newMap() map[interface{}]interface{} {\n\treturn map[interface{}]interface{}{}\n}", "func New(opts *Options) *Map {\n\tif opts == nil {\n\t\topts = &Options{}\n\t}\n\tstore := newStore(opts)\n\tm := &Map{\n\t\tstore: store,\n\t\tkeeper: newKeeper(store),\n\t}\n\tgo m.keeper.run()\n\treturn m\n}", "func NewMap(key, elem Type) *Map {\n\treturn &Map{key: key, elem: elem}\n}", "func 
newRouter() *router {\n\treturn &router{\n\t\troots: make(map[string]*node),\n\t}\n}", "func New() *DynMap {\n\treturn &DynMap{make(map[string]interface{})}\t\n}", "func newMapping(prof *profile.Profile, obj plugin.ObjTool, ui plugin.UI, force bool) (*mappingTable, error) {\n\tmt := &mappingTable{\n\t\tprof: prof,\n\t\tsegments: make(map[*profile.Mapping]plugin.ObjFile),\n\t}\n\n\t// Identify used mappings\n\tmappings := make(map[*profile.Mapping]bool)\n\tfor _, l := range prof.Location {\n\t\tmappings[l.Mapping] = true\n\t}\n\n\tmissingBinaries := false\n\tfor midx, m := range prof.Mapping {\n\t\tif !mappings[m] {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Do not attempt to re-symbolize a mapping that has already been symbolized.\n\t\tif !force && (m.HasFunctions || m.HasFilenames || m.HasLineNumbers) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif m.File == \"\" {\n\t\t\tif midx == 0 {\n\t\t\t\tui.PrintErr(\"Main binary filename not available.\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmissingBinaries = true\n\t\t\tcontinue\n\t\t}\n\n\t\t// Skip well-known system mappings\n\t\tif m.Unsymbolizable() {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Skip mappings pointing to a source URL\n\t\tif m.BuildID == \"\" {\n\t\t\tif u, err := url.Parse(m.File); err == nil && u.IsAbs() && strings.Contains(strings.ToLower(u.Scheme), \"http\") {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tname := filepath.Base(m.File)\n\t\tif m.BuildID != \"\" {\n\t\t\tname += fmt.Sprintf(\" (build ID %s)\", m.BuildID)\n\t\t}\n\t\tf, err := obj.Open(m.File, m.Start, m.Limit, m.Offset, m.KernelRelocationSymbol)\n\t\tif err != nil {\n\t\t\tui.PrintErr(\"Local symbolization failed for \", name, \": \", err)\n\t\t\tmissingBinaries = true\n\t\t\tcontinue\n\t\t}\n\t\tif fid := f.BuildID(); m.BuildID != \"\" && fid != \"\" && fid != m.BuildID {\n\t\t\tui.PrintErr(\"Local symbolization failed for \", name, \": build ID mismatch\")\n\t\t\tf.Close()\n\t\t\tcontinue\n\t\t}\n\n\t\tmt.segments[m] = f\n\t}\n\tif missingBinaries {\n\t\tui.PrintErr(\"Some binary filenames not available. 
Symbolization may be incomplete.\\n\" +\n\t\t\t\"Try setting PPROF_BINARY_PATH to the search path for local binaries.\")\n\t}\n\treturn mt, nil\n}", "func New(c *character.Character) *Maps {\n\n\tlog.Info(\"Generating new maps\")\n\n\tm := new(Maps)\n\tm.monsters = make([][]*monster.Monster, MaxVolcano)\n\tfor i := uint(0); i < MaxVolcano; i++ {\n\n\t\tnm := newMap(i) // create the new map with items\n\n\t\tswitch i {\n\t\tcase homeLevel:\n\t\t\tm.entrance = append(m.entrance, walkToEmpty(randMapCoord(), nm)) // TODO change this to be next to the dungeon entrance\n\t\tcase 1: // dungeon 0 has an entrance\n\t\t\tnm[height-1][width/2] = (Empty{})\n\t\t\tm.entrance = append(m.entrance, types.Coordinate{X: width / 2, Y: height - 2})\n\t\t\tm.monsters[i] = spawnMonsters(nm, i, true) // spawn monsters onto the map\n\t\tdefault:\n\t\t\t// Set the entrace for the maze to a random location\n\t\t\tm.entrance = append(m.entrance, walkToEmpty(randMapCoord(), nm))\n\t\t\tm.monsters[i] = spawnMonsters(nm, i, true) // spawn monsters onto the map\n\t\t}\n\n\t\tm.mazes = append(m.mazes, nm)\n\t}\n\tm.active = m.mazes[homeLevel]\n\tm.SpawnCharacter(m.entrance[homeLevel], c)\n\treturn m\n}", "func NewMap() *Map {\n\tm := &Map{}\n\n\tm.Map = make(EnvMap)\n\treturn m\n}", "func newRouter() *router {\n\treturn &router{\n\t\troots: make(map[string]*node),\n\t\thandlers: make(map[string]HandlerFunc),\n\t}\n}", "func newJsMapTask(funcSource string) (JSServerTask, error) {\n\tmapper := &jsMapTask{}\n\terr := mapper.Init(funcSource)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Implementation of the 'emit()' callback:\n\tmapper.DefineNativeFunction(\"emit\", func(call otto.FunctionCall) otto.Value {\n\t\tkey, err1 := call.ArgumentList[0].Export()\n\t\tvalue, err2 := call.ArgumentList[1].Export()\n\t\tif err1 != nil || err2 != nil {\n\t\t\tpanic(fmt.Sprintf(\"Unsupported key or value types: emit(%#v,%#v): %v %v\", key, value, err1, err2))\n\t\t}\n\t\tmapper.output = append(mapper.output, &ViewRow{Key: key, Value: value})\n\t\treturn otto.UndefinedValue()\n\t})\n\n\tmapper.Before = func() {\n\t\tmapper.output = []*ViewRow{}\n\t}\n\tmapper.After = func(result otto.Value, err error) (interface{}, error) {\n\t\toutput := mapper.output\n\t\tmapper.output = nil\n\t\treturn output, err\n\t}\n\treturn mapper, nil\n}", "func New() *OMap {\n\treturn &OMap{\n\t\tkeys: make([]string, 0),\n\t\tbaseMap: make(map[string]interface{}, 0),\n\t}\n}", "func NewMap(a Area) *Map {\n\tm := make([][]*unsafe.Pointer, a.height)\n\n\tfor y := uint8(0); y < a.height; y++ {\n\t\tm[y] = make([]*unsafe.Pointer, a.width)\n\n\t\tfor x := uint8(0); x < a.width; x++ {\n\t\t\tvar emptyFieldPointer = unsafe.Pointer(uintptr(0))\n\t\t\tm[y][x] = &emptyFieldPointer\n\t\t}\n\t}\n\n\treturn &Map{\n\t\tfields: m,\n\t\tarea: a,\n\t}\n}", "func New() hctx.Map {\n\treturn hctx.Map{\n\t\tPathForKey: PathFor,\n\t}\n}", "func NewMap(key, value Type) *Map {\n\treturn &Map{\n\t\tkey: key,\n\t\tvalue: value,\n\t}\n}", "func NewMap(Rows, Cols int) *Map {\n\tm := &Map{\n\t\tRows: Rows, \n\t\tCols: Cols,\n\t\tWater: make(map[Location]bool),\n\t\titemGrid: make([]Item, Rows * Cols),\n\t}\n\tm.Reset()\n\treturn m\n}", "func NewMapRegistry() *MapRegistry {\n\treturn &MapRegistry{\n\t\ttracking: make(map[string]Compressor),\n\t}\n}", "func NewFlatMap(f FlatMapFunc, parallelism uint) *FlatMap {\n\tfmp := &FlatMap{\n\t\tf: f,\n\t\tin: make(chan interface{}),\n\t\tout: make(chan interface{}),\n\t\tparallelism: parallelism,\n\t}\n\tgo fmp.setup()\n\treturn fmp\n}", "func 
NewScanMap() *ScanMap {\n\treturn &ScanMap{\n\t\thashMap: map[Hash][]File{},\n\t}\n}", "func NewMapField(id int64, name string) *MapField {\n\treturn &MapField{\n\t\tID: id,\n\t\tName: name,\n\t\tRef: NewEmptyDynValue(),\n\t}\n}", "func NewDynamicMap(value interface{}) traits.Mapper {\n\treturn &baseMap{value, reflect.ValueOf(value)}\n}", "func (w *SimpleMapReduce) Map(mapFn MapFn) *SimpleMapReduce {\n w.mapFn = mapFn\n return w\n}", "func newPRMRemapIdentity(prefix, signedPrefix string) (*prmRemapIdentity, error) {\n\tif err := validateIdentityRemappingPrefix(prefix); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := validateIdentityRemappingPrefix(signedPrefix); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &prmRemapIdentity{\n\t\tprmCommon: prmCommon{Type: prmTypeRemapIdentity},\n\t\tPrefix: prefix,\n\t\tSignedPrefix: signedPrefix,\n\t}, nil\n}", "func (db *DatabaseModel) Mapper() *mapper.Mapper {\n\tdb.mutex.RLock()\n\tdefer db.mutex.RUnlock()\n\treturn db.mapper\n}", "func newRouter() *Router {\n\treturn &Router{routes: make([]*Route, 0)}\n}", "func newMap(src *map[string]interface{}) map[string]interface{} {\n\tdst := make(map[string]interface{})\n\tif src == nil {\n\t\treturn dst\n\t}\n\tfor k, v := range *src {\n\t\tif strings.HasPrefix(k, \"_\") {\n\t\t\tcontinue\n\t\t}\n\t\tdst[k] = v\n\t}\n\treturn dst\n}", "func spawnPartition2BrokerMapper(cid *ContextID, resolver brokerResolver) *partition2BrokerMapper {\n\tm := &partition2BrokerMapper{\n\t\tbaseCID: cid,\n\t\tresolver: resolver,\n\t\tworkerCreatedCh: make(chan partitionWorker),\n\t\tworkerClosedCh: make(chan partitionWorker),\n\t\tworkerReassignCh: make(chan partitionWorker),\n\t\tassignments: make(map[partitionWorker]brokerExecutor),\n\t\treferences: make(map[brokerExecutor]int),\n\t\tconnections: make(map[*Broker]brokerExecutor),\n\t\tclosingCh: make(chan none),\n\t}\n\tspawn(&m.wg, m.watch4Changes)\n\treturn m\n}", "func New(h crypto.Hash) hashers.MapHasher {\n\treturn &hasher{Hash: h}\n}", "func NewWith(keyComparator avltree.Comparator, valueComparator avltree.Comparator) *Map {\n\treturn &Map{\n\t\tforwardMap: *avltree.NewWithComparator(keyComparator),\n\t\tinverseMap: *avltree.NewWithComparator(valueComparator),\n\t\tkeyComparator: keyComparator,\n\t\tvalueComparator: valueComparator,\n\t}\n}", "func NewPostMap(ctx *middleware.Context, handler PostMapHandler) *PostMap {\n\treturn &PostMap{Context: ctx, Handler: handler}\n}", "func NewMapGenerator(size geo.Size) *MapGenerator {\n\treturn &MapGenerator{\n\t\tsize: size,\n\t\tarea: size.BuildArea(),\n\t}\n}", "func newJSONFormatterMapPool(full bool, keys map[string]string) JSONFormatterObjectPool {\n\treturn &jsonFormatterMapPool{\n\t\tfull: full, name: keys[\"name\"], time: keys[\"time\"], level: keys[\"level\"],\n\t\tmessage: keys[\"message\"], fields: keys[\"fields\"], caller: keys[\"caller\"], stack: keys[\"stack\"],\n\t}\n}", "func NewFromMap(v map[string]interface{}) Object {\n\treturn v\n}", "func NewParkingMap() *ParkingMap {\n\treturn &ParkingMap{\n\t\tEntry: []ParkingMapEntry{},\n\t}\n}", "func NewMap(values ...MalType) Map {\n\timm := immutable.NewMap(hasher{})\n\tif len(values) > 0 {\n\t\tb := immutable.NewMapBuilder(imm)\n\t\tfor i := 0; i < len(values); i += 2 {\n\t\t\tb.Set(values[i], values[i+1])\n\t\t}\n\t\timm = b.Map()\n\t}\n\treturn Map{Imm: imm}\n}", "func (c *Codec) NewMapNode(base NodeBase) (Node, error) {\n\tn := &MapNode{\n\t\tDirNodeBase: &DirNodeBase{\n\t\t\tNodeBase: base,\n\t\t},\n\t\tKeyType: base.Type.Key(),\n\t}\n\treturn n, 
errors.Wrap(n.AnalyseElemNode(n, c), \"analysing map element node\")\n}", "func NewMapWriter(formatStrategies *generic.FormatStrategies, levelSeparator string, target map[string]string) *generic.KeyValueWriter {\n\tif levelSeparator == \"\" {\n\t\tlevelSeparator = \".\"\n\t}\n\n\tif formatStrategies == nil {\n\t\tformatStrategies = &generic.DefaultFormatConvention\n\t}\n\n\treturn &generic.KeyValueWriter{\n\t\tKeyValueStorage: mapStorage(target),\n\t\tFormatStrategies: *formatStrategies,\n\t\tLevelSeparator: levelSeparator,\n\t}\n}", "func newOffsetInjector(c *cluster) *offsetInjector {\n\treturn &offsetInjector{c: c}\n}", "func (db *OlricDB) NewDMap(name string) *DMap {\n\treturn &DMap{\n\t\tname: name,\n\t\tdb: db,\n\t}\n}", "func NewMapDBMock() Repository {\n\t// our MapDB already woks as mock;) well, let's just use it for tests\n\t// if we will changed realization of Repository using real DB,\n\t// current MapDB will be moved here\n\trepo := NewMapDB()\n\n\t// fill repo with data\n\tfor _, item := range testItems {\n\t\trepo.CreateItem(item)\n\t}\n\n\treturn repo\n}" ]
[ "0.77114105", "0.76382065", "0.7555538", "0.6776838", "0.6700106", "0.6485153", "0.63133055", "0.6229891", "0.61909276", "0.61140144", "0.6087086", "0.6048961", "0.59599954", "0.59437037", "0.59334576", "0.58699393", "0.5846827", "0.58250123", "0.5816489", "0.58098704", "0.5783469", "0.57118446", "0.56995505", "0.5694767", "0.5653389", "0.5611892", "0.5590244", "0.5589249", "0.5581095", "0.5580526", "0.55726975", "0.55671173", "0.5548489", "0.5542197", "0.55278033", "0.55008394", "0.5499333", "0.54975224", "0.5462557", "0.5419172", "0.54010534", "0.5388347", "0.537426", "0.53588885", "0.52985966", "0.5295673", "0.52945495", "0.5294002", "0.5287574", "0.528233", "0.5280459", "0.5276675", "0.5258099", "0.52385926", "0.5237592", "0.52374953", "0.5234144", "0.5229535", "0.5219228", "0.5202028", "0.5200568", "0.5197695", "0.51792663", "0.51703364", "0.5165296", "0.51591367", "0.51465946", "0.51430714", "0.5139792", "0.51370543", "0.5135575", "0.51251835", "0.5121931", "0.5118189", "0.51056105", "0.5097526", "0.5093105", "0.50811744", "0.5079933", "0.50585306", "0.5057969", "0.5050767", "0.504806", "0.5045812", "0.5033142", "0.5028494", "0.50203544", "0.50077116", "0.4999969", "0.4994261", "0.49889916", "0.49651897", "0.4955335", "0.49521595", "0.49520865", "0.49476743", "0.49455374", "0.49389192", "0.49349177", "0.49305138" ]
0.7831975
0
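Annotation (not part of the dataset row above): the positive document for this row is a plain constructor that wires unbuffered channels into a `mapper` struct. A runnable sketch of how that constructor would be used, with stand-in `Executor` and `DataType` definitions — those two types live in the source repository and are assumptions here; only the `mapper` fields and `newMapper` body are taken from the row's document:

```go
package main

import "fmt"

// Executor and DataType are minimal stand-ins so the snippet compiles;
// their real definitions are not part of this dataset row.
type Executor struct{}
type DataType int

// mapper mirrors the fields assigned in the row's positive document.
type mapper struct {
	executor *Executor
	seriesID uint32
	fieldID  uint8
	typ      DataType
	c        chan map[string]interface{} // result channel
	done     chan chan struct{}          // shutdown handshake channel
}

func newMapper(e *Executor, seriesID uint32, fieldID uint8, typ DataType) *mapper {
	return &mapper{
		executor: e,
		seriesID: seriesID,
		fieldID:  fieldID,
		typ:      typ,
		c:        make(chan map[string]interface{}, 0), // capacity 0: unbuffered
		done:     make(chan chan struct{}, 0),
	}
}

func main() {
	m := newMapper(&Executor{}, 1, 2, DataType(0))
	fmt.Println(m.seriesID, m.fieldID) // prints: 1 2
}
```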
start begins processing the iterator.
func (m *mapper) start() { m.itr = m.executor.db.CreateIterator(m.seriesID, m.fieldID, m.typ, m.executor.min, m.executor.max, m.executor.interval) go m.run() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (w *Walker) startProcessing() {\n\tdoStart := false\n\tw.pipe.RLock()\n\tif w.pipe.filters == nil { // no processing up to now => start with initial node\n\t\tw.pipe.pushSync(w.initial, 0) // input is buffered, will return immediately\n\t\tdoStart = true // yes, we will have to start the pipeline\n\t}\n\tw.pipe.RUnlock()\n\tif doStart { // ok to be outside mutex as other goroutines will check pipe.empty()\n\t\tw.pipe.startProcessing() // must be outside of mutex lock\n\t}\n}", "func (iterator *Iterator) Begin() {\n\titerator.iterator.Begin()\n}", "func (w *Worker) startReader() {\n\tdump, err := os.Open(w.InputFile)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdecoder := xml.NewDecoder(dump)\n\n\tfor {\n\t\tt, _ := decoder.Token()\n\t\tif t == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t// Inspect the type of the token just read.\n\t\tswitch se := t.(type) {\n\t\tcase xml.StartElement:\n\t\t\tif se.Name.Local == \"page\" {\n\t\t\t\tvar p Page\n\t\t\t\tdecoder.DecodeElement(&p, &se)\n\n\t\t\t\tfound := find(seen, p.Title)\n\t\t\t\tif found {\n\t\t\t\t\tlog.Printf(\"Duplicate title: %s. Skipping...\", p.Title)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tw.InPage <- &p\n\t\t\t}\n\t\t}\n\t}\n\n\t// Close the channels associated with reading/writing\n\tclose(w.InPage)\n\tlog.Println(\"Reader done\")\n}", "func (p *literalProcessor) start() { go p.run() }", "func (r *reducer) start() {\n\tfor _, m := range r.mappers {\n\t\tm.start()\n\t}\n\tgo r.run()\n}", "func (graphMinion *graphMinion) start() {\n\tgo func() {\n\t\tdefer graphMinion.wg.Done()\n\t\tfor {\n\n\t\t\t// pull reads from queue until done\n\t\t\tmappingData, ok := <-graphMinion.inputChannel\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif mappingData == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// increment the nodes contained in the mapping window\n\t\t\tmisc.ErrorCheck(graphMinion.graph.IncrementSubPath(mappingData.ContainedNodes, mappingData.Freq))\n\t\t}\n\t}()\n}", "func (pr *PeriodicReader) start(ctx context.Context) {\n\tdefer pr.wait.Done()\n\tticker := time.NewTicker(pr.interval)\n\tfor {\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tif err := pr.collectWithTimeout(ctx, pr.exporter.ExportMetrics); err != nil {\n\t\t\t\totel.Handle(err)\n\t\t\t}\n\t\t}\n\t}\n}", "func (s *BasecluListener) EnterIterator(ctx *IteratorContext) {}", "func (root *mTreap) start(mask, match treapIterType) treapIter {\n\tf := treapFilter(mask, match)\n\treturn treapIter{f, root.treap.findMinimal(f)}\n}", "func (mi *MinerIndex) start() {\n\tdefer func() { mi.finished <- struct{}{} }()\n\n\tif err := mi.updateOnChainIndex(); err != nil {\n\t\tlog.Errorf(\"error on initial updating miner index: %s\", err)\n\t}\n\tmi.chMeta <- struct{}{}\n\tfor {\n\t\tselect {\n\t\tcase <-mi.ctx.Done():\n\t\t\tlog.Info(\"graceful shutdown of background miner index\")\n\t\t\treturn\n\t\tcase <-time.After(metadataRefreshInterval):\n\t\t\tselect {\n\t\t\tcase mi.chMeta <- struct{}{}:\n\t\t\tdefault:\n\t\t\t\tlog.Info(\"skipping meta index update since it's busy\")\n\t\t\t}\n\t\tcase <-time.After(util.AvgBlockTime):\n\t\t\tif err := mi.updateOnChainIndex(); err != nil {\n\t\t\t\tlog.Errorf(\"error when updating miner index: %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}", "func (t *tcParser) start() {\n\tt.logger.Info(\"start(): Starting the tc_reader.\")\n\tconfigTemplate := \"tc_reader configuration: tcCmdPath: %s parseInterval: %d tcQdiscStats: %s tcClassStats: %s ifaces: %s userNameClass: 
%v\"\n\tt.logIfDebug(fmt.Sprintf(configTemplate, t.options.tcCmdPath(), t.options.parseInterval(), t.options.tcQdiscStats(), t.options.tcClassStats(), t.options.ifaces(), t.options.userNameClass()))\n\t// One initial run of TC execution and parsing.\n\tt.parseTc()\n\n\tgo func() {\n\t\tfor range time.Tick(time.Duration(t.options.parseInterval()) * time.Second) {\n\t\t\tt.parseTc()\n\t\t}\n\t}()\n}", "func (r *reaper) start() {\n\tgo r.runLoop()\n}", "func (w *Processor) start() {\n\tfor {\n\t\tselect {\n\t\tcase job, ok := <-w.jobQueue:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tw.limiter <- empty{}\n\n\t\t\t// Spawn a worker goroutine.\n\t\t\tgo func() {\n\t\t\t\tif err := job.Run(); err != nil {\n\t\t\t\t\tw.jobErrorHandler(err)\n\t\t\t\t}\n\t\t\t\t<-w.limiter\n\t\t\t}()\n\t\tcase <-w.stop:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (c *Communicator) startProcessing() bool {\n\tc.runningLock.RLock()\n\tdefer c.runningLock.RUnlock()\n\tif c.running {\n\t\tc.pending.Add(1)\n\t}\n\treturn c.running\n}", "func (w *worker) start() {\n\tatomic.StoreInt32(&w.running, 1)\n\tw.startCh <- struct{}{}\n}", "func (i *Ingester) Start(ctx context.Context) error {\n\tconcurrentProc := make(chan bool, nConcurrentProcessors)\n\tresultChan, err := i.getInputChannel(ctx)\n\tif err != nil {\n\t\treturn sklog.FmtErrorf(\"Error retrieving input channel: %s\", err)\n\t}\n\n\t// Continuously catch events from all input sources and push the data to the processor.\n\tgo func(doneCh <-chan bool) {\n\t\tvar resultFile ResultFileLocation = nil\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase resultFile = <-resultChan:\n\t\t\tcase <-doneCh:\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// get a slot in line to call Process\n\t\t\tconcurrentProc <- true\n\t\t\tgo func(resultFile ResultFileLocation) {\n\t\t\t\tdefer func() { <-concurrentProc }()\n\t\t\t\ti.processResult(ctx, resultFile)\n\t\t\t}(resultFile)\n\t\t}\n\t}(i.doneCh)\n\treturn nil\n}", "func (f *filtererProcessor) Start(ctx context.Context) {\n\tctx = f.StartInternal(ctx, filtererProcName)\n\tf.input.Start(ctx)\n}", "func (p *Pipe) start() {\n\tp.cancel = make(chan struct{})\n\terrcList := make([]<-chan error, 0, 1+len(p.processors)+len(p.sinks))\n\t// start pump\n\tout, errc := p.pump.run(p.cancel, p.ID(), p.provide, p.consume, p.sampleRate, p.metric)\n\terrcList = append(errcList, errc)\n\n\t// start chained processesing\n\tfor _, proc := range p.processors {\n\t\tout, errc = proc.run(p.cancel, p.ID(), out, p.sampleRate, p.metric)\n\t\terrcList = append(errcList, errc)\n\t}\n\n\tsinkErrcList := p.broadcastToSinks(out)\n\terrcList = append(errcList, sinkErrcList...)\n\tp.errc = mergeErrors(errcList...)\n}", "func (s *seriesValueGenerator) Start() error { return nil }", "func (er *BufferedExchangeReporter) Start() {\n\n}", "func (w *watcher) start(results <-chan api.WatchEvent) {\n\tlog.WithField(\"Name\", w.name).Info(\"start watcher\")\n\tw.watcherRunningWg.Add(1)\n\tgo w.run(results)\n}", "func (actor *Actor) start(idx, n int) {\n\tif idx == n {\n\t\treturn\n\t}\n\n\t// worker number starts from 1\n\tgo actor.work(idx + 1)\n\tactor.start(idx+1, n)\n}", "func nextStart(p *xml.Decoder) (elem xml.StartElement, err error) {\n\tfor {\n\t\tvar t xml.Token\n\t\tt, err = p.Token()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tswitch t := t.(type) {\n\t\tcase xml.StartElement:\n\t\t\telem = t\n\t\t\treturn\n\t\t}\n\t}\n\tpanic(\"unreachable\")\n}", "func (hm HashMap) StartIter() (iter hashMapIter) {\n\titer = hashMapIter{&hm, 0, hm.data[0].Front()}\n\titer.findNext()\n\treturn\n}", 
"func (this *service) processor() {\n\tthis.logger.Debugf(\"(%s) Starting processor\", this.cid())\n\n\tthis.wgStarted.Done()\n\tdefer this.wgStopped.Done()\n\n\tfor {\n\t\t// 1. Find out what message is next and the size of the message\n\t\tmtype, total, err := this.peekMessageSize()\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tthis.logger.Errorf(\"(%s) Error peeking next message size: %v\", this.cid(), err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tmsg, n, err := this.peekMessage(mtype, total)\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tthis.logger.Errorf(\"(%s) Error peeking next message: %v\", this.cid(), err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\t//this.logger.Debugf(\"(%s) Received: %s\", this.cid(), msg)\n\n\t\tthis.inStat.increment(int64(n))\n\n\t\t// 5. Process the read message\n\t\terr = this.processIncoming(msg)\n\t\tif err != nil {\n\t\t\tif err != errDisconnect {\n\t\t\t\tthis.logger.Errorf(\"(%s) Error processing %s: %v\", this.cid(), msg.Name(), err)\n\t\t\t} else {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// 7. We should commit the bytes in the buffer so we can move on\n\t\t_, err = this.in.ReadCommit(total)\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tthis.logger.Errorf(\"(%s) Error committing %d read bytes: %v\", this.cid(), total, err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\t// 7. Check to see if done is closed, if so, exit\n\t\tif this.isDone() && this.in.Len() == 0 {\n\t\t\treturn\n\t\t}\n\n\t\t//if this.inStat.msgs%1000 == 0 {\n\t\t//\tthis.logger.Debugf(\"(%s) Going to process message %d\", this.cid(), this.inStat.msgs)\n\t\t//}\n\t}\n}", "func (c *Connection) nextStart() (xml.StartElement, error) {\n\tfor {\n\t\tt, err := c.decoder.Token()\n\t\tif err != nil || t == nil {\n\t\t\treturn xml.StartElement{}, err\n\t\t}\n\t\tswitch t := t.(type) {\n\t\tcase xml.StartElement:\n\t\t\treturn t, nil\n\t\t}\n\t}\n}", "func (e *binaryExprEvaluator) start() {\n\te.lhs.start()\n\te.rhs.start()\n\tgo e.run()\n}", "func (b *Basic) start() {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tfmt.Fprintln(os.Stderr, \"Basic.start -- \", r)\n\t\t\tgo b.start()\n\t\t}\n\t}()\n\n\tfor rec := range b.in {\n\t\tif rec.flush != nil {\n\t\t\tb.flush(rec.flush)\n\t\t} else {\n\t\t\terr := b.w.Write(rec)\n\t\t\tif err != nil {\n\t\t\t\tb.incErrorCounter()\n\t\t\t\trec.Logger().Logr().ReportError(err)\n\t\t\t} else {\n\t\t\t\tb.incLoggedCounter()\n\t\t\t}\n\t\t}\n\t}\n\tclose(b.done)\n}", "func (m *Matches) Start() {\n\tfor {\n\t\tm.process(m.Queue.Poll())\n\t}\n}", "func (px *Paxos) Start(seq int, v interface{}) {\n\t// Your code here.\n\n\tif seq < px.Min() {\n\t\treturn\n\t}\n\t//args := RequestArgs{1, nil, 3, \"12\"}\n\tgo func(v interface{}, seq int) {\n\t\tnode, ok := px.prepareStatus.Find(seq)\n\t\tfor !ok || !node.State.Done {\n\t\t\t//choose unique n\n\t\t\tn := (int(time.Now().Unix()) << 5) | px.me\n\t\t\taok := false\n\t\t\t//send prepare to all\n\t\t\tvalue, pok := prepare(n, px, v, seq)\n\t\t\t//send accept to all\n\t\t\tif pok {\n\t\t\t\taok = accept(n, px, value, seq)\n\t\t\t\t//log.Printf(\"the seq is %d,the number is %d and the aok is %t\", seq, n, aok)\n\t\t\t}\n\t\t\tif aok {\n\t\t\t\t//send decide to all\n\t\t\t\tdecided(seq, px, value)\n\t\t\t}\n\t\t\t//time.Sleep(20 * time.Millisecond)\n\t\t\tnode, ok = px.prepareStatus.Find(seq)\n\t\t}\n\t}(v, seq)\n\n}", "func (sp *StreamPool) Start() {\n\tfor {\n\t\tselect {\n\t\tcase <-sp.quitCh:\n\t\t\tsp.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}", "func (s *CancelableScanner) Start() *CancelableScanner 
{\n\tgo func() {\n\t\tfor s.Scan() {\n\t\t\ts.data <- s.Text()\n\t\t}\n\t\tif err := s.Err(); err != nil {\n\t\t\ts.err <- err\n\t\t}\n\t\tclose(s.data)\n\t\tclose(s.err)\n\t}()\n\treturn s\n}", "func (l *lexer) run() {\n\tfor state := lexStart; state != nil; {\n\t\tstate = state(l)\n\t}\n}", "func (ec *EventsCache) start() {\n\tsafego.RunWithRestart(func() {\n\t\tfor {\n\t\t\tif ec.closed {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tcf := <-ec.originalCh\n\t\t\tec.put(cf.destinationId, cf.eventId, cf.eventFact)\n\t\t}\n\t})\n\n\tsafego.RunWithRestart(func() {\n\t\tfor {\n\t\t\tif ec.closed {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tcf := <-ec.succeedCh\n\t\t\tec.succeed(cf.destinationId, cf.eventId, cf.processed, cf.table, cf.types)\n\t\t}\n\t})\n\n\tsafego.RunWithRestart(func() {\n\t\tfor {\n\t\t\tif ec.closed {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tcf := <-ec.failedCh\n\t\t\tec.error(cf.destinationId, cf.eventId, cf.error)\n\t\t}\n\t})\n}", "func (o *influxDBLogger) start() error {\n\treturn o.tick()\n}", "func (l *Log) start(ch chan<- *Event) {\n\tdefer close(ch)\n\n\tl.Log.Debug(\"enter\")\n\tdefer l.Log.Debug(\"exit\")\n\n\tvar start = l.StartTime.UnixNano() / int64(time.Millisecond)\n\tvar nextToken *string\n\tvar err error\n\n\tfor {\n\t\tl.Log.WithField(\"start\", start).Debug(\"request\")\n\t\tnextToken, start, err = l.fetch(nextToken, start, ch)\n\n\t\tif err != nil {\n\t\t\tl.err = fmt.Errorf(\"log %q: %s\", l.GroupName, err)\n\t\t\tbreak\n\t\t}\n\n\t\tif nextToken == nil && l.Follow {\n\t\t\ttime.Sleep(l.PollInterval)\n\t\t\tl.Log.WithField(\"start\", start).Debug(\"poll\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif nextToken == nil {\n\t\t\tbreak\n\t\t}\n\t}\n}", "func (e *ElkTimeseriesForwarder) start() {\n\n\tlog.L.Infof(\"Starting event forwarder for %v\", e.index())\n\tticker := time.NewTicker(e.interval)\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\t//send it off\n\t\t\tlog.L.Debugf(\"Sending bulk ELK update for %v\", e.index())\n\n\t\t\tgo forward(e.index(), e.url, e.buffer)\n\t\t\te.buffer = []ElkBulkUpdateItem{}\n\n\t\tcase event := <-e.incomingChannel:\n\t\t\te.bufferevent(event)\n\t\t}\n\t}\n}", "func (c *Collector) Start() {\n\tgo c.Source.Start()\n\tc.collect()\n}", "func (fnc *FileNameConsumer) Start() {\n\tgo func() {\n\t\tfnc.wg.Add(1)\n\t\tdefer fnc.wg.Done()\n\n\t\tfor filename := range fnc.incoming {\n\t\t\tfnc.wg.Add(1)\n\t\t\tgo fnc.consume(filename)\n\t\t}\n\t}()\n}", "func (g *Gosmonaut) Start(\n\ttypes OSMTypeSet,\n\tfuncEntityNeeded func(OSMType, OSMTags) bool,\n) {\n\t// Block until previous run finished\n\tg.lock.Lock()\n\tg.stream = make(chan osmPair, entitiesPerPrimitiveBlock)\n\n\t// Init vars\n\tg.funcEntityNeeded = funcEntityNeeded\n\tg.types = types\n\n\tgo func() {\n\t\t// Decode\n\t\tg.decode()\n\n\t\t// Finish\n\t\tclose(g.stream)\n\t\tg.lock.Unlock()\n\t}()\n}", "func (t *Transport) start(msg Message, stream *Stream, out []byte) (n int) {\n\tatomic.AddUint64(&t.nTxstart, 1)\n\tn = tag2cbor(tagCborPrefix, out) // prefix\n\tn += arrayStart(out[n:]) // 0x9f (start stream as cbor array)\n\tn += t.framepkt(msg, stream, out[n:]) // packet\n\treturn n\n}", "func (s *Index) start() {\n\tdefer close(s.finished)\n\tif err := s.updateIndex(); err != nil {\n\t\tlog.Errorf(\"error on first updating slashing history: %s\", err)\n\t}\n\tfor {\n\t\tselect {\n\t\tcase <-s.ctx.Done():\n\t\t\tlog.Info(\"graceful shutdown of background slashing updater\")\n\t\t\treturn\n\t\tcase <-time.After(util.AvgBlockTime):\n\t\t\tif err := s.updateIndex(); err != nil {\n\t\t\t\tlog.Errorf(\"error 
when updating slashing history: %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}", "func nextStart(p *xml.Decoder) (xml.StartElement, error) {\n\tfor {\n\t\tt, err := p.Token()\n\t\tif err == io.EOF {\n\t\t\treturn xml.StartElement{}, nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn xml.StartElement{}, fmt.Errorf(\"nextStart %s\", err)\n\t\t}\n\t\tswitch t := t.(type) {\n\t\tcase xml.StartElement:\n\t\t\treturn t, nil\n\t\t}\n\t}\n\tpanic(\"unreachable\")\n}", "func (px *Paxos) Start(seq int, v interface{}) {\n\t// Your code here.\n\t// TBD: check seq < px.Min() in px.Propose? Start should return immidiately\n\tgo px.Propose(seq, v)\n}", "func (collector *Collector) Start() {\n\t// Begin our internal processing first\n\tgo collector.process()\n\n\t// Start the prospector to start collecting data\n\tcollector.prospector.Start()\n}", "func (st *buildStatus) start() {\n\tsetStatus(st.builderRev, st)\n\tgo func() {\n\t\terr := st.build()\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(st, \"\\n\\nError: %v\\n\", err)\n\t\t\tlog.Println(st.builderRev, \"failed:\", err)\n\t\t}\n\t\tst.setDone(err == nil)\n\t\tst.buildRecord().put()\n\t\tmarkDone(st.builderRev)\n\t}()\n}", "func (px *Paxos) Start(seq int, v interface{}) {\n\tif seq >= px.Min() {\n\t\tpx.mu.Lock()\n\t\tpx.mu.Unlock()\n\t\tpx.proposer(seq, v) // THIS IS OUR CODE - changed from go\n\t}\n\n}", "func (t *FakeObjectTracker) Start() error {\n\tif t.FakeWatcher == nil {\n\t\treturn errors.New(\"tracker has no watch support\")\n\t}\n\n\tfor event := range t.ResultChan() {\n\t\tevent := event.DeepCopy() // passing a deep copy to avoid race.\n\t\tt.dispatch(event)\n\t}\n\n\treturn nil\n}", "func (p *Processor) Start() {\n\tp.setDefaults()\n\tdispatcher := p.dispatcher\n\tdispatcher.Run()\n\tstopChan := p.stopChan\n\nLOOP:\n\tfor {\n\t\tbuffer := p.byteArrayPool.Get()\n\t\trlen, remote, err := p.Conn.ReadFrom(buffer)\n\t\tif err != nil {\n\t\t\tif p.isCloseError(err) {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tpanic(err)\n\t\t}\n\t\t_, _ = rlen, remote\n\t\tdispatcher.SubmitJob(buffer)\n\t\tselect {\n\t\tcase <-stopChan:\n\t\t\tbreak LOOP\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tdispatcher.Stop()\n}", "func (tr *tableReader) Start(ctx context.Context) {\n\tif tr.FlowCtx.Txn == nil {\n\t\tlog.Fatalf(ctx, \"tableReader outside of txn\")\n\t}\n\n\tctx = tr.StartInternal(ctx, tableReaderProcName)\n\n\tlimitBatches := !tr.parallelize\n\tlog.VEventf(ctx, 1, \"starting scan with limitBatches %t\", limitBatches)\n\tvar err error\n\tif tr.maxTimestampAge == 0 {\n\t\terr = tr.fetcher.StartScan(\n\t\t\tctx, tr.FlowCtx.Txn, tr.spans, limitBatches, tr.limitHint,\n\t\t\ttr.FlowCtx.TraceKV,\n\t\t\ttr.EvalCtx.TestingKnobs.ForceProductionBatchSizes,\n\t\t)\n\t} else {\n\t\tinitialTS := tr.FlowCtx.Txn.ReadTimestamp()\n\t\terr = tr.fetcher.StartInconsistentScan(\n\t\t\tctx, tr.FlowCtx.Cfg.DB, initialTS, tr.maxTimestampAge, tr.spans,\n\t\t\tlimitBatches, tr.limitHint, tr.FlowCtx.TraceKV,\n\t\t\ttr.EvalCtx.TestingKnobs.ForceProductionBatchSizes,\n\t\t)\n\t}\n\n\tif err != nil {\n\t\ttr.MoveToDraining(err)\n\t}\n}", "func (_m *MarkerConsumer) Start() {\n\t_m.Called()\n}", "func (e *quotaEvaluator) start() {\n\tdefer utilruntime.HandleCrash()\n\n\tfor i := 0; i < e.workers; i++ {\n\t\tgo wait.Until(e.doWork, time.Second, e.stopCh)\n\t}\n}", "func (p *SingleLineParser) run() {\n\tfor input := range p.inputChan {\n\t\tp.process(input)\n\t}\n\tp.lineHandler.Stop()\n}", "func (x *x509Handler) start() {\n\tgo x.handleUpdates()\n}", "func (st *buildStatus) start() 
{\n\tsetStatus(st.BuilderRev, st)\n\tgo func() {\n\t\terr := st.build()\n\t\tif err == errSkipBuildDueToDeps {\n\t\t\tst.setDone(true)\n\t\t} else {\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(st, \"\\n\\nError: %v\\n\", err)\n\t\t\t\tlog.Println(st.BuilderRev, \"failed:\", err)\n\t\t\t}\n\t\t\tst.setDone(err == nil)\n\t\t\tpool.CoordinatorProcess().PutBuildRecord(st.buildRecord())\n\t\t}\n\t\tmarkDone(st.BuilderRev)\n\t}()\n}", "func (e *exec) start(ctx context.Context) {\n\t// Lock the mutex to prevent race conditions with Stop\n\te.execMutex.Lock()\n\tdefer e.execMutex.Unlock()\n\n\t// Do the startup sequence once until the shutdown sequence resets\n\te.startOnce.Do(func() {\n\t\tdefer func() {\n\t\t\t// reset stopOnce so the shutdown sequence can happen again\n\t\t\te.stopOnce = sync.Once{}\n\t\t}()\n\t\te.startFn(ctx)\n\t})\n}", "func (inNode *InputNode) Start() {\n}", "func (lw *LazyWriter) Start() {\n\tif lw.state == running {\n\t\treturn\n\t}\n\n\tlw.state = running\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-lw.ticker.C:\n\t\t\t\tlw.processWriter()\n\t\t\t}\n\t\t}\n\t}()\n}", "func (f *Processor) Start() {\n\tf.orderedInserter.Start(1)\n\tf.unorderedInserters.Start(f.cfg.MaxUnorderedInsertions)\n}", "func (t *Task) start(runnable func() error) {\n\tt.group.mutex.Lock()\n\tt.group.running[t] = true\n\tt.state = stateRunning\n\tt.group.mutex.Unlock()\n\n\tgo t.run(runnable)\n}", "func (c *MsgConnection) beginEventProcessing(ctx context.Context) {\n\tvar (\n\t\terr error\n\t\tbuf = make([]byte, maxMessageSize)\n\t\tn int\n\t)\n\tfor {\n\t\tn, err = c.r.Read(buf)\n\t\tif err != nil && err != io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif c.session == nil {\n\t\t\terr = ErrTriedToSetupExistingConn\n\t\t\tbreak\n\t\t}\n\t\tif n > 0 {\n\t\t\tnewbuf := make([]byte, n)\n\t\t\tcopy(newbuf, buf)\n\n\t\t\t// Create a new requestId for context\n\t\t\tc.publish(log.WithNewRequestID(ctx), newbuf)\n\t\t}\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tif cerr := c.Close(); cerr != ErrAlreadyClosed {\n\t\tc.networker.publishClosingConnection(ConnectionWithErr{c, err})\n\t}\n}", "func (c *ReadgroupsetsCoveragebucketsListCall) Start(start int64) *ReadgroupsetsCoveragebucketsListCall {\n\tc.urlParams_.Set(\"start\", fmt.Sprint(start))\n\treturn c\n}", "func (this *DeployLock) start() {\n\tthis.mutex.Lock()\n\tthis.numStarted++\n\tthis.mutex.Unlock()\n}", "func (g *Game) start() {\n\tswitch g.state {\n\tcase gameStarted:\n\t\treturn\n\tdefault:\n\t\tg.state = gameStarted\n\t\tg.getDot()\n\t\tg.getPiece()\n\t\tg.placePiece()\n\t\tg.fillMatrix()\n\n\t}\n}", "func (r *InMemorySourceReader) Begin() {\n\tr.iStack.PushBack(r.i)\n}", "func (px *Paxos) Start(seq int, v interface{}) {\n\t//DPrintf(\"Start(%d, %v)\\n\", seq, v)\n\tgo px.propose(seq, v)\n}", "func (c *Collection) Start() {\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\n\tc.start()\n}", "func startCrawling(start string) {\n\tcheckIndexPresence()\n\n\tvar wg sync.WaitGroup\n\tnoOfWorkers := 10\n\n\t// Send first url to the channel\n\tgo func(s string) {\n\t\tqueue <- s\n\t}(start)\n\n\t// Create worker pool with noOfWorkers workers\n\twg.Add(noOfWorkers)\n\tfor i := 1; i <= noOfWorkers; i++ {\n\t\tgo worker(&wg, i)\n\t}\n\twg.Wait()\n}", "func (gq *Dispatch) next() {\n for true {\n // Attempt to start processing the file.\n gq.pLock.Lock()\n if gq.processing >= gq.MaxGo {\n gq.waitingToRun = true\n gq.nextWait.Add(1)\n gq.pLock.Unlock()\n gq.nextWait.Wait()\n continue\n }\n // Keep the books and reset wait time before unlocking.\n 
gq.processing++\n gq.pLock.Unlock()\n\n // Get an element from the queue.\n gq.qLock.Lock()\n var wrapper = gq.queue.Dequeue().(queues.RegisteredTask)\n gq.qLock.Unlock()\n\n // Begin processing and asyncronously return.\n //var task = taskelm.Value.(dispatchTaskWrapper)\n var task = wrapper.Func()\n go task(wrapper.Id())\n return\n }\n}", "func (r *RecordStream) Start() {\n\tif r.state == idle {\n\t\tr.err = nil\n\t\tr.c.c.Request(&proto.FlushRecordStream{StreamIndex: r.index}, nil)\n\t\tr.c.c.Request(&proto.CorkRecordStream{StreamIndex: r.index, Corked: false}, nil)\n\t\tr.state = running\n\t}\n}", "func (s *schedule) start() {\n\tif s.running {\n\t\treturn\n\t}\n\n\ts.running = true\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-s.ticker.C:\n\t\t\t\tcallJobFuncWithParams(s.jobFunc, s.jobParams)\n\t\t\tcase <-s.stopCh:\n\t\t\t\ts.ticker.Stop()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func (j *janitor) start(wg *sync.WaitGroup) {\n\twg.Add(1)\n\ttimer := time.NewTimer(j.avgInterval) // randomize this interval with margin of 1s\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-j.done:\n\t\t\t\tj.logger.Debug(\"Janitor done\")\n\t\t\t\treturn\n\t\t\tcase <-timer.C:\n\t\t\t\tj.exec()\n\t\t\t\ttimer.Reset(j.avgInterval)\n\t\t\t}\n\t\t}\n\t}()\n}", "func (f *FakeOutput) Start(_ operator.Persister) error { return nil }", "func (fc *appendFlowControl) start(res *resolution, recv func() (*pb.AppendRequest, error)) func() (*pb.AppendRequest, error) {\n\tfc.reset(res, timeNow().UnixNano()/1e6)\n\tfc.ticker = time.NewTicker(flowControlQuantum)\n\n\t// Pump calls to |recv| in a goroutine, as they may block indefinitely.\n\t// We expect that |recv| is tied to a Context which will be cancelled\n\t// upon the returned closure returning an error, so these don't actually\n\t// hang around indefinitely.\n\tgo func(ch chan<- appendChunk) {\n\t\tfor {\n\t\t\tvar req, err = recv()\n\t\t\tch <- appendChunk{req: req, err: err}\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}(fc.chunkCh)\n\n\treturn fc.recv\n}", "func (dr *NullReader) Start(reader io.ReadCloser) (err error) {\n\told := atomic.SwapInt32(&dr.atom, 1)\n\tif old == 1 {\n\t\treturn fmt.Errorf(\"Start already called\")\n\t}\n\n\tdr.r = reader\n\tdefer reader.Close()\n\n\tbuf := make([]byte, defaultBufferSize)\n\n\tfor {\n\t\t_, err = dr.r.Read(buf)\n\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tatomic.SwapInt32(&dr.atom, 0)\n\treturn\n}", "func (f *Footer) StartIterator(startKeyIncl, endKeyExcl []byte,\n\titeratorOptions IteratorOptions) (Iterator, error) {\n\t_, ss := f.segmentLocs()\n\tif ss == nil {\n\t\tf.DecRef()\n\t\treturn nil, nil\n\t}\n\n\titer, err := ss.StartIterator(startKeyIncl, endKeyExcl, iteratorOptions)\n\tif err != nil || iter == nil {\n\t\tf.DecRef()\n\t\treturn nil, err\n\t}\n\n\tinitCloser, ok := iter.(InitCloser)\n\tif !ok || initCloser == nil {\n\t\titer.Close()\n\t\tf.DecRef()\n\t\treturn nil, ErrUnexpected\n\t}\n\n\terr = initCloser.InitCloser(f)\n\tif err != nil {\n\t\titer.Close()\n\t\tf.DecRef()\n\t\treturn nil, err\n\t}\n\n\treturn iter, nil\n}", "func (_m *MockCompactionPlanContext) start() {\n\t_m.Called()\n}", "func (r *Resizer) Start() {\n\tc := r.kafkaConsumer.Consumer\n\tc.SubscribeTopics([]string{r.imageResizeTopic}, nil)\n\n\tfor {\n\t\tmsg, err := c.ReadMessage(-1)\n\t\tif err == nil {\n\t\t\tr.logger.Debug(\"Recived imaged resize request: \", string(msg.Key))\n\t\t\timage := &resources.Image{}\n\t\t\terr := json.Unmarshal(msg.Value, image)\n\t\t\tif err != 
nil {\n\t\t\t\tr.logger.Error(err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Resize the image:\n\t\t\tr.resizeImage(image)\n\t\t} else {\n\t\t\t// The client will automatically try to recover from all errors.\n\t\t\tr.logger.Error(\"Consumer error: %v (%v)\\n\", err, msg)\n\t\t}\n\t}\n}", "func (dt *discoveryTool) start() {\n\tvar (\n\t\terr error\n\t\tdata map[string]interface{}\n\t\tlastData map[string]interface{}\n\t)\n\n\t//Initializing channel\n\tdt.done = make(chan bool)\n\n\tdt.wg.Add(1)\n\tgo func() {\n\t\tdefer dt.wg.Done()\n\n\t\tticker := time.NewTicker(dt.interval)\n\t\tdefer ticker.Stop()\n\n\t\tlmtr := limiter.NewRateLimiter(dt.rateLimit, time.Second)\n\t\tdefer lmtr.Stop()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-dt.done:\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tdata, err = dt.getDiscoveryDataAcrossRegions(lmtr.C)\n\t\t\t\tif err != nil {\n\t\t\t\t\tdt.lg.Errorf(\"Can't get discovery data: %v\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif !reflect.DeepEqual(data, lastData) {\n\t\t\t\t\tlastData = nil\n\t\t\t\t\tlastData = map[string]interface{}{}\n\t\t\t\t\tfor k, v := range data {\n\t\t\t\t\t\tlastData[k] = v\n\t\t\t\t\t}\n\n\t\t\t\t\t//send discovery data in blocking mode\n\t\t\t\t\tdt.dataChan <- data\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n}", "func (this *MultiMap) Begin() KvIterator {\n\treturn this.First()\n}", "func (p *parser) run() {\n\tfor parserState := parseStart; parserState != nil; {\n\t\tparserState = parserState(p)\n\t}\n\tclose(p.records)\n}", "func (s *Stats) Start(done <-chan bool) {\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\ts.scrape()\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tt := time.NewTicker(10 * time.Second)\n\t\tdefer t.Stop()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\tcase <-t.C:\n\t\t\t\tif err := s.scan(); err != nil {\n\t\t\t\t\tlog.Debug(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n}", "func (proc *schedulerProcess) start() {\t\n\tproc.server.Addr = fmt.Sprintf(\"%s:%d\", localIP4String(), nextTcpPort())\n\tproc.processId = newSchedProcID(proc.server.Addr)\n\tproc.registerEventHandlers()\n\tgo proc.server.ListenAndServe()\n}", "func (m *mapper) run() {\n\tfor m.itr.NextIterval() {\n\t\tm.fn(m.itr, m)\n\t}\n\tclose(m.c)\n}", "func (j *Job) start() {\n\tgo func() {\n\t\tfor {\n\t\t\t// Check if runImmediately is set on the first run\n\t\t\tif j.firstRun && j.runImmediately {\n\t\t\t\tj.fn()\n\t\t\t}\n\t\t\tj.firstRun = false\n\n\t\t\t// Sleep for the predetermined time.\n\t\t\ttime.Sleep(j.delay)\n\n\t\t\tselect {\n\t\t\t// Check for the 'stop' signal.\n\t\t\tcase <-j.stop:\n\t\t\t\treturn\n\n\t\t\t// Execute the function.\n\t\t\tdefault:\n\t\t\t\tj.fn()\n\t\t\t}\n\t\t}\n\t}()\n}", "func (c *Chunker) Start() {\n\t// No entries in the hash table.\n\tfor i := 0; i < len(c.ht); i++ {\n\t\tc.ht[i] = -c.htlen\n\t}\n\n\t// Nothing in the queue waiting to be added to the table, either.\n\tfor i := 0; i < len(c.b); i++ {\n\t\tc.b[i] = c.p\n\t}\n\n\t// No bytes input yet.\n\tc.akr = (-c.p) % c.p\n\tc.yka = 0\n\tc.buf = c.buf[:0]\n\tc.r = 0\n\tc.rs = 1 + c.mu\n}", "func (gb *GeneratorBuilder) Start(start int) *GeneratorBuilder {\n\tgb.start = start\n\treturn gb\n}", "func (p *ProcessCalls) Start() {\n fmt.Fprintln(os.Stderr, \"Start process\")\n\tctx, cancel := context.WithCancel(context.Background())\n\taccumulatorChan := make(chan *parser.LogEntry)\n notifyNewTrace := make(chan string)\n\t// TODO: Here is a place for different 
scaling strategies: we can use multiple accumulator instances here.\n\tgo p.pendingTraces(ctx, accumulatorChan, notifyNewTrace)\n go registerTrace(p, notifyNewTrace,ctx)\n count := int64(0)\n increaseFactor := int64(1)\n\tfor p.in.Scan() {\n rawLogEntry := p.in.Text()\n count = count + int64(len([]byte(rawLogEntry)))\n if p.pb != nil {\n p.pb.Add(len([]byte(rawLogEntry)))\n if count >= p.pb.Total() {\n // we can use more advanced methods for grow of the progressBar\n\t\t\t // if count > int64((p.pb.Total() * (100 - decreaseFactor) /100)) {\n // if increaseFactor < 10 {\n // increaseFactor++\n // }\n p.pb.SetTotal(p.pb.Total() *(100 + increaseFactor)/100 )\n\t\t\t}\n\t\t}\n\t\tparsedLogEntry, err := parser.Parse(p.in.Text())\n\t\tif err != nil {\n fmt.Fprintln(os.Stderr,\"Malformed Lines\", err)\n\t\t\tp.info.IncrementMalformedLines()\n\t\t\tcontinue\n\t\t}\n p.info.IncrementConsumedLines()\n // fmt.Fprintln(os.Stderr, parsedLogEntry)\n\n\n\t\t// p.registerTrace(ctx, parsedLogEntry.Trace)\n\t\taccumulatorChan <- parsedLogEntry\n\t}\n\n\t// Calling cancel signals to all postponed trace processors to generate the result.\n\tcancel()\n\t// Await until all processing goroutines finish.\n\tp.pendingTracesWG.Wait()\n // fmt.Fprintln(os.Stderr, p.logs)\n fmt.Fprintln(os.Stderr, p.info)\n}", "func (ra *resourceAnalyzer) Start() {\n\tra.fsResourceAnalyzer.Start()\n}", "func (a *Agent) start() {\n\ta.initAPI()\n\tnb := 0\n\tfor {\n\t\ta.updateStreams()\n\t\tnb++\n\t\tif nb == 10 {\n\t\t\tlog.Printf(\"Sent %d logs and %d metrics on the last %d seconds\\n\", a.nbLogs, a.nbMetrics, nb*conf.period)\n\t\t\tnb = 0\n\t\t\ta.nbLogs = 0\n\t\t\ta.nbMetrics = 0\n\t\t}\n\t\ttime.Sleep(time.Duration(conf.period) * time.Second)\n\t}\n}", "func (kgw kgWorker) start(args ...interface{}) error {\n\tgo kgw.work()\n\treturn nil\n}", "func (tr *taskReader) Start() {\n\tif !atomic.CompareAndSwapInt32(\n\t\t&tr.status,\n\t\tcommon.DaemonStatusInitialized,\n\t\tcommon.DaemonStatusStarted,\n\t) {\n\t\treturn\n\t}\n\n\ttr.gorogrp.Go(tr.dispatchBufferedTasks)\n\ttr.gorogrp.Go(tr.getTasksPump)\n}", "func (o TableRangePartitioningRangeOutput) Start() pulumi.IntOutput {\n\treturn o.ApplyT(func(v TableRangePartitioningRange) int { return v.Start }).(pulumi.IntOutput)\n}", "func (a *Airport) start() {\n\t// start all the airport processors\n\tgo a.startArrivals()\n\tgo a.processArrivals()\n\tgo a.processDepartures()\n\tgo a.processHangar()\n\n\t// quit on os signal\n\tsignalChan := make(chan os.Signal, 1)\n\tsignal.Notify(signalChan, os.Interrupt)\n\tfor {\n\t\tselect {\n\t\tcase <-signalChan:\n\t\t\ta.cleanUp(true)\n\t\t\tif err := safeclose.Close(a.connection); err != nil {\n\t\t\t\ta.log.Error(err)\n\t\t\t}\n\t\t\tos.Exit(1)\n\t\tcase runway, ok := <-a.runwayChan:\n\t\t\tif !ok {\n\t\t\t\ta.log.Errorf(\"runway channel closed\")\n\t\t\t}\n\t\t\ta.processRunway(runway)\n\t\t\ttime.Sleep(randomDuration(runwayClearance, runwayClearance))\n\t\t}\n\t}\n}", "func (sc *controller) startScraping() {\n\tgo func() {\n\t\tif sc.tickerCh == nil {\n\t\t\tticker := time.NewTicker(sc.collectionInterval)\n\t\t\tdefer ticker.Stop()\n\n\t\t\tsc.tickerCh = ticker.C\n\t\t}\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-sc.tickerCh:\n\t\t\t\tsc.scrapeMetricsAndReport(context.Background())\n\t\t\tcase <-sc.done:\n\t\t\t\tsc.terminated <- struct{}{}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func (n *Nozzle) Start() {\n\trx := n.s.Stream(context.Background(), n.buildBatchReq())\n\n\tgo n.timerProcessor()\n\tgo n.timerEmitter()\n\tgo 
n.envelopeReader(rx)\n\n\tn.log.Info(\"starting workers\", logger.Count(2*runtime.NumCPU()))\n\tfor i := 0; i < 2*runtime.NumCPU(); i++ {\n\t\tgo n.pointWriter()\n\t}\n\n\tgo n.pointBatcher()\n}", "func (s *StorageLoadBalancer) start() {\n\t// TODO\n}", "func (w *worker) startWorker() {\n\tzap.L().Info(\"Starting InfluxDBworker\")\n\tfor {\n\t\tselect {\n\t\tcase event := <-w.events:\n\t\t\tw.processEvent(event)\n\t\tcase <-w.stop:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (ps *projectSetProcessor) Start(ctx context.Context) {\n\tctx = ps.StartInternal(ctx, projectSetProcName)\n\tps.input.Start(ctx)\n\tps.cancelChecker = cancelchecker.NewCancelChecker(ctx)\n}", "func (w *worker) start() {\n\tgo func() {\n\t\tfor {\n\t\t\tw.WorkerQueue <- w.Job\n\n\t\t\tselect {\n\t\t\tcase job := <-w.Job:\n\t\t\t\tlog.Printf(\"worker %d: %s\", w.ID, job.User.Login)\n\t\t\t\tjob.User.run()\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}" ]
[ "0.6754084", "0.6630149", "0.6614358", "0.6453262", "0.63123596", "0.6260204", "0.6162121", "0.6110421", "0.6065856", "0.60470873", "0.598306", "0.5982473", "0.5909885", "0.5909172", "0.58936924", "0.588361", "0.57815766", "0.5770956", "0.5755095", "0.5743116", "0.5717359", "0.5701099", "0.5696611", "0.56953067", "0.5678136", "0.56424546", "0.5634457", "0.56325746", "0.563007", "0.5626698", "0.5602886", "0.5597477", "0.5594806", "0.5578563", "0.55771637", "0.55753046", "0.5552563", "0.5549563", "0.554517", "0.5526497", "0.55207986", "0.54949504", "0.54797536", "0.5455441", "0.5442205", "0.5441189", "0.54302466", "0.5425594", "0.54127467", "0.54112524", "0.53931546", "0.5389107", "0.538782", "0.5358487", "0.5357106", "0.5347573", "0.53428656", "0.5310249", "0.53068346", "0.5305443", "0.53052473", "0.53044945", "0.529986", "0.5283183", "0.52812797", "0.52523124", "0.5250825", "0.5232455", "0.5228671", "0.5226089", "0.5224824", "0.5214293", "0.52136594", "0.5202909", "0.52012455", "0.519989", "0.5196809", "0.5192371", "0.5178914", "0.5178162", "0.5165403", "0.5164799", "0.5164424", "0.5161214", "0.5159618", "0.5159086", "0.51550967", "0.51547414", "0.5153246", "0.5152858", "0.5151896", "0.51515275", "0.51477414", "0.51457816", "0.5145636", "0.5140322", "0.5139528", "0.5138757", "0.5137489", "0.5137161" ]
0.7028437
0
stop stops the mapper.
func (m *mapper) stop() { syncClose(m.done) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *reducer) stop() {\n\tfor _, m := range r.mappers {\n\t\tm.stop()\n\t}\n\tsyncClose(r.done)\n}", "func (m *Map) Stop(c chan<- string) {\n\tm.bus.Stop(c)\n}", "func (p *literalProcessor) stop() { syncClose(p.done) }", "func (r *reaper) stop() {\n\tr.stopCh <- struct{}{}\n}", "func (cMap *MyStruct) Stop(){\n\tcMap.stop <- true\n}", "func (m *Module) stop() {\n\tm.mux.Lock()\n\tdefer m.mux.Unlock()\n\tif m.started && !m.isFinished() {\n\t\tclose(m.done)\n\t}\n}", "func (d *D) stop() {\n\tclose(d.stopCh)\n}", "func (r *RunCommand) stop() {\n\tr.logTail.Stop()\n\tr.pw.Stop()\n}", "func (s *schedule) stop() {\n\tif !s.running {\n\t\treturn\n\t}\n\ts.running = false\n\ts.stopCh <- struct{}{}\n}", "func (t *Tracer) Stop() {}", "func (bc *BotCommand) stop() {\n\tbc.Lock()\n\tdefer bc.Unlock()\n\tbc.running = false\n}", "func (oc *OSRMConnector) Stop() {\n\t// todo\n}", "func (margelet *Margelet) Stop() {\n\tmargelet.running = false\n}", "func (b *Blinker) Stop() {\n\tclose(b.stop)\n}", "func (jbobject *ShuffleShuffleBlockResolver) Stop() {\n\t_, err := jbobject.CallMethod(javabind.GetEnv(), \"stop\", javabind.Void)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n}", "func (_m *MarkerConsumer) Stop() {\n\t_m.Called()\n}", "func (s *Scanner) Stop() {\n\ts.stop <- struct{}{}\n}", "func stop() {\n\tlog.Info(\"Maison is stopping...\")\n\n\tclosePlugins()\n\n\t// TODO: close stores\n\n\tbus.Stop()\n\n\tlog.Info(\"Maison is stopped\")\n}", "func (c *Controller) stop(name types.NamespacedName) {\n\tproc, ok := c.procs[name]\n\tif !ok {\n\t\treturn\n\t}\n\n\tif proc.cancelFunc == nil {\n\t\treturn\n\t}\n\tproc.cancelFunc()\n\t<-proc.doneCh\n\tproc.probeWorker = nil\n\tproc.cancelFunc = nil\n\tproc.doneCh = nil\n}", "func (_m *MockCompactionPlanContext) stop() {\n\t_m.Called()\n}", "func (m *patchGoom) Stop() {\n}", "func (f *FakeOutput) Stop() error { return nil }", "func stop(c *cli.Context) error {\n\n\tif !isSystemRunning() {\n\t\treturn nil\n\t}\n\t//readers, writers, _, controllers := getIPAddresses()\n\treaders, writers, _, _ := getIPAddresses()\n\n\tfor _, ipaddr := range readers {\n\t\tfmt.Println(\"reader\", ipaddr, \"StopProcess\")\n\t\tipaddrs := make([]string, 1)\n\t\tipaddrs[0] = ipaddr\n\t\tsendCommandToControllers(ipaddrs, \"StopProcess\", \"\")\n\n\t}\n\tfor _, ipaddr := range writers {\n\t\tfmt.Println(\"writer\", ipaddr, \"StopProcess\")\n\t\tipaddrs := make([]string, 1)\n\t\tipaddrs[0] = ipaddr\n\t\tsendCommandToControllers(ipaddrs, \"StopProcess\", \"\")\n\n\t}\n\n\t//sendCommandToControllers(controllers, \"StopReaders\", \"\")\n\t//sendCommandToControllers(controllers, \"StopWriters\", \"\")\n\t//sendCommandToControllers(controllers, \"StopServers\", \"\")\n\treturn nil\n}", "func (w *Watch) stop() {\n\tw.done <- struct{}{}\n}", "func (o *influxDBLogger) stop() error {\n\treturn nil\n}", "func (ms *MarvinServer) Stop() {\n\n}", "func (f *framework) stop() {\n\tclose(f.epochChan)\n}", "func (bt *Metricbeat) Stop() {\n\tclose(bt.done)\n}", "func (ldp *loopbackDataPlane) stop() bool {\n\t//This function is empty because packet capture in loopback is done as part of p4rt packet-ins but stop() still has to be defined as part of the interface definition\n\treturn true\n}", "func (sl *ReceiverLoop) stop() {\n\tsl.cancel()\n\t<-sl.stopped\n}", "func (_e *MockCompactionPlanContext_Expecter) stop() *MockCompactionPlanContext_stop_Call {\n\treturn &MockCompactionPlanContext_stop_Call{Call: _e.mock.On(\"stop\")}\n}", "func (m *ProbeManager) Stop() {\n\tclose(m.done)\n}", "func (a *appsec) 
stop() {\n\ta.unregisterWAF()\n\ta.limiter.Stop()\n}", "func stop() {\n\trobot.RLock()\n\tpr := robot.pluginsRunning\n\tstop := robot.stop\n\trobot.RUnlock()\n\tLog(Debug, fmt.Sprintf(\"stop called with %d plugins running\", pr))\n\trobot.Wait()\n\tbrainQuit()\n\tclose(stop)\n}", "func (ns *EsIndexer) Stop() {\n\n}", "func (r *RoverDriver) Stop() {\n r.commands <- stop\n}", "func (h *LinkerdInfo) Stop() {\n\th.log.Info(\"shutting down\")\n\tclose(h.stopCh)\n}", "func (j Jibi) Stop() {\n\tj.RunCommand(CmdStop, nil)\n}", "func (s *ContinuousScanner) Stop() {\n\ts.stop <- struct{}{}\n}", "func (s *server) stop() {\n\ts.stopMu.Lock()\n\tdefer s.stopMu.Unlock()\n\n\tclose(s.stopCh)\n\ts.stopCh = make(chan struct{})\n}", "func (m *Mesh) Stop() {\n\tclose(m.stop)\n}", "func (g *Goer) stop() {\n\t// emitted OnStop callback func.\n\tif g.OnStop != nil {\n\t\tg.OnStop()\n\t}\n\n\t// close client.\n\tg.Connections.Range(func(k, connection interface{}) bool {\n\t\tconnection.(connections.Connection).Close(\"\")\n\t\treturn true\n\t})\n\n\tg.OnMessage, g.OnError, g.OnClose, g.OnBufferDrain, g.OnBufferFull = nil, nil, nil, nil, nil\n}", "func (p *peer) stop() {\n\tp.fsmsMu.Lock()\n\tdefer p.fsmsMu.Unlock()\n\n\tfor _, fsm := range p.fsms {\n\t\tfsm.eventCh <- ManualStop\n\t}\n}", "func (w *worker) stop() {\n\tatomic.StoreInt32(&w.running, 0)\n}", "func Stop() {\n\ts.Stop()\n}", "func (w *worker) stop() {\n\tselect {\n\tcase w.stopCh <- struct{}{}:\n\tdefault: // Non-blocking.\n\t}\n}", "func (_e *MockDataCoord_Expecter) Stop() *MockDataCoord_Stop_Call {\n\treturn &MockDataCoord_Stop_Call{Call: _e.mock.On(\"Stop\")}\n}", "func Stop() {\n\tstopRunning <- true\n\n}", "func (c *M) Stop() error {\n\treturn base.Stop(c)\n}", "func (app *App) Stop() {}", "func (i *I2C) stop() {\n\t// Page 9, section 3.1.4 START and STOP conditions\n\ti.scl.Out(gpio.Low)\n\ti.sleepHalfCycle()\n\ti.scl.Out(gpio.High)\n\ti.sleepHalfCycle()\n\ti.sda.Out(gpio.High)\n\t// TODO(maruel): This sleep could be skipped, assuming we wait for the next\n\t// transfer if too quick to happen.\n\ti.sleepHalfCycle()\n}", "func (p *Peer) stop() {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\tp.heartbeatTimer.Stop()\n}", "func (p *Peer) stop() {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\tp.heartbeatTimer.Stop()\n}", "func (e *exec) stop(ctx context.Context) {\n\t// Lock the mutex to prevent race conditions with Start\n\te.execMutex.Lock()\n\tdefer e.execMutex.Unlock()\n\n\t// Do the shutdown sequence once until the startup sequence resets\n\te.stopOnce.Do(func() {\n\t\tdefer func() {\n\t\t\t// reset startOnce so the startup sequence can happen again\n\t\t\te.startOnce = sync.Once{}\n\t\t}()\n\t\te.stopFn(ctx)\n\t})\n}", "func Stop() {\n\tinstance.stop()\n}", "func (r *RecordStream) Stop() {\n\tif r.state == running {\n\t\tr.c.c.Request(&proto.CorkRecordStream{StreamIndex: r.index, Corked: true}, nil)\n\t\tr.state = idle\n\t}\n}", "func (tw *TimeWheel) Stop() {\n\ttw.stopFlag <- struct{}{}\n}", "func (pool *WebSocketPool)stop() {\n\tclose(pool.input)\n}", "func (hb *heartbeat) stop() {\n\tselect {\n\tcase hb.stopChan <- struct{}{}:\n\tdefault:\n\t}\n}", "func (eis *eventSocket) stop() error {\n\teis.log.Info(\"closing Chain IPC\")\n\terrs := wrappers.Errs{}\n\terrs.Add(eis.unregisterFn(), eis.socket.Close())\n\treturn errs.Err\n}", "func (a API) Stop(cmd *None) (e error) {\n\tRPCHandlers[\"stop\"].Call <-API{a.Ch, cmd, nil}\n\treturn\n}", "func (e *binaryExprEvaluator) stop() {\n\te.lhs.stop()\n\te.rhs.stop()\n\tsyncClose(e.done)\n}", "func (w 
*Wheel) Stop() {\n\tw.stopper.TrySend()\n}", "func (b *Bootstrapper) Stop() error {\n\treturn nil\n}", "func (b *Bootstrapper) Stop() error {\n\treturn nil\n}", "func (it *messageIterator) stop() {\n\tit.cancel()\n\tit.mu.Lock()\n\tit.checkDrained()\n\tit.mu.Unlock()\n\tit.wg.Wait()\n}", "func (l *Learner) Stop() {\n\t//TODO(student): Task 3 - distributed implementation\n}", "func (a *actorsRuntime) Stop() {\n\tif a.placement != nil {\n\t\ta.placement.Stop()\n\t}\n}", "func (h *Handler) Stop() {\n\th.Lock()\n\tdefer h.Unlock()\n\tif h.stopped {\n\t\treturn\n\t}\n\tclose(h.close)\n\th.manager.Stop()\n\th.store.Close()\n\th.stopped = true\n\th.l.Info(\"beacon\", \"stop\")\n}", "func (g *DarwinGrabber) Stop() {\n\tclose(g.stop)\n}", "func (l *Loader) Stop() {\n\tl.spn.Stop()\n}", "func (e *Engine) stop() error {\n\te.booted = false\n\n\t// instruct engine to shutdown\n\tshutdown := \"shutdown\"\n\tcommunication.Publish(\n\t\tnaming.Topic(e.Index, naming.Command),\n\t\tnaming.Publisher(e.Index, naming.Command),\n\t\tshutdown)\n\n\t// stop subscribing to engine's commands and events\n\te.Communication.Teardown()\n\n\t// TODO create graphic for MQTT hierarchy, whos's publishing what to whom and why\n\t// TODO explain MQTT hierarchy\n\treturn nil\n}", "func (w *Processor) Stop() {\n\tclose(w.stop)\n}", "func (rf *Raft) stop(timer *time.Timer) {\n\tif !timer.Stop() && len(timer.C) != 0 {\n\t\t<-timer.C\n\t}\n}", "func (a *Acceptor) Stop() {\n\t//TODO(student): Task 3 - distributed implementation\n\ta.stop <- 0\n\n}", "func (tm *ServiceTracerouteManager) Stop() {\n\ttm.StopChan <- true\n}", "func (i2c I2C) stop() {\n\t// Send stop condition.\n\tavr.TWCR.Set(avr.TWCR_TWEN | avr.TWCR_TWINT | avr.TWCR_TWSTO)\n\n\t// Wait for stop condition to be executed on bus.\n\tfor !avr.TWCR.HasBits(avr.TWCR_TWSTO) {\n\t}\n}", "func (w *StatsWriter) Stop() {\n\tw.stop <- struct{}{}\n\t<-w.stop\n\tstopSenders(w.senders)\n}", "func (sp *StreamPool) Stop() {\n\t//sw.quitCh <- true\n}", "func (t *channelTransport) stop() {\n\tt.stopChan <- struct{}{}\n}", "func (np *nodeProcess) stop() error {\n\terr := np.node.Stop()\n\tnp.rawClient.Kill()\n\treturn err\n}", "func (l *Launcher) Stop() {\n\tl.stop <- struct{}{}\n\tstopper := startstop.NewParallelStopper()\n\tfor identifier, tailer := range l.tailers {\n\t\tstopper.Add(tailer)\n\t\tdelete(l.tailers, identifier)\n\t}\n\tstopper.Stop()\n}", "func (er *BufferedExchangeReporter) Stop() {\n\n}", "func stop() error {\n\tif spammerInstance == nil {\n\t\treturn ErrSpammerDisabled\n\t}\n\n\tspammerLock.Lock()\n\tdefer spammerLock.Unlock()\n\n\tstopWithoutLocking()\n\n\tisRunning = false\n\n\treturn nil\n}", "func (m *Manager) Stop() {\n\tclose(m.stop)\n\t<-m.stopDone\n}", "func op_STOP(pc *uint64, in *interpreter, ctx *callCtx) uint64 {\n\treturn 0\n}", "func (dt *discoveryTool) stop() {\n\tclose(dt.done)\n\n\t//Shutdown timer\n\ttimer := time.NewTimer(time.Second * 3)\n\tdefer timer.Stop()\nL:\n\tfor { //Unblock go routine by reading from dt.dataChan\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\tbreak L\n\t\tcase <-dt.dataChan:\n\t\t}\n\t}\n\n\tdt.wg.Wait()\n}", "func (_e *MockQueryCoord_Expecter) Stop() *MockQueryCoord_Stop_Call {\n\treturn &MockQueryCoord_Stop_Call{Call: _e.mock.On(\"Stop\")}\n}", "func (f *Flame) Stop() {\n\tf.stop <- struct{}{}\n}", "func (_m *MessageMain) Stop() {\n\t_m.Called()\n}", "func (manager *BarWriter) Stop() {\n\tmanager.stopChan <- struct{}{}\n}", "func (app *frame) Stop() {\n\tapp.isStopped = true\n}", "func (p *noop) Stop() {}", "func (zl 
*ZapLogger) Stop() error {\n\treturn zl.logger.Sync()\n}", "func (b *breachArbiter) Stop() error {\n\tif !atomic.CompareAndSwapUint32(&b.stopped, 0, 1) {\n\t\treturn nil\n\t}\n\n\tbrarLog.Infof(\"Breach arbiter shutting down\")\n\n\tclose(b.quit)\n\tb.wg.Wait()\n\n\treturn nil\n}", "func (v *vtStopCrawler) stop() {\n\tfor _, worker := range v.workers {\n\t\tworker.stop()\n\t}\n\tclose(v.done)\n}", "func (this *Reporter) Stop() {\n\tthis.Status = REPORT_STATUS_STOP\n}", "func (a *Attacker) Stop() {\n\tselect {\n\tcase <-a.stopch:\n\t\treturn\n\tdefault:\n\t\tclose(a.stopch)\n\t}\n}", "func (this *SimulateLocationService) Stop() error {\n\tif _, err := this.service.GetConnection().Write([]byte{0x00, 0x00, 0x00, 0x01}); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (a *Agent) Stop() {\n\ta.init()\n\ta.stopCh <- struct{}{}\n}" ]
[ "0.7641731", "0.69857275", "0.6716965", "0.66754645", "0.6570976", "0.6499061", "0.6379476", "0.6372358", "0.6363777", "0.6234526", "0.6179535", "0.61789393", "0.61702853", "0.61676526", "0.61521393", "0.6150348", "0.612782", "0.6109738", "0.61095876", "0.6086529", "0.60829526", "0.6067617", "0.60675937", "0.60494286", "0.60375524", "0.5985228", "0.59612924", "0.595041", "0.59284323", "0.5922956", "0.5917607", "0.5913644", "0.5909719", "0.58894056", "0.58746403", "0.5854846", "0.5827991", "0.58181435", "0.5814637", "0.58075947", "0.58070016", "0.5789467", "0.578253", "0.575914", "0.57564616", "0.5751582", "0.5746453", "0.57408005", "0.57349837", "0.5734208", "0.57325023", "0.5732454", "0.5732454", "0.57314277", "0.5725699", "0.5717907", "0.57175124", "0.57158816", "0.57107466", "0.57051444", "0.5703802", "0.5691173", "0.5685845", "0.56845176", "0.56845176", "0.5683176", "0.56797385", "0.5676062", "0.567069", "0.5659979", "0.56598806", "0.5659068", "0.56577927", "0.56567055", "0.565407", "0.565301", "0.5652524", "0.5647343", "0.5646173", "0.5643588", "0.5643492", "0.56419677", "0.564151", "0.564044", "0.5637646", "0.5633623", "0.56330067", "0.56247765", "0.561866", "0.56166506", "0.56159985", "0.5614867", "0.56094617", "0.5606557", "0.56047124", "0.56040424", "0.5598985", "0.55972177", "0.5594376", "0.5589106" ]
0.8386904
0
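A brief note on the record that ends here: the positive document for "stop stops the mapper." delegates to a syncClose helper whose definition is not included in this excerpt. The sketch below is one plausible shape for it, assuming its job is to make repeated or concurrent stop() calls harmless; the chan struct{} signature and the recover-based double-close guard are assumptions, not the dataset's actual implementation.

// syncClose closes done once; if another goroutine already closed it,
// the deferred recover swallows the "close of closed channel" panic.
// A sync.Once held by the owning struct is the more idiomatic guard.
func syncClose(done chan struct{}) {
	defer func() { _ = recover() }()
	close(done)
}

Under that reading, (m *mapper).stop() becomes idempotent: the first call signals shutdown through m.done, and every later call is a no-op.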
C returns the streaming data channel.
func (m *mapper) C() <-chan map[string]interface{} { return m.c }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *Subscription) C() <-chan interface{} {\n\treturn s.channel\n}", "func (s *subscription) C() <-chan interface{} {\n\treturn s.c\n}", "func (c *dataReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func (uc *UnboundedChannel) Get() <-chan interface{} {\n\treturn uc.channel\n}", "func (l *Logger) C() chan<- interface{} {\n\treturn l.src\n}", "func (s *Scanner) C() <-chan []Measurement {\n\treturn s.ch\n}", "func (p *HostedProgramInfo) Channel() io.ReadWriteCloser {\n\treturn p.TaoChannel\n}", "func (conn *Connection) Channel() chan []byte {\n\treturn conn.channel\n}", "func (p *literalProcessor) C() <-chan map[string]interface{} { return p.c }", "func (remote *SerialRemote) Channel() chan []byte {\n\treturn remote.channel\n}", "func (ticker *PausableTicker) GetChannel() <-chan time.Time {\n\treturn ticker.channel\n}", "func (o *Output) Read(channel int) *Buffer {\n\treturn o.channels[channel].Copy()\n}", "func (f *FFS) Get(ctx context.Context, c cid.Cid) (io.Reader, error) {\n\tstream, err := f.client.Get(ctx, &rpc.GetRequest{\n\t\tCid: util.CidToString(c),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treader, writer := io.Pipe()\n\tgo func() {\n\t\tfor {\n\t\t\treply, err := stream.Recv()\n\t\t\tif err == io.EOF {\n\t\t\t\t_ = writer.Close()\n\t\t\t\tbreak\n\t\t\t} else if err != nil {\n\t\t\t\t_ = writer.CloseWithError(err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t_, err = writer.Write(reply.GetChunk())\n\t\t\tif err != nil {\n\t\t\t\t_ = writer.CloseWithError(err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn reader, nil\n}", "func (s *p4RuntimeServer) StreamChannel(stream p4.P4Runtime_StreamChannelServer) error {\n\tfmt.Println(\"Starting bi-directional channel\")\n\tfor {\n\t\tinData, err := stream.Recv()\n\t\tif err == io.EOF {\n\t\t\treturn nil\n\t\t}\n\t\tfmt.Printf(\"%v\", inData)\n\t}\n\n\treturn nil\n}", "func (wp *Pool) C() <-chan Processor {\n\treturn wp.resultChan\n}", "func (p *pipeline) Channel() Channel {\n\treturn p.channel\n}", "func (c *webSocketClosedClient) GetStream() rpcc.Stream { return c.Stream }", "func (r *reducer) C() <-chan map[string]interface{} { return r.c }", "func (e *binaryExprEvaluator) C() <-chan map[string]interface{} { return e.c }", "func (c *Computation) Data() <-chan *messages.DataMessage {\n\treturn c.dataCh\n}", "func (p *Player) Channel() *api.Channel {\n\tretCh := make(chan *api.Channel)\n\tp.chGetChannel <- retCh\n\tc := <-retCh\n\treturn c\n}", "func (me *T) Data() <-chan float64 {\n\n\t// Create channel.\n\t//\n\t// We will return this to the caller.\n\t//\n\t// We will also spawn a goroutine and output the data from this datasack has onto it.\n\t//\n\t\tout := make(chan float64)\n\n\t// Spawn a goroutine that will output the data from this datasack onto the channel\n\t// we previously created.\n\t//\n\t// Note that this goroutine will probably block. 
But that's OK, since it is in\n\t// its own goroutine (and shouldn't take anything else down with it).\n\t//\n\t\tgo func() {\n\t\t\tfor _,value := range me.slice {\n\t\t\t\tout <- value\n\t\t\t}\n\n\t\t\tclose(out)\n\t\t}()\n\n\t// Return.\n\t\treturn out\n}", "func ReadData(c <-chan string) {\n\tfmt.Printf(\"Read Data: %s\\n\", <-c) // 只能收\n}", "func (wet *WETReader) Channel() (<-chan struct { Entry *WETEntry; Err error }) {\n channel := make(chan struct { Entry *WETEntry; Err error })\n go func() {\n defer func() {\n wet.Close()\n close(channel)\n }()\n for {\n entry, err := wet.extractEntry()\n channel <- struct { Entry *WETEntry; Err error }{ entry, err }\n if err != nil {\n return\n }\n }\n }()\n return channel\n}", "func (s *GameSocket) ReadChannel() <-chan *packet.Packet {\n\treturn s.readChan\n}", "func GetChannel(protocol, host string, port int, secureConfig *tls.Config) (ReaderWriterCloser, error) {\n\tvar conn net.Conn\n\tvar err error\n\tconn, err = net.Dial(protocol, host+\":\"+strconv.Itoa(port))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif protocol == \"tcp\" {\n\t\tconn.(*net.TCPConn).SetKeepAlive(true)\n\t\tconn.(*net.TCPConn).SetKeepAlivePeriod(30 * time.Second)\n\t}\n\tif secureConfig != nil {\n\t\tconn = tls.Client(conn, secureConfig)\n\t}\n\tvar readerWriter ReaderWriterCloser = &Channel{\n\t\tprotocol: protocol,\n\t\thost: host,\n\t\tport: port,\n\t\tconn: conn,\n\t\tmaxRead: 8 * 1024,\n\t\treadBuffer: make([]byte, 0),\n\t\twriteBuffer: make([]byte, 0),\n\t\twriteChannel: make(chan writeComplete, 100),\n\t\treadTimeout: 60 * time.Second,\n\t\twriteTimeout: 60 * time.Second,\n\t}\n\tgo readerWriter.(*Channel).writeRoutine()\n\treturn readerWriter, nil\n}", "func (f *feedback) Channel() (<-chan *FeedbackMessage, error) {\n\tif f.conn != nil {\n\t\treturn f.chanel, nil\n\t}\n\n\tif err := f.createConnection(); err != nil {\n\t\tlogerr(\"Unable to start feedback connection: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tf.stopWait.Add(1)\n\tgo f.monitorService()\n\n\treturn f.chanel, nil\n}", "func (c *webSocketFrameReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func (res channelBase) Channel() *types.Channel {\n\treturn res.channel\n}", "func (o *KinesisOutput) GetOutputChannel() chan []byte {\n\treturn o.outputChannel\n}", "func (s VectOp) Stream() <-chan float64 {\n\tch := make(chan float64)\n\tgo feed(ch, s)\n\treturn ch\n}", "func (s *f64) Channel(c int) Floating {\n\treturn floatingChannel{\n\t\tbuffer: s,\n\t\tchannel: c,\n\t}\n}", "func (m *Manager) InputChannel() chan []byte {\n\treturn m.byteStream\n}", "func (m *MetricsExtracor) Channel() chan<- interface{} {\n\treturn m.channel\n}", "func (m *Module) Stream() <-chan bar.Output {\n\tch := base.NewChannel()\n\tgo m.worker(ch)\n\treturn ch\n}", "func (c *webSocketFrameSentClient) GetStream() rpcc.Stream { return c.Stream }", "func (meta *MetaAI) GetChannel(c chan string) {\n\tmeta.l.Lock()\n\tdefer meta.l.Unlock()\n\n\tmeta.i = c\n}", "func getData(client pb.DataClient, filter *pb.DataFilter) {\r\n\t// calling the streaming API\r\n\tstream, err := client.GetData(context.Background(), filter)\r\n\tif err != nil {\r\n\t\tlog.Fatalf(\"Error on get data: %v\", err)\r\n\t}\r\n\tfor {\r\n\t\tdata, err := stream.Recv()\r\n\t\tif err == io.EOF {\r\n\t\t\tbreak\r\n\t\t}\r\n\t\tif err != nil {\r\n\t\t\tlog.Fatalf(\"%v.GetData(_) = _, %v\", client, err)\r\n\t\t}\r\n\t\tlog.Printf(\"Data: %v\", data)\r\n\t}\r\n}", "func (c *requestServedFromCacheClient) GetStream() rpcc.Stream { return c.Stream }", "func (e 
*EventNotif) Channel() (res <-chan Event) {\n\treturn e.eventsCh\n}", "func (c *webSocketCreatedClient) GetStream() rpcc.Stream { return c.Stream }", "func (stream *MAMWriteStream) Open() (trinary.Trytes, error) {\n\tchannelID, err := stream.m.ChannelCreate(5)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tstream.currentChannelID = channelID\n\treturn channelID, nil\n}", "func (gi *Invoker) StreamRecv(param *common.Params) error {\n\t//gloryPkg := newGloryRequestPackage(\"\", param.MethodName, uint64(common.StreamSendPkg), param.Seq)\n\t//gloryPkg.Params = append(gloryPkg.Params, param.Value)\n\t//gloryPkg.Header.ChanOffset = param.ChanOffset\n\t//gloryPkg.Header.Seq = param.Seq\n\t//if err := gloryPkg.sendToConn(gi.gloryConnClient, gi.handler); err != nil {\n\t//\tlog.Error(\"StreamRecv: gloryPkg.sendToConn(gi.conn, gi.handler) err =\", err)\n\t//\treturn GloryErrorConnErr\n\t//}\n\treturn nil\n}", "func WrapDataChannel(rtcDataChannel RTCDataChannel) (*DataChannel, error) {\n\trr, rw, err := Pipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdc := &DataChannel{\n\t\tdc: rtcDataChannel,\n\t\trr: rr,\n\t}\n\tdc.dc.OnMessage(func(data []byte) {\n\t\tlog.WithField(\"data\", data).\n\t\t\tDebug(\"datachannel message\")\n\n\t\tif rw != nil {\n\t\t\t_, err := rw.Write(data)\n\t\t\tif err != nil {\n\t\t\t\trw.Close()\n\t\t\t\trw = nil\n\t\t\t}\n\t\t}\n\t})\n\treturn dc, nil\n}", "func (c *responseReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *CounterChannel) Get() uint64 {\n\tc.check()\n\treturn <-c.readCh\n}", "func (c *webTransportClosedClient) GetStream() rpcc.Stream { return c.Stream }", "func (m *MetricsHolder) Channel() chan<- interface{} {\n\treturn m.channel\n}", "func (c *ChanReader) Read(out []byte) (int, error) {\n\tif c.buffer == nil {\n\t\treturn 0, io.EOF\n\t}\n\tn := copy(out, c.buffer)\n\tc.buffer = c.buffer[n:]\n\tif len(out) <= len(c.buffer) {\n\t\treturn n, nil\n\t} else if n > 0 {\n\t\t// We have some data to return, so make the channel read optional\n\t\tselect {\n\t\tcase p := <-c.input:\n\t\t\tif p == nil { // Stream was closed\n\t\t\t\tc.buffer = nil\n\t\t\t\tif n > 0 {\n\t\t\t\t\treturn n, nil\n\t\t\t\t}\n\t\t\t\treturn 0, io.EOF\n\t\t\t}\n\t\t\tn2 := copy(out[n:], p.Data)\n\t\t\tc.buffer = p.Data[n2:]\n\t\t\treturn n + n2, nil\n\t\tdefault:\n\t\t\treturn n, nil\n\t\t}\n\t}\n\tvar p *StreamChunk\n\tselect {\n\tcase p = <-c.input:\n\tcase <-c.interrupt:\n\t\tc.buffer = c.buffer[:0]\n\t\treturn n, ErrInterrupted\n\t}\n\tif p == nil { // Stream was closed\n\t\tc.buffer = nil\n\t\treturn 0, io.EOF\n\t}\n\tn2 := copy(out[n:], p.Data)\n\tc.buffer = p.Data[n2:]\n\treturn n + n2, nil\n}", "func (handle *Handle) GetStream() (Stream, error) {\n\tvar s Stream\n\tvar some *C.cudaStream_t\n\t//x := C.cudnnHandle_t(handle.Pointer())\n\n\ty := C.cudnnGetStream(handle.x, some)\n\ts.stream = *some\n\treturn s, Status(y).error(\"(*Handle).GetStream\")\n}", "func (c *loadingFinishedClient) GetStream() rpcc.Stream { return c.Stream }", "func (std *ReaderService) Read() (<-chan []byte, error) {\n\tmc := make(chan []byte, 0)\n\n\tstd.pub.Subscribe(mc)\n\n\treturn mc, nil\n}", "func (c ConnectionAdapter) Channel() (Channel, error) {\n\treturn c.Connection.Channel()\n}", "func SourceData(data ...int) <-chan int {\n\tfmt.Println(\"num:\", len(data))\n\tch := make(chan int, 80000000)\n\tgo func() {\n\t\tfor _, v := range data {\n\t\t\tch <- v\n\t\t}\n\t\tclose(ch)\n\t}()\n\treturn ch\n}", "func (c *webSocketHandshakeResponseReceivedClient) GetStream() rpcc.Stream { 
return c.Stream }", "func (c *baseChannels) GetS3Channel() chan *S3Object {\n\treturn c.s3Channel\n}", "func (r *Readiness) GetChannel() chan ReadinessMessage {\n\treturn r.channel\n}", "func (nc *NetClient) readChannel() chan struct {\n\t*arbor.ProtocolMessage\n\terror\n} {\n\tout := make(chan struct {\n\t\t*arbor.ProtocolMessage\n\t\terror\n\t})\n\t// read messages continuously and send results back on a channel\n\tgo func() {\n\t\tdefer func() {\n\t\t\t// ensure send on closed channel doesn't cause panic\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tif _, ok := err.(runtime.Error); !ok {\n\t\t\t\t\t// silently cancel runtime errors, but allow other errors\n\t\t\t\t\t// to propagate.\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\tfor {\n\t\t\tm := new(arbor.ProtocolMessage)\n\t\t\terr := nc.ReadWriteCloser.Read(m)\n\t\t\tout <- struct {\n\t\t\t\t*arbor.ProtocolMessage\n\t\t\t\terror\n\t\t\t}{m, err}\n\t\t}\n\t}()\n\treturn out\n}", "func (c *eventSourceMessageReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func UnbufferedChannel() {\n\t/*\n\tbufferred channel would be c := make(chan int, 50)\n\tunbufferred channel\n\t */\n\tc := make(chan int)\n\n\tgo func() {\n\t\tfor i := 0; i < 10; i++ {\n\t\t\t// put number onto channel\n\t\t\t// code stops until the value is taken from the channel\n\t\t\t// like a relay race\n\t\t\tc <- i\n\t\t}\n\t}() // self executing anonymous function\n\n\tgo func() {\n\t\tfor i := 0; i < 10; i++ {\n\t\t\t// take the number off the channel\n\t\t\t// receive the value from the channel and print it\n\t\t\tv := <-c\n\t\t\tfmt.Println(v)\n\n\t\t}\n\t}()\n\n\ttime.Sleep(time.Second)\n}", "func stream_copy(src io.Reader, dst io.Writer) <-chan int {\n\tbuf := make([]byte, 1024)\n\tsync_channel := make(chan int)\n\tgo func() {\n\t\tdefer func() {\n\t\t\tif con, ok := dst.(net.Conn); ok {\n\t\t\t\tcon.Close()\n\t\t\t\tlog.Printf(\"Connection from %v is closed\\n\", con.RemoteAddr())\n\t\t\t}\n\t\t\tsync_channel <- 0 // Notify that processing is finished\n\t\t}()\n\t\tfor {\n\t\t\tvar nBytes int\n\t\t\tvar err error\n\t\t\tnBytes, err = src.Read(buf)\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\tlog.Printf(\"Read error: %s\\n\", err)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t_, err = dst.Write(buf[0:nBytes])\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Write error: %s\\n\", err)\n\t\t\t}\n\t\t}\n\t}()\n\treturn sync_channel\n}", "func (c *CryptoStreamConn) GetDataForWriting() []byte {\n\tdefer c.writeBuf.Reset()\n\tdata := make([]byte, c.writeBuf.Len())\n\tcopy(data, c.writeBuf.Bytes())\n\treturn data\n}", "func Stream(out chan<- Value) error {\n for {\n v, err := DoSomething() // HL\n if err != nil {\n return err\n }\n out <- v // HL\n }\n }", "func streamCopy(src io.Reader, dst io.Writer) <-chan int {\n\tbuf := make([]byte, 1024)\n\tsyncChannel := make(chan int)\n\tgo func() {\n\t\tdefer func() {\n\t\t\tif con, ok := dst.(net.Conn); ok {\n\t\t\t\tcon.Close()\n\t\t\t\t//log.Printf(\"Connection from %v is closed\\n\", con.RemoteAddr())\n\t\t\t}\n\t\t\tsyncChannel <- 0 // Notify that processing is finished\n\t\t}()\n\t\tfor {\n\n\t\t\tvar nBytes int\n\t\t\tvar err error\n\t\t\tnBytes, err = src.Read(buf)\n\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\t//log.Printf(\"Read error: %s\\n\", err)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t_, err = dst.Write(buf[0:nBytes])\n\t\t\tif err != nil {\n\t\t\t\t//log.Fatalf(\"Write error: %s\\n\", err)\n\t\t\t}\n\t\t}\n\t}()\n\treturn syncChannel\n}", "func (v Vehicle) Stream() 
(chan *StreamEvent, chan error, error) {\n\turl := StreamURL + \"/stream/\" + strconv.Itoa(v.VehicleID) + \"/?values=\" + StreamParams\n\treq, _ := http.NewRequest(\"GET\", url, nil)\n\treq.SetBasicAuth(ActiveClient.Auth.Email, v.Tokens[0])\n\tresp, err := ActiveClient.HTTP.Do(req)\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\teventChan := make(chan *StreamEvent)\n\terrChan := make(chan error)\n\tgo readStream(resp, eventChan, errChan)\n\n\treturn eventChan, errChan, nil\n}", "func NewChannel() (chan *fluent.FluentRecordSet, chan Stat) {\n\tmessageCh := make(chan *fluent.FluentRecordSet, MessageChannelBufferLen)\n\tmonitorCh := make(chan Stat, MonitorChannelBufferLen)\n\treturn messageCh, monitorCh\n}", "func NewChannel() (chan *fluent.FluentRecordSet, chan Stat) {\n\tmessageCh := make(chan *fluent.FluentRecordSet, MessageChannelBufferLen)\n\tmonitorCh := make(chan Stat, MonitorChannelBufferLen)\n\treturn messageCh, monitorCh\n}", "func (c *requestInterceptedClient) GetStream() rpcc.Stream { return c.Stream }", "func (r *Receiver) Read() interface{} {\n\tutils.Debugln(\"Reading\")\n\tb := <-r.C // wait for a broadast channel\n\tv := b.v // retrieve value from received broadcastchannel\n\tr.C <- b // write same broadcastchannel to broadcastchannel\n\tr.C = b.c // broadcastchannel now becomes bc from broadcast\n\treturn v // return received value\n}", "func Stream(ctx context.Context, wC etcd.WatchChan) <-chan *etcd.Event {\n\teC := make(chan *etcd.Event, 1024)\n\n\tgo func(ctx context.Context, ec chan *etcd.Event) {\n\t\t// this unblocks any callers ranging on ec\n\t\tdefer close(ec)\n\n\t\t// etcd client will close this channel if error occurs\n\t\tfor wResp := range wC {\n\t\t\tif ok, err := chkctx.Check(ctx); ok {\n\t\t\t\tlog.Info().Str(\"component\", \"Stream\").Msgf(\"stream ctx canceled. returning: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif wResp.Canceled {\n\t\t\t\tlog.Info().Str(\"component\", \"Stream\").Msgf(\"watch channel error encountered. 
returning: %v\", wResp.Err())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, event := range wResp.Events {\n\t\t\t\teC <- event\n\t\t\t}\n\t\t}\n\t}(ctx, eC)\n\n\treturn eC\n}", "func (c *ChangeWatcher) outC() chan *RoomChange {\n if len(c.buffer) <= 0 {\n return nil\n }\n return c.out\n}", "func (closer *Closer) CloseChannel() chan struct{} {\n\treturn closer.channel\n}", "func (r *ChannelReader) Read(b []byte) (sz int, err error) {\n\tif len(b) == 0 {\n\t\treturn 0, io.ErrShortBuffer\n\t}\n\n\tfor {\n\t\tif len(r.buf) > 0 {\n\t\t\tif len(r.buf) <= len(b) {\n\t\t\t\tsz = len(r.buf)\n\t\t\t\tcopy(b, r.buf)\n\t\t\t\tr.buf = nil\n\t\t\t} else {\n\t\t\t\tcopy(b, r.buf)\n\t\t\t\tr.buf = r.buf[len(b):]\n\t\t\t\tsz = len(b)\n\t\t\t}\n\t\t\treturn sz, nil\n\t\t}\n\n\t\tvar ok bool\n\t\tif r.deadline.IsZero() {\n\t\t\tr.buf, ok = <-r.c\n\t\t} else {\n\t\t\ttimer := time.NewTimer(r.deadline.Sub(time.Now()))\n\t\t\tdefer timer.Stop()\n\n\t\t\tselect {\n\t\t\tcase r.buf, ok = <-r.c:\n\t\t\tcase <-timer.C:\n\t\t\t\treturn 0, context.DeadlineExceeded\n\t\t\t}\n\t\t}\n\t\tif len(r.buf) == 0 && !ok {\n\t\t\treturn 0, io.EOF\n\t\t}\n\t}\n}", "func (swp *SourceWorkerPool) GetOutputChannel() (chan map[string]interface{}, error) {\n\treturn swp.outputChannel, nil\n}", "func (p *Publisher) GetChannel() *amqp.Channel {\n\tp.publicMethodsLock.Lock()\n\tdefer p.publicMethodsLock.Unlock()\n\treturn p.getChannelWithoutLock()\n}", "func StreamCreateFile(data interface{}, offset int, flags Flags) (Channel, error) {\n\tvar ch C.DWORD\n\tswitch data := data.(type) {\n\tcase CBytes:\n\t\tch = C.BASS_StreamCreateFile(1, data.Data, culong(offset), culong(data.Length), cuint(flags))\n\tcase string:\n\t\tcstring := unsafe.Pointer(C.CString(data))\n\t\tdefer C.free(cstring)\n\t\tch = C.BASS_StreamCreateFile(0, cstring, culong(offset), 0, cuint(flags))\n\tcase []byte:\n\t\tcbytes := C.CBytes(data)\n\t\tch = C.BASS_StreamCreateFile(1, cbytes, culong(offset), culong(len(data)), cuint(flags))\n\t\t// unlike BASS_SampleLoad, BASS won't make a copy of the sample data internally, which means we can't just pass a pointer to the Go bytes. 
Instead we need to set a sync to free the bytes when the stream it's associated with is freed\n\t\tif ch != 0 {\n\t\t\tchannel := Channel(ch)\n\t\t\t_, err := channel.SetSync(SYNC_FREE, SYNC_ONETIME, 0, SyncprocFree, cbytes)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\t}\n\treturn channelToError(ch)\n}", "func (sc *SoundCloud) Stream(track string) (io.ReadCloser, error) {\n\t// Get the HTTP Stream\n\trsp, err := http.Get(sc.streamUrl(track).String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Createa http stream buffer\n\tbuff := buffer.HTTPBuffer(rsp)\n\tgo buff.Buffer() // Start buffering\n\tscs := &SoundCloudStream{\n\t\tbuffer: buff,\n\t\tdecoder: &mpa.Reader{Decoder: &mpa.Decoder{Input: buff}},\n\t}\n\treturn scs, nil\n}", "func (cc *CounterControl) StreamValues() (chan *CounterData, error) {\n\tentity := cc.counter.ReadWildcardRequest()\n\tentityList := []*p4V1.Entity{entity}\n\n\tcounterEntityCh, err := cc.control.Client.ReadEntities(entityList)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcdataChannel := make(chan *CounterData, cc.counter.Size)\n\tgo func() {\n\t\tdefer close(cdataChannel)\n\t\tfor e := range counterEntityCh {\n\t\t\tcounterData := getCounterData(e)\n\t\t\tcdataChannel <- &counterData\n\t\t}\n\t}()\n\n\treturn cdataChannel, nil\n}", "func (p *Pool) Consume() <-chan interface{} {\n\treturn p.c\n}", "func BufferedChannels(){\n\tc := make(chan int, 2)\n\tc <- 1\n\tc <- 2\n\tfmt.Println(<-c)\n\tfmt.Println(<-c)\n}", "func (std *LineReaderService) Read() (<-chan []byte, error) {\n\tmc := make(chan []byte, 0)\n\n\tstd.pub.Subscribe(mc)\n\n\treturn mc, nil\n}", "func (s *Chan) Pipe(rwc io.ReadWriteCloser) {\n\ts.connection = rwc\n\tgo s.readFromReader(rwc)\n\tgo s.writeToWriter(rwc)\n}", "func outputData(outputChannel chan string) {\n\n\tfor {\n\t\tdata := <-outputChannel\n\t\tfmt.Println(data)\n\t}\n}", "func (l *ChannelList) Get(key string) *Channel {\n\t// get a conn bucket\n\tb := l.Bucket(key)\n\tb.Lock()\n\tif c, ok := b.data[key]; ok {\n\t\tb.Unlock()\n\t\tChStat.IncrAccess()\n\t\treturn c\n\t}\n\tb.Unlock()\n\treturn nil\n}", "func (c *requestWillBeSentClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *webTransportConnectionEstablishedClient) GetStream() rpcc.Stream { return c.Stream }", "func (p *pool) get() (*channel, error) {\n\tif p.closed {\n\t\treturn nil, ErrPoolClosed\n\t}\n\n\tactiveChannel, ok := <-p.readyChannel\n\tif !ok {\n\t\treturn nil, ErrPoolClosed\n\t}\n\n\treturn activeChannel, nil\n}", "func (c *webSocketFrameErrorClient) GetStream() rpcc.Stream { return c.Stream }", "func (k *ChannelKeeper) Channel() *amqp.Channel {\n\treturn k.msgCh\n}", "func (this *FtpsClient) OpenFtpDataChannel(_FtpCommand_S string, _ExpectedReplyCode_i int) (rReplyCode_i int, rReplyMessage_S string, rRts error) {\n\trRts = this.sendRequestToFtpServerDataConn(_FtpCommand_S, _ExpectedReplyCode_i)\n\treturn\n}", "func (c *Client) StreamingDirect(ctx context.Context) (chan Event, error) {\n\treturn c.streaming(ctx, \"direct\", nil)\n}", "func (c *webSocketWillSendHandshakeRequestClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *webTransportCreatedClient) GetStream() rpcc.Stream { return c.Stream }", "func (ch *RingChannel) Out() <-chan interface{} {\n\treturn ch.output\n}", "func (c *remoteConn) OpenChannel(name string, data []byte) (ssh.Channel, error) {\n\tchannel, _, err := c.sconn.OpenChannel(name, data)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\treturn channel, nil\n}", "func (r *realTimer) 
C() <-chan time.Time {\n\treturn r.timer.C\n}", "func (c *cdcClient) recv() {\n\tc.debug(\"recv call\")\n\tdefer c.debug(\"recv return\")\n\n\tvar err error\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tc.shutdown(err)\n\t\t}\n\t\tclose(c.events)\n\t}()\n\n\tvar now time.Time\n\tfor {\n\t\t_, bytes, rerr := c.wsConn.ReadMessage()\n\t\tnow = time.Now()\n\t\tif err != nil {\n\t\t\tif websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) {\n\t\t\t\terr = rerr\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\t// CDC events should be the bulk of data we recv, so presume it's that.\n\t\tvar e CDCEvent\n\t\tif err = json.Unmarshal(bytes, &e); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t// If event ID is set (not empty), then it's a CDC event as expected\n\t\tif e.Id != \"\" {\n\t\t\tc.debug(\"cdc event: %#v\", e)\n\t\t\tselect {\n\t\t\tcase c.events <- e: // send CDC event to caller\n\t\t\tdefault:\n\t\t\t\tc.debug(\"caller blocked\")\n\t\t\t\tc.shutdown(ErrCallerBlocked)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\t// It's not a CDC event, so it should be a control message\n\t\t\tvar msg map[string]interface{}\n\t\t\tif err = json.Unmarshal(bytes, &msg); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif _, ok := msg[\"control\"]; !ok {\n\t\t\t\t// This shouldn't happen: data is not a CDC event or a control message\n\t\t\t\tc.shutdown(ErrBadData)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err = c.control(msg, now); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func (c *Channel) Channels() Channels {\n\treturn c.children\n}", "func (r *chanReader) Read(data []byte) (int, error) {\n\tvar ok bool\n\tfor {\n\t\tif len(r.buf) > 0 {\n\t\t\tn := copy(data, r.buf)\n\t\t\tr.buf = r.buf[n:]\n\t\t\tmsg := windowAdjustMsg{\n\t\t\t\tPeersId: r.clientChan.peersId,\n\t\t\t\tAdditionalBytes: uint32(n),\n\t\t\t}\n\t\t\treturn n, r.clientChan.writePacket(marshal(msgChannelWindowAdjust, msg))\n\t\t}\n\t\tr.buf, ok = <-r.data\n\t\tif !ok {\n\t\t\treturn 0, io.EOF\n\t\t}\n\t}\n\tpanic(\"unreachable\")\n}", "func bufferedChannelTest() {\n\tch := make(chan int, 2)\n\tch <- 1\n\tch <- 2\n\t// ch <- 3 \n\tfmt.Println(<-ch)\n\tfmt.Println(<-ch)\n}" ]
[ "0.6759161", "0.6602864", "0.61977434", "0.6079438", "0.6015766", "0.59815305", "0.5894958", "0.5827573", "0.5811892", "0.5789283", "0.5787107", "0.5769193", "0.5764194", "0.57620907", "0.5744001", "0.57320946", "0.5710497", "0.5659543", "0.56509525", "0.56507605", "0.5619693", "0.56017905", "0.55412376", "0.5511171", "0.55090964", "0.5497537", "0.5495607", "0.5485725", "0.54844636", "0.5469122", "0.54626715", "0.544634", "0.5438606", "0.5433448", "0.5426227", "0.5426032", "0.541403", "0.5402727", "0.5391055", "0.5363698", "0.53503805", "0.53499436", "0.5327847", "0.5324007", "0.53198254", "0.5317453", "0.53115785", "0.5304445", "0.5300533", "0.528647", "0.5275732", "0.5242419", "0.5225599", "0.5221367", "0.5218112", "0.52100986", "0.5196145", "0.5187939", "0.51850754", "0.51704204", "0.516902", "0.51667434", "0.516405", "0.5132859", "0.512949", "0.51228505", "0.51228505", "0.51204574", "0.5118935", "0.5117812", "0.510126", "0.5093387", "0.5090184", "0.5083214", "0.50773835", "0.50705665", "0.5065232", "0.5064193", "0.5061628", "0.5051974", "0.50492626", "0.5042226", "0.5035097", "0.5022692", "0.501978", "0.5019312", "0.5008553", "0.49940854", "0.4989732", "0.4983017", "0.49754548", "0.4958028", "0.49492064", "0.49402693", "0.4940178", "0.49344555", "0.49289474", "0.49203157", "0.49100748", "0.4905373" ]
0.53927493
38
run executes the map function against the iterator.
func (m *mapper) run() {
	for m.itr.NextIterval() {
		m.fn(m.itr, m)
	}
	close(m.c)
}
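The surrounding snippets in this dump (mapper.start, reducer.start, mapSum) show the lifecycle: start wires up the iterator and launches run in a goroutine, and the output channel is drained until run closes it. A minimal consumer sketch under those assumptions; the direct channel range and fmt.Println below are illustrative, not part of the record:

// Sketch only: assumes m.itr and m.fn (e.g. mapSum, which calls m.emit)
// were already set up by start(), as in the snippets nearby.
go m.run() // run closes m.c once the iterator is exhausted
for kv := range m.c {
	fmt.Println(kv) // each kv maps an encoded timestamp key to one emitted value
}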
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (conn *db) runMap(stmt Stmt, mapper MapMapper) (rowsReturned int, err error) {\n\tif err = conn.Connect(); err != nil {\n\t\treturn\n\t}\n\n\tvar (\n\t\tstmtx *sqlx.Stmt\n\t\trows *sqlx.Rows\n\t\tt time.Time\n\t)\n\n\tif conn.hasProfiling() {\n\t\tt = time.Now()\n\t}\n\n\tstmtx, err = preparex(conn, stmt)\n\tif err == nil {\n\t\tdefer stmtx.Close()\n\t\trows, err = stmtx.Queryx(stmt.Args()...)\n\t\tif err == nil {\n\t\t\tdefer rows.Close()\n\n\t\t\trow := map[string]any{}\n\t\t\tfor rows.Next() {\n\t\t\t\terr = rows.MapScan(row)\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tmapper(row)\n\t\t\t\trowsReturned++\n\t\t\t}\n\t\t} else if errors.Is(err, sql.ErrNoRows) {\n\t\t\tif !conn.env.ErrorNoRows {\n\t\t\t\terr = nil\n\t\t\t}\n\t\t}\n\t}\n\n\tif err != nil && conn.hasVerbose() {\n\t\tconn.logErr.Println(err.Error())\n\t}\n\n\tconn.profilingStmt(stmt, err, t)\n\treturn\n}", "func Run() {\n\tfor key, val := range funcMap {\n\t\targs := funcArgs[key]\n\t\tcall(val, args)\n\t}\n}", "func (l *lex) run() {\n\tfor state := lexMapKey; state != nil; {\n\t\tstate = state(l)\n\t}\n\tclose(l.tokens)\n}", "func (gm *gmap) run() {\n\t// Destruct gmap before exit.\n\tdefer func() {\n\t\tgm.raft.Stop()\n\t\tclose(gm.done)\n\t}()\n\t// Start gmap raft node.\n\tgo gm.raft.run()\n\t// Apply entries and snapshot get from raft.\n\tvar gmp gmapProgress\n\tfor {\n\t\tselect {\n\t\t// New apply.\n\t\tcase ap := <-gm.raft.applyc:\n\t\t\tgm.applyAll(&gmp, &ap)\n\t\t// gmap is closed.\n\t\tcase <-gm.ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}", "func (r *reducer) run() {\nloop:\n\tfor {\n\t\t// Combine all data from the mappers.\n\t\tdata := make(map[string][]interface{})\n\t\tfor _, m := range r.mappers {\n\t\t\tkv, ok := <-m.C()\n\t\t\tif !ok {\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t\tfor k, v := range kv {\n\t\t\t\tdata[k] = append(data[k], v)\n\t\t\t}\n\t\t}\n\n\t\t// Reduce each key.\n\t\tfor k, v := range data {\n\t\t\tr.fn(k, v, r)\n\t\t}\n\t}\n\n\t// Mark the channel as complete.\n\tclose(r.c)\n}", "func (p *MapToKeys) Run() {\n\tdefer p.CloseAllOutPorts()\n\tfor ip := range p.In().Chan {\n\t\tnewKeys := p.mapFunc(ip)\n\t\tip.AddKeys(newKeys)\n\t\tip.WriteAuditLogToFile()\n\t\tp.Out().Send(ip)\n\t}\n}", "func (f Filter) run(node *yaml.RNode) error {\n\tfor key, value := range f.Annotations {\n\t\tif err := node.PipeE(fsslice.Filter{\n\t\t\tFsSlice: f.FsSlice,\n\t\t\tSetValue: fsslice.SetEntry(key, value),\n\t\t\tCreateKind: yaml.MappingNode, // Annotations are MappingNodes.\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func _map(fn mapfn, chunks []string, c chan dict) {\n\tfor _, chunk := range chunks {\n\t\tgo fn(chunk, c)\n\t}\n}", "func (m *FlowMapper) Run() {\n\tm.flowMap = ReadFlowMap(m.mapfile)\n\tzips := GetZipNames(m.workdir)\n\tfor _, name := range zips {\n\t\tsourcePath := filepath.Join(m.workdir, name)\n\t\ttargetPath := filepath.Join(m.workdir, \"peflocus_\"+name)\n\t\tDeleteExisting(targetPath)\n\t\tlog.Println(\"INFO: map flows in\", sourcePath, \"to\", targetPath)\n\t\tm.doIt(sourcePath, targetPath)\n\t}\n}", "func (sm safeMap) run() {\n\tstore := make(map[string]interface{})\n\tfor command := range sm {\n\t\tswitch command.action {\n\t\tcase INSERT:\n\t\t\tstore[command.key] = command.value\n\t\tcase REMOVE:\n\t\t\tdelete(store, command.key)\n\t\tcase FLUSH:\n\t\t\tflush(store, command.keys)\n\t\tcase FIND:\n\t\t\tvalue, found := store[command.key]\n\t\t\tcommand.result <- findResult{value, found}\n\t\tcase COUNT:\n\t\t\tcommand.result <- 
len(store)\n\t\tcase TRUNCATE:\n\t\t\tclearMap(store)\n\t\tcase END:\n\t\t\tclose(sm)\n\t\t\tcommand.data <- store\n\t\t}\n\t}\n}", "func (a *aggregator) Run() {\n\tgo a.submitter()\n\n\tfor m := range a.in {\n\t\tfor _, out_m := range a.process(m) {\n\t\t\ta.out <- out_m\n\t\t}\n\t}\n}", "func doMap(\n\tjobName string, // the name of the MapReduce job\n\tmapTaskNumber int, // which map task this is\n\tinFile string,\n\tnReduce int, // the number of reduce task that will be run (\"R\" in the paper)\n\tmapF func(file string, contents string) []KeyValue,\n) {\n\tstream, err := ioutil.ReadFile(inFile)\n\tcheck_error(err)\n\n\tkeyVals := mapF(inFile, string(stream))\n\t\n\tresults := make(map[int][]KeyValue)\n\tfor _, kv := range keyVals {\n\t\t// Calculate R\n\t\tr := ihash(kv.Key) % nReduce\n\n\t\t// Map the results internally\n\t\tresults[r] = append(results[r], kv)\n\t}\n\n\tfor r, keyVals := range results {\n\t\toutputFileName := reduceName(jobName, mapTaskNumber, r)\n\t\tfile, err := os.Create(outputFileName)\n\t\tcheck_error(err)\n\t\tenc := json.NewEncoder(file)\n\n\t\tfor _, kv := range keyVals {\n\t\t\terr := enc.Encode(&kv)\n\t\t\tcheck_error(err)\n\t\t}\n\n\t\tfile.Close()\n\t}\n}", "func (n *Globals) Run() {\n\tfor _, node := range n.nodeMap {\n\t\tgo RunNode(node)\n\t}\n}", "func (conn *db) runMapRow(stmt Stmt, mapper MapMapper) (rowsReturned int, err error) {\n\tif err = conn.Connect(); err != nil {\n\t\treturn\n\t}\n\n\tvar (\n\t\tstmtx *sqlx.Stmt\n\t\tt time.Time\n\t\tvalues = map[string]any{}\n\t)\n\n\tif conn.hasProfiling() {\n\t\tt = time.Now()\n\t}\n\n\tstmtx, err = preparex(conn, stmt)\n\tif err == nil {\n\t\tdefer stmtx.Close()\n\n\t\terr = stmtx.QueryRowx(stmt.Args()...).MapScan(values)\n\t\tif err == nil {\n\t\t\tmapper(values)\n\t\t\trowsReturned = 1\n\t\t} else if errors.Is(err, sql.ErrNoRows) {\n\t\t\tif !conn.env.ErrorNoRows {\n\t\t\t\terr = nil\n\t\t\t}\n\t\t}\n\t}\n\n\tif err != nil && conn.hasVerbose() {\n\t\tconn.logErr.Println(err.Error())\n\t}\n\n\tconn.profilingStmt(stmt, err, t)\n\treturn\n}", "func (p *literalProcessor) run() {\n\tfor {\n\t\tselect {\n\t\tcase ch := <-p.done:\n\t\t\tclose(ch)\n\t\t\treturn\n\t\tcase p.c <- map[string]interface{}{\"\": p.val}:\n\t\t}\n\t}\n}", "func (e *binaryExprEvaluator) run() {\n\tfor {\n\t\t// Read LHS value.\n\t\tlhs, ok := <-e.lhs.C()\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\n\t\t// Read RHS value.\n\t\trhs, ok := <-e.rhs.C()\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\n\t\t// Merge maps.\n\t\tm := make(map[string]interface{})\n\t\tfor k, v := range lhs {\n\t\t\tm[k] = e.eval(v, rhs[k])\n\t\t}\n\t\tfor k, v := range rhs {\n\t\t\t// Skip value if already processed in lhs loop.\n\t\t\tif _, ok := m[k]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tm[k] = e.eval(float64(0), v)\n\t\t}\n\n\t\t// Return value.\n\t\te.c <- m\n\t}\n\n\t// Mark the channel as complete.\n\tclose(e.c)\n}", "func (m *M) Run() int", "func (m *mapper) start() {\n\tm.itr = m.executor.db.CreateIterator(m.seriesID, m.fieldID, m.typ,\n\t\tm.executor.min, m.executor.max, m.executor.interval)\n\tgo m.run()\n}", "func (s *scanner) run() {\n\tfor state := scanMain; state != nil; {\n\t\tstate = state(s)\n\t}\n\tclose(s.items)\n}", "func mapCall(reply *MyReply, mapf func(string, string) []KeyValue) {\n\n\tfmt.Println(reply.Content)\n\tfile, err := os.Open(reply.Content)\n\tdefer file.Close()\n\tif err != nil {\n\t\tlog.Fatalf(\"cannot open %v\", err)\n\t}\n\tcontent, err := ioutil.ReadAll(file)\n\tif err != nil {\n\t\tlog.Fatalf(\"cannot read %v\", err)\n\t}\n\n\tkva := 
mapf(reply.Content, string(content))\n\t//fmt.Println(kva)\n\t_ = kva\n\n\tfinishreply := caller(finishedMapJob)\n\n\tfmt.Println(\"task number %v done:\", finishreply.Content)\n\n}", "func (m *M) Run() int {}", "func (job *MapOnlyJob) Run() error {\n\tif job.NewMapperF == nil {\n\t\treturn errors.New(\"MapOnlyJob: NewMapperF undefined!\")\n\t}\n\tif job.Source == nil {\n\t\treturn errors.New(\"MapOnlyJob: Source undefined!\")\n\t}\n\ttotalPart := 0\n\tendss := make([][]chan error, 0, len(job.Source))\n\tfor i := range job.Source {\n\t\tpartCount, err := job.Source[i].PartCount()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tends := make([]chan error, 0, partCount)\n\t\tfor part := 0; part < partCount; part++ {\n\t\t\tend := make(chan error, 1)\n\t\t\tends = append(ends, end)\n\t\t\tgo func(i, part, totalPart int, end chan error) {\n\t\t\t\tend <- func() error {\n\t\t\t\t\tmapper := job.NewMapperF(i, part)\n\t\t\t\t\tkey, val := mapper.NewKey(), mapper.NewVal()\n\t\t\t\t\tcs := make([]sophie.Collector, 0, len(job.Dest))\n\t\t\t\t\tfor _, dst := range job.Dest {\n\t\t\t\t\t\tc, err := dst.Collector(totalPart)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn errorsp.WithStacksAndMessage(err, \"open collector for source %d part %d failed\", i, part)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdefer c.Close()\n\t\t\t\t\t\tcs = append(cs, c)\n\t\t\t\t\t}\n\t\t\t\t\titer, err := job.Source[i].Iterator(part)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn errorsp.WithStacksAndMessage(err, \" open source %d part %d failed\", i, part)\n\t\t\t\t\t}\n\t\t\t\t\tdefer iter.Close()\n\n\t\t\t\t\tfor {\n\t\t\t\t\t\tif err := iter.Next(key, val); err != nil {\n\t\t\t\t\t\t\tif errorsp.Cause(err) != io.EOF {\n\t\t\t\t\t\t\t\treturn errorsp.WithStacksAndMessage(err, \"next failed\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif err := mapper.Map(key, val, cs); err != nil {\n\t\t\t\t\t\t\tif errorsp.Cause(err) == EOM {\n\t\t\t\t\t\t\t\tlog.Print(\"EOM returned, exit early\")\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\treturn errorsp.WithStacksAndMessage(err, \"mapping %v %v failed\", key, val)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn errorsp.WithStacksAndMessage(mapper.MapEnd(cs), \"map end failed\")\n\t\t\t\t}()\n\t\t\t}(i, part, totalPart, end)\n\t\t\ttotalPart++\n\t\t}\n\t\tendss = append(endss, ends)\n\t}\n\tvar errReturned error\n\tfor _, ends := range endss {\n\t\tfor part, end := range ends {\n\t\t\tlog.Printf(\"Waiting for mapper %d...\", part)\n\t\t\tif err := <-end; err != nil {\n\t\t\t\tlog.Printf(\"Error returned for part %d: %v\", part, err)\n\t\t\t\terrReturned = err\n\t\t\t}\n\t\t\tlog.Printf(\"No error for mapper %d...\", part)\n\t\t}\n\t}\n\treturn errReturned\n}", "func Map(array []interface{}, iterator ResultIterator) []interface{} {\r\n\tvar result = make([]interface{}, len(array))\r\n\tfor index, data := range array {\r\n\t\tresult[index] = iterator(data, index)\r\n\t}\r\n\treturn result\r\n}", "func (ob *Observation) run(f interface{}, args ...interface{}) []interface{} {\n\tfv := reflect.ValueOf(f)\n\tif len(ob.Name) == 0 {\n\t\tif rf := runtime.FuncForPC(fv.Pointer()); rf != nil {\n\t\t\tob.Name = rf.Name()\n\t\t}\n\t}\n\n\tfvtype := fv.Type()\n\tif len(args) != fvtype.NumIn() {\n\t\tpanic(fmt.Errorf(\"Incorrect number of inputs to %v\", ob.Name))\n\t}\n\n\tinputs := []reflect.Value{}\n\tfor i, a := range args {\n\t\ttmp := reflect.ValueOf(a)\n\t\ttmptype := tmp.Type()\n\t\tin := fvtype.In(i)\n\t\tif tmptype != in {\n\t\t\tpanic(fmt.Errorf(\"Invalid input 
(%v) to function (expected %v)\",\n\t\t\t\ttmptype.Kind(),\n\t\t\t\tin.Kind(),\n\t\t\t))\n\t\t}\n\t\tinputs = append(inputs, tmp)\n\t}\n\n\tret := ob.make_call(fv, inputs)\n\tif ob.Panic != nil {\n\t\treturn nil\n\t}\n\n\tfor _, r := range ret {\n\t\tob.Outputs = append(ob.Outputs, r.Interface())\n\t}\n\treturn ob.Outputs\n}", "func (gd Grid) Map(fn func(Point, Cell) Cell) {\n\tif gd.Ug == nil {\n\t\treturn\n\t}\n\tw := gd.Ug.Width\n\tcells := gd.Ug.Cells\n\tyimax := gd.Rg.Max.Y * w\n\tfor y, yi := 0, gd.Rg.Min.Y*w; yi < yimax; y, yi = y+1, yi+w {\n\t\tximax := yi + gd.Rg.Max.X\n\t\tfor x, xi := 0, yi+gd.Rg.Min.X; xi < ximax; x, xi = x+1, xi+1 {\n\t\t\tc := cells[xi]\n\t\t\tp := Point{X: x, Y: y}\n\t\t\tcells[xi] = fn(p, c)\n\t\t}\n\t}\n}", "func (l *Lexer) run() {\n\tfor state := l.state; state != nil; {\n\t\tstate = state(l)\n\t}\n\tclose(l.items)\n}", "func (w *funcWrapper) Run() {\n\t(*w)()\n}", "func (l *lexer) run() {\nmainLoop:\n\tfor {\n\t\tif !processWhitespace(l) {\n\t\t\tbreak\n\t\t}\n\t\t//fmt.Println(\"testing\", string(l.peek()))\n\t\tfound := false\n\tprocessLoop:\n\t\tfor _, processFunc := range processFunctions {\n\t\t\t//fmt.Println(\"func =\", processFunc)\n\t\t\tresult := processFunc(l)\n\t\t\t//fmt.Println(\"peek = \", string(l.peek()))\n\t\t\tswitch result {\n\t\t\tcase resultMatch:\n\t\t\t\tfound = true\n\t\t\t\tbreak processLoop\n\t\t\tcase resultMatchError:\n\t\t\t\tbreak mainLoop\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tl.errorf(\"Invalid token: '%s'\", string(l.peek()))\n\t\t\tbreak\n\t\t}\n\t}\n\tl.emit(itemEOF)\n\tclose(l.items)\n}", "func (bf *brainfog) run() {\n\tfor bf.ip < len(bf.program) {\n\t\terr := bf.doInstruction()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tclose(bf.outCh)\n}", "func doMap(\n\tjobName string, // the name of the MapReduce job\n\tmapTaskNumber int, // which map task this is\n\tinFile string,\n\tnReduce int, // the number of reduce task that will be run (\"R\" in the paper)\n\tmapF func(file string, contents string) []KeyValue,\n) {\n\tcontent, err := ioutil.ReadFile(inFile)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tkeyValues := mapF(inFile, string(content))\n\treduceFiles := make(map[string]*os.File)\n\n\tfor _, kv := range keyValues {\n\t\treduceTaskNumber := ihash(kv.Key) % nReduce\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\treduceFileName := reduceName(jobName, mapTaskNumber, reduceTaskNumber)\n\n\t\tif reduceFiles[reduceFileName] == nil {\n\t\t\tf, err := os.OpenFile(reduceFileName, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\treduceFiles[reduceFileName] = f\n\t\t}\n\n\t\tf := reduceFiles[reduceFileName]\n\t\tenc := json.NewEncoder(f)\n\t\tenc.Encode(&kv)\n\t}\n\n\tfor _, f := range reduceFiles {\n\t\tf.Close()\n\t}\n}", "func (w *SimpleMapReduce) Map(mapFn MapFn) *SimpleMapReduce {\n w.mapFn = mapFn\n return w\n}", "func (ctx Context) Map(input chan float64, f MapFunc) (output chan float64) {\n\toutput = make(chan float64, ctx.StreamBufferSize)\n\n\tgo func() {\n\t\tdefer close(output)\n\n\t\tfor x := range input {\n\t\t\toutput <- f(x)\n\t\t}\n\t}()\n\n\treturn output\n}", "func (m *Mare) Map(mapFunc func(input interface{}) []MapOutput) *Mare {\n\tif m.mapWorkerCnt == 0 {\n\t\tm.mapWorkerCnt++\n\t}\n\tif m.mapOutChan != nil {\n\t\tpanic(\"Map already in progress !\")\n\t}\n\n\t// Start the map\n\tm.mapOutWorkers.Add(m.mapWorkerCnt)\n\tm.mapOutChan = make(chan MapOutput, m.mapWorkerCnt)\n\tfor i := 0; i < m.mapWorkerCnt; i++ {\n\t\tgo 
func() {\n\t\t\tdefer m.mapOutWorkers.Done()\n\t\t\tfor item := range m.mapInChan {\n\t\t\t\tfor _, output := range mapFunc(item) {\n\t\t\t\t\tif m.trace {\n\t\t\t\t\t\tlog.Printf(\"Emit %v with key %v\", output.Key, output.Value)\n\t\t\t\t\t}\n\t\t\t\t\tm.mapOutChan <- output\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\t// Wait for end of work and close\n\tgo func() {\n\t\tm.mapOutWorkers.Wait()\n\t\tclose(m.mapOutChan)\n\t}()\n\n\treturn m\n}", "func RunMapTask(req Task, namenodeID string) {\n\n\ttempFileDir := Config.LocalfileDir + \"/\" + Config.TempFile\n\n\tfor _, fileName := range req.FileList {\n\t\tfmt.Printf(\"Start Map Task for File %s\\n\", fileName)\n\n\t\t//Fetch SDFSfile to local file system\n\t\tGetFile([]string{fileName, fileName})\n\n\t\t//Scan file\n\t\tdecodedFileName := Config.DecodeFileName(fileName)\n\t\t\n\t\tdata, err := os.Open(Config.LocalfileDir + \"/\" + decodedFileName)\n\t\tdefer data.Close()\n\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Datanode.RunMapTask: src_file %s os.Open() error\\n\", decodedFileName)\n\t\t\tlog.Println(\"os.Open() error\")\n\t\t\treturn\n\t\t}\n\n\t\tvar scanner = bufio.NewScanner(data)\n\n\t\tvar lineCnt = 0\n\t\tvar buf = \"\"\n\n\t\tfor scanner.Scan() {\n\t\t\t//Deal with EOF\n\t\t\tif lineCnt < 10 {\n\t\t\t\tbuf += scanner.Text() + \"\\n\"\n\t\t\t\tlineCnt++\n\t\t\t} else {\n\t\t\t\t// MapFunc(req.TaskExe)\n\t\t\t\ttemp, err := os.Create(tempFileDir)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"Datanode.RunMapTask.Scanner: os.Create() error\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t_, err = temp.WriteString(buf)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"Datanode.RunMapTask: temp_file WriteString error\")\n\t\t\t\t\tlog.Println(\"temp_file WriteString error\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\ttemp.Close()\n\n\t\t\t\tcmd := exec.Command(Config.LocalfileDir+\"/\"+req.TaskExe, tempFileDir)\n\t\t\t\tres, err := cmd.Output()\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"Datanode.RunMapTask: exec.Command.Output Error\")\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tparseMapRes(res, req.Output)\n\n\t\t\t\tlineCnt = 0\n\t\t\t\tbuf = \"\"\n\t\t\t}\n\t\t}\n\n\t\tif lineCnt != 0 {\n\t\t\ttemp, err := os.Create(tempFileDir)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"os.Create() error\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t_, err = temp.WriteString(buf)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Datanode.RunMapTask: temp_file WriteString error\")\n\t\t\t\tlog.Println(\"temp_file WriteString error\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tcmd := exec.Command(Config.LocalfileDir+\"/\"+req.TaskExe, tempFileDir)\n\t\t\tres, err := cmd.Output()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Datanode.RunMapTask: exec.Command.Output Error\")\n\t\t\t\tfmt.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tparseMapRes(res, req.Output)\n\t\t}\n\n\t\tos.Remove(Config.LocalfileDir + \"/\" + decodedFileName)\n\t}\n\n\tos.Remove(tempFileDir)\n\n\tfmt.Printf(\"Map Task %d succeed!\\n\", req.TaskID)\n\n\t//When finish work, RPC namanode\n\taddr := Config.GetIPAddressFromID(namenodeID)\n\tclient := NewClient(addr + \":\" + Config.NamenodePort)\n\tclient.Dial()\n\n\tvar res int\n\tif err := client.rpcClient.Call(\"Namenode.SendWorkerFinishMsg\", Mem.LocalID, &res); err != nil {\n\t\tfmt.Println(\"Datanode.RPC.Namenode.SendWorkerFinishMsg() error\")\n\t}\n\n\tclient.Close()\n\n\treturn\n}", "func (l *lexer) run() {\n\tfor state := lexStart; state != nil; {\n\t\tstate = state(l)\n\t}\n}", "func (cl *Client) MapFunc(\n\tenv 
map[string]string,\n\tfn interface{},\n\targsList ...interface{},\n) []Result {\n\tresults := make([]Result, len(argsList))\n\n\twg := sync.WaitGroup{}\n\twg.Add(len(argsList))\n\n\tfmt.Fprint(os.Stderr, \"\\n\")\n\tfor i, args := range argsList {\n\t\tgo func(i int, args interface{}) {\n\t\t\tresult, err := cl.RunFunc(env, fn, args)\n\t\t\tif err != nil {\n\t\t\t\tresult = Result{\n\t\t\t\t\tCode: -1,\n\t\t\t\t\tOutput: fmt.Sprintf(\"Failed to run function: %v\", err),\n\t\t\t\t}\n\t\t\t}\n\t\t\tresults[i] = result\n\n\t\t\tif result.Code == 0 {\n\t\t\t\tfmt.Fprint(os.Stderr, \".\")\n\t\t\t} else {\n\t\t\t\tfmt.Fprint(os.Stderr, \"X\")\n\t\t\t}\n\n\t\t\twg.Done()\n\t\t}(i, args)\n\t}\n\twg.Wait()\n\tfmt.Fprint(os.Stderr, \"\\n\")\n\n\treturn results\n}", "func (mpd *MPD) Run() error {\n\tmpd.Info(\"maportd run\")\n\tfor key, mapper := range mpd.portMaps {\n\t\tmpd.Info(\"starting %s\", key)\n\t\tif err := mapper.Start(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func TestMap() {\n\txs := []a{1, 2, 3, 4, 5}\n\tbs := gunc.Map(func(x a) b { return x.(int) + 2 }, xs)\n\tfmt.Printf(\"mapped:: %s\", bs)\n\tfor i := range bs {\n\t\tif bs[i].(int) != xs[i].(int)+2 {\n\t\t\tlog.Fatalf(\"mapping failed:: expected %d got %d\", (xs[i].(int) + 2), bs[i].(int))\n\t\t}\n\t}\n\tlog.Println(\"Map succeeded...\")\n}", "func (l *LexInner) Run(f func(rune) bool) (acceptnum int) {\n\tfor l.One(f) {\n\t\tacceptnum++\n\t}\n\treturn\n}", "func (c policyTestCase) run(t *testing.T, pub, prv *base.ClusterContext, contextMap map[string]string) {\n\n\tt.Run(\n\t\tc.name,\n\t\tfunc(t *testing.T) {\n\t\t\tif c.skip != nil {\n\t\t\t\tif skipReason := c.skip(); skipReason != \"\" {\n\t\t\t\t\tt.Skipf(skipReason)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, step := range c.steps {\n\n\t\t\t\tstep.run(t, pub, prv, contextMap)\n\t\t\t\tif base.IsTestInterrupted() {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tbase.StopIfInterrupted(t)\n\t\t})\n}", "func (l *Lexer) run() {\n\tfor state := lexAction; state != nil; {\n\t\tstate = state(l)\n\t}\n\tclose(l.tokens)\n}", "func mapIter(_ *reflect.MapIter, mapVal reflect.Value) *reflect.MapIter {\n\tif !mapVal.IsValid() {\n\t\treturn nil\n\t}\n\treturn mapVal.MapRange()\n}", "func (l *Clogger) run() {\n\tvar m string\n\tfor m = range l.in {\n\t\tfmt.Fprint(l.w, m)\n\t}\n\treturn\n}", "func (l *lexer) run() {\r\n\tfor l.state = lexAny(l); l.state != nil; {\r\n\t\tl.state = l.state(l)\r\n\t}\r\n\tclose(l.tokens)\r\n}", "func (e *EventEmitter) Map(fn func(*Event) Data) Executor {\n\te.mapFunc = fn\n\treturn e\n}", "func (w *WemoMap) Run() {\n\tnodeMap := map[string]string{}\n\tlocations := map[string]string{}\n\tfor m := range w.Info {\n\t\tf := strings.Split(m.(string), \",\")\n\t\tnodeMap[f[0]] = f[1]\n fmt.Println(nodeMap)\n\t\tif len(f) > 2 {\n\t\t\tlocations[f[0]] = f[2]\n fmt.Println(locations)\n\t\t}\n\t}\n\n\tvar group int\n\tfor m := range w.In {\n\t\tw.Out.Send(m)\n fmt.Println(\"M: \", m)\n\t\tif data, ok := m.(map[string]int); ok {\n fmt.Println(\"Data: \", data)\n\t\t\tswitch {\n\t\t\tcase data[\"<RF12demo>\"] > 0:\n\t\t\t\tgroup = data[\"group\"]\n\t\t\tcase data[\"<node>\"] > 0:\n\t\t\t\tkey := fmt.Sprintf(\"RFg%di%d\", group, data[\"<node>\"])\n\t\t\t\tif loc, ok := locations[key]; ok {\n\t\t\t\t\tw.Out.Send(flow.Tag{\"<location>\", loc})\n\t\t\t\t}\n\t\t\t\tif tag, ok := nodeMap[key]; ok {\n\t\t\t\t\tw.Out.Send(flow.Tag{\"<dispatch>\", tag})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (g *Glimit) Run(f func()) {\n\tg.c <- struct{}{}\n\tgo func() 
{\n\t\tf()\n\t\t<-g.c\n\t}()\n}", "func (p *spanParser) run() {\n\tfor p.state = parseSpan; p.state != nil; {\n\t\tp.state = p.state(p)\n\t}\n\tclose(p.spanChan)\n}", "func (s Stream) Map(fn func(r Record) (Record, error)) Stream {\n\treturn s.Pipe(func() func(r Record) (Record, error) {\n\t\treturn fn\n\t})\n}", "func (s *JsonEntryCounter) Mapper(r io.Reader, w io.Writer) error {\n\tlog.Printf(\"map_input_file %s\", os.Getenv(\"map_input_file\"))\n\twg, out := mrproto.JsonInternalOutputProtocol(w)\n\n\t// for efficient counting, use an in-memory counter that flushes the least recently used item\n\t// less Mapper output makes for faster sorting and reducing.\n\tcounter := lru.NewLRUCounter(func(k interface{}, v int64) {\n\t\tout <- mrproto.KeyValue{k, v}\n\t}, 100)\n\n\tfor line := range mrproto.RawInputProtocol(r) {\n\t\tvar record map[string]json.RawMessage\n\t\tif err := json.Unmarshal(line, &record); err != nil {\n\t\t\tgomrjob.Counter(\"example_mr\", \"Unmarshal Error\", 1)\n\t\t\tlog.Printf(\"%s\", err)\n\t\t\tcontinue\n\t\t}\n\t\tgomrjob.Counter(\"example_mr\", \"Map Lines Read\", 1)\n\t\tcounter.Incr(\"lines_read\", 1)\n\t\tfor k, _ := range record {\n\t\t\tcounter.Incr(k, 1)\n\t\t}\n\t}\n\tcounter.Flush()\n\tclose(out)\n\twg.Wait()\n\treturn nil\n}", "func (inst *Instance) Run(input map[string]interface{}) (output map[string]interface{}, err error) {\n\n\t// Get the Scope of the CML pipeline.\n\t// Scope is the collection of the data in the CML\n\tscope, err := NewPipelineScope(input, inst.def.labels)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Log the time\n\tstart := time.Now()\n\n\t//Check the type of the input of the pipeline.\n\tfor key, _ := range inst.def.input {\n\n\t\ttemp, ok := inst.def.input[key].(PipelineInput)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\terr = types.ValidateType(temp.Type, input[key])\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t}\n\n\t//Run the tasks.\n\tfor key, task := range inst.def.tasks {\n\t\ttask.Position()\n\t\tscope, err = task.Eval(scope, inst.logger)\n\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error %s in task \\\"%s-%v\\\" \", err.Error(), task.Name(), key)\n\t\t}\n\n\t}\n\n\t// Set the output.\n\n\tif inst.def.output.Data != nil {\n\t\tmf := GetMapperFactory()\n\t\tmappings := make(map[string]interface{})\n\n\t\t// Type Switch\n\t\tswitch t := inst.def.output.Data.(type) {\n\t\tcase map[string]interface{}:\n\t\t\tfor key, val := range t {\n\t\t\t\tmappings[key] = val\n\t\t\t}\n\t\tdefault:\n\t\t\tmappings[\"data\"] = inst.def.output.Data\n\t\t}\n\n\t\t// Get the data from output expression\n\t\toutMapper, err := mf.NewMapper(mappings)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\toutput, err = outMapper.Apply(scope)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvar definedType data.Type\n\n\t\t// Check if the output is defined as dataframe or map.\n\t\tif inst.def.output.Type == \"dataframe\" || inst.def.output.Type == \"map\" {\n\t\t\tdefinedType, err = data.ToTypeEnum(\"object\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tgivenType, err := data.GetType(output)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif definedType != givenType {\n\t\t\t\treturn nil, fmt.Errorf(\"Type mismatch in output. 
Defined type [%s] passed type [%s]\", definedType, givenType)\n\t\t\t}\n\n\t\t\tinst.logger.Infof(\"The output took %v to calculate\", time.Since(start))\n\n\t\t\treturn output, nil\n\t\t}\n\n\t\tdefinedType, _ = data.ToTypeEnum(inst.def.output.Type)\n\n\t\tfor key, _ := range output {\n\n\t\t\tgivenType, err := data.GetType(output[key])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif definedType != givenType {\n\t\t\t\treturn nil, fmt.Errorf(\"Type mismatch in output. Defined type [%s] passed type [%s]\", definedType, givenType)\n\t\t\t}\n\t\t}\n\n\t}\n\tinst.logger.Infof(\"The output took %v to calculate\", time.Since(start))\n\n\treturn output, nil\n\n}", "func (p *parser) run() {\n\tfor parserState := parseStart; parserState != nil; {\n\t\tparserState = parserState(p)\n\t}\n\tclose(p.records)\n}", "func (isf intSliceFunctorImpl) Map(fn func(int) int) IntSliceFunctor {\n\tif len(isf.ints) < 100 {\n\t\tisf.ints = serialIntMapper(isf.ints, fn)\n\t\treturn isf\n\t}\n\tisf.ints = concurrentIntMapper(isf.ints, fn)\n\treturn isf\n}", "func (r RunFunc) Run(ctx context.Context) {\n\tr(ctx)\n}", "func (conn *db) runSlice(stmt Stmt, mapper SliceMapper) (rowsReturned int, err error) {\n\tif err = conn.Connect(); err != nil {\n\t\treturn\n\t}\n\n\tvar (\n\t\tstmtx *sqlx.Stmt\n\t\trows *sqlx.Rows\n\t\tvalues []any\n\t\tt time.Time\n\t)\n\n\tif conn.hasProfiling() {\n\t\tt = time.Now()\n\t}\n\n\tstmtx, err = preparex(conn, stmt)\n\tif err == nil {\n\t\tdefer stmtx.Close()\n\t\trows, err = stmtx.Queryx(stmt.Args()...)\n\t\tif err == nil {\n\t\t\tdefer rows.Close()\n\t\t\tfor rows.Next() {\n\t\t\t\tvalues, err = rows.SliceScan()\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tmapper(values)\n\t\t\t\trowsReturned++\n\t\t\t}\n\t\t} else if errors.Is(err, sql.ErrNoRows) {\n\t\t\tif !conn.env.ErrorNoRows {\n\t\t\t\terr = nil\n\t\t\t}\n\t\t}\n\t}\n\n\tif err != nil && conn.hasVerbose() {\n\t\tconn.logErr.Println(err.Error())\n\t}\n\n\tconn.profilingStmt(stmt, err, t)\n\treturn\n}", "func (p *AsmParser) run() {\n\tdefer close(p.Output)\n\n\tvar errs errorList\n\n\tif p.Error != nil {\n\t\treturn\n\t}\n\n\tvar i asm // instruction, reset to 0 after every write\n\tvar err error\n\tvar d, c, j asm // dest, comp, jump, OR together for final instruction\n\n\twriteResult := func() {\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\n\t\tif errs == nil {\n\t\t\tp.Output <- fmt.Sprintf(\"%.16b\", i)\n\t\t}\n\n\t\ti = 0\n\t}\n\n\tfor index, lex := range p.lexemes {\n\n\t\tswitch lex.instruction {\n\n\t\t// possible edge case, hitting EOF before an EOL\n\t\tcase asmEOF:\n\t\t\tfallthrough\n\n\t\tcase asmEOL:\n\t\t\tprev := p.previousInstruction(index)\n\n\t\t\tif prev.instruction != asmLABEL {\n\t\t\t\twriteResult()\n\t\t\t}\n\n\t\tcase asmAINSTRUCT:\n\t\t\tprev := p.previousInstruction(index)\n\n\t\t\tif prev.instruction == asmAINSTRUCT {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"WARNING - redundant loading of A-Register on line %d\\n\", prev.lineNum)\n\t\t\t}\n\n\t\t\ti, err = p.mapToA(lex)\n\n\t\tcase asmLABEL:\n\t\t\tindex += 2 // skip label and EOL\n\t\t\tcontinue\n\n\t\tcase asmJUMP:\n\t\t\tj, err = mapJmp(lex.value)\n\t\t\ti = i | j\n\n\t\tcase asmCOMP:\n\t\t\tc, err = mapCmp(lex.value)\n\t\t\ti = i | c\n\n\t\tcase asmDEST:\n\t\t\td, err = mapDest(lex.value)\n\t\t\ti = i | d\n\t\t}\n\n\t\tindex++\n\t}\n\n\tp.Error = errs.asError()\n}", "func (lx *lexer) run() {\n\tfor state := lxBase; state != nil; {\n\t\tstate = state(lx)\n\t}\n\tclose(lx.tokStream)\n}", "func (b *Zipf) 
Run(ctx context.Context) *Result {\n\tresults := NewResult()\n\tif b.client == nil {\n\t\tresults.err = fmt.Errorf(\"No client set for Zipf\")\n\t\treturn results\n\t}\n\toperation := \"SetBit\"\n\tif b.Operation == \"clear\" {\n\t\toperation = \"ClearBit\"\n\t}\n\n\tfor n := 0; n < b.Iterations; n++ {\n\t\t// generate IDs from Zipf distribution\n\t\trowIDOriginal := b.rowRng.Uint64()\n\t\tprofIDOriginal := b.columnRng.Uint64()\n\t\t// permute IDs randomly, but repeatably\n\t\trowID := b.rowPerm.Next(int64(rowIDOriginal))\n\t\tprofID := b.columnPerm.Next(int64(profIDOriginal))\n\n\t\tquery := fmt.Sprintf(\"%s(frame='%s', rowID=%d, columnID=%d)\", operation, b.Frame, b.MinRowID+int64(rowID), b.MinColumnID+int64(profID))\n\t\tstart := time.Now()\n\t\t_, err := b.ExecuteQuery(ctx, b.Index, query)\n\t\tresults.Add(time.Since(start), nil)\n\t\tif err != nil {\n\t\t\tresults.err = err\n\t\t\treturn results\n\t\t}\n\t}\n\treturn results\n}", "func Map[T, R any](it TryNextor[T], mapper func(T) R) TryNextor[R] {\n\treturn pureMap[T, R]{\n\t\tinner: it,\n\t\tmapper: mapper,\n\t}\n}", "func (l *lexer) run() {\n\tfor state := lexText; state != nil; {\n\t\tstate = state(l)\n\t}\n\tclose(l.items)\n}", "func (l *lexer) run() {\n\tfor l.state = lexAll; l.state != nil; {\n\t\tl.state = l.state(l)\n\t}\n\tclose(l.Items)\n}", "func (s *Int64) Iterate(fn func(int64)) {\n\tfor val := range s.m {\n\t\tfn(val)\n\t}\n}", "func doMap(\njobName string, // the name of the MapReduce job\nmapTaskNumber int, // which map task this is\ninFile string,\nnReduce int, // the number of reduce task that will be run (\"R\" in the paper)\nmapF func(file string, contents string) []KeyValue,\n) {\n\t// TODO:\n\t// You will need to write this function.\n\t// You can find the filename for this map task's input to reduce task number\n\t// r using reduceName(jobName, mapTaskNumber, r). The ihash function (given\n\t// below doMap) should be used to decide which file a given key belongs into.\n\t//\n\t// The intermediate output of a map task is stored in the file\n\t// system as multiple files whose name indicates which map task produced\n\t// them, as well as which reduce task they are for. Coming up with a\n\t// scheme for how to store the key/value pairs on disk can be tricky,\n\t// especially when taking into account that both keys and values could\n\t// contain newlines, quotes, and any other character you can think of.\n\t//\n\t// One format often used for serializing data to a byte stream that the\n\t// other end can correctly reconstruct is JSON. You are not required to\n\t// use JSON, but as the output of the reduce tasks *must* be JSON,\n\t// familiarizing yourself with it here may prove useful. You can write\n\t// out a data structure as a JSON string to a file using the commented\n\t// code below. The corresponding decoding functions can be found in\n\t// common_reduce.go.\n\t//\n\t// enc := json.NewEncoder(file)\n\t// for _, kv := ... {\n\t// err := enc.Encode(&kv)\n\t//\n\t// Remember to close the file after you have written all the values!\n\n\n\t//setp 1 read file\n\tcontents, err := ioutil.ReadFile(inFile)\n\tif err != nil {\n\t\tlog.Fatal(\"do map error for inFile \",err)\n\t}\n\t//setp 2 call user user-map method ,to get kv\n\tkvResult := mapF(inFile, string(contents))\n\n\t/**\n\t * setp 3 use key of kv generator nReduce file ,partition\n\t * a. create tmpFiles\n\t * b. create encoder for tmpFile to write contents\n\t * c. 
partition by key, then write tmpFile\n\t */\n\n\tvar tmpFiles [] *os.File = make([] *os.File, nReduce)\n\tvar encoders [] *json.Encoder = make([] *json.Encoder, nReduce)\n\n\tfor i := 0; i < nReduce; i++ {\n\t\ttmpFileName := reduceName(jobName,mapTaskNumber,i)\n\t\ttmpFiles[i],err = os.Create(tmpFileName)\n\t\tif err!=nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tdefer tmpFiles[i].Close()\n\t\tencoders[i] = json.NewEncoder(tmpFiles[i])\n\t\tif err!=nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tfor _ , kv := range kvResult {\n\t\thashKey := int(ihash(kv.Key)) % nReduce\n\t\terr := encoders[hashKey].Encode(&kv)\n\t\tif err!=nil {\n\t\t\tlog.Fatal(\"do map encoders \",err)\n\t\t}\n\t}\n\n}", "func (l *lexer) run() {\n\tfor l.state = lexRule; l.state != nil; {\n\t\tl.state = l.state(l)\n\t}\n\tclose(l.items)\n}", "func mapSum(itr Iterator, m *mapper) {\n\tn := float64(0)\n\tfor k, v := itr.Next(); k != 0; k, v = itr.Next() {\n\t\tn += v.(float64)\n\t}\n\tm.emit(itr.Time(), n)\n}", "func InvokeMap(cases map[string]interface{}) {\n\trunenv := runtime.CurrentRunEnv()\n\tdefer runenv.Close()\n\n\tif fn, ok := cases[runenv.TestCase]; ok {\n\t\tinvoke(runenv, fn)\n\t} else {\n\t\tmsg := fmt.Sprintf(\"unrecognized test case: %s\", runenv.TestCase)\n\t\tpanic(msg)\n\t}\n}", "func Run() {\n\tmu.Lock()\n\tfns := funcs\n\tfuncs = nil\n\tmu.Unlock()\n\tfor i := len(fns) - 1; i >= 0; i-- {\n\t\tfns[i]()\n\t}\n}", "func Map[T any, R any](collection []T, iteratee func(T, int) R) []R {\n\tresult := make([]R, len(collection))\n\n\tfor i, item := range collection {\n\t\tresult[i] = iteratee(item, i)\n\t}\n\n\treturn result\n}", "func mapiterinit(t unsafe.Pointer, m unsafe.Pointer, it *hiter)", "func (r *Runner) run() {\n\tfor {\n\t\ttask := r.rq.Pop()\n\t\tr.process(task)\n\t}\n}", "func (sf JobFunc) Run() {\n\tsf()\n}", "func (f *Map) Call(line int, i *Interpreter, arguments []interface{}) (interface{}, error) {\n\tfun, ok := arguments[0].(Function)\n\tif !ok {\n\t\treturn nil, &executionError{line, \"<map> expects a function as first parameter\"}\n\t}\n\n\tif fun.Arity() != 1 {\n\t\treturn nil, &executionError{line, \"<map> expects a function which accepts one argument\"}\n\t}\n\n\tlist, ok := arguments[1].(List)\n\tif !ok {\n\t\treturn nil, &executionError{line, \"<map> expects a list as second parameter\"}\n\t}\n\n\tvar mappedElements []interface{}\n\n\trestOfList := list\n\n\tfor restOfList.Len() > 0 {\n\t\tvar args []interface{}\n\t\targs = append(args, restOfList.First())\n\n\t\tnewEl, err := fun.Call(line, i, args)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tmappedElements = append(mappedElements, newEl)\n\n\t\trestOfList = restOfList.Rest()\n\t}\n\n\treturn NewArrayList(mappedElements), nil\n}", "func (p *SingleLineParser) run() {\n\tfor input := range p.inputChan {\n\t\tp.process(input)\n\t}\n\tp.lineHandler.Stop()\n}", "func (m *Module) Run(fn interface{}) {\n\ttransformedFunc, err := MakeFuncInjectable(fn)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tm.Call(\"run\", transformedFunc)\n}", "func (c *ConsensusState) run() {\n\tfor {\n\t\tselect {\n\t\tcase op := <-c.accessOp:\n\t\t\tlogger.Debugf(\"cycle %v execute op\", c.cycleId)\n\t\t\top.Execute()\n\t\t}\n\t}\n}", "func run(instance apply.Strategy, recorded, local, remote, expected map[string]interface{}) {\n\trunWith(instance, recorded, local, remote, expected, fakeResources)\n}", "func (l *lexer) run() {\n\tfor state := lexBlock; state != nil; {\n\t\tstate = state(l)\n\t}\n\tclose(l.items) // No more tokens will be delivered.\n}", "func 
doMap(\n\tjobName string, // The name of the MapReduce job\n\tmapTaskNumber int, // Which map task this is\n\tinFile string, // File name of the input file.\n\tnReduce int, // The number of reduce task that will be run (\"R\" in the paper)\n\tmapF func(file string, contents string) []KeyValue,\n) {\n\tfileContent, err := ioutil.ReadFile(inFile)\n\tif err != nil {\n\t\tfmt.Printf(\n\t\t\t\"Failed to read file=%s, err=%s\\n\",\n\t\t\tinFile,\n\t\t\terr.Error())\n\t\tpanic(err)\n\t}\n\n\t// Map file content to key-value pairs.\n\tkvs := mapF(inFile, string(fileContent))\n\n\t// Creates per-reducer JSON serializer.\n\tencoders := make([]*json.Encoder, nReduce)\n\tfor i := 0; i < nReduce; i++ {\n\t\toutputFileName := reduceName(jobName, mapTaskNumber, i)\n\n\t\treducerFile, err := os.OpenFile(\n\t\t\toutputFileName, os.O_CREATE|os.O_WRONLY, 0777)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\n\t\t\t\t\"Faild to open file=%s in write mode, err=%s\\n\",\n\t\t\t\toutputFileName, err.Error())\n\t\t\tpanic(err)\n\t\t}\n\n\t\tdefer reducerFile.Close()\n\n\t\tencoders[i] = json.NewEncoder(reducerFile)\n\t}\n\n\t// Put each key-value pair to its corresponding reducer file\n\t// sharded by key.\n\tfor _, kv := range kvs {\n\t\treducerTaskNumber := ihash(kv.Key) % uint32(nReduce)\n\t\terr := encoders[reducerTaskNumber].Encode(&kv)\n\n\t\tif err != nil {\n\t\t\tfmt.Printf(\n\t\t\t\t\"Failed to encode (k=%s, v=%s), err=%s\\n\",\n\t\t\t\tkv.Key,\n\t\t\t\tkv.Value,\n\t\t\t\terr.Error())\n\t\t\tpanic(err)\n\t\t}\n\t}\n}", "func (l *lexer) run() {\n\tfor l.state = lexText; l.state != nil; {\n\t\tl.state = l.state(l)\n\t}\n}", "func (l *lexer) run() {\n\tfor l.state = lexText; l.state != nil; {\n\t\tl.state = l.state(l)\n\t}\n}", "func Map[I, O any](input <-chan I, mapFunc func(element I) O) <-chan O {\n\toutput := make(chan O)\n\tgo func() {\n\t\tdefer close(output)\n\n\t\tfor element := range input {\n\t\t\toutput <- mapFunc(element)\n\t\t}\n\t}()\n\n\treturn output\n}", "func TestMapInt(t *testing.T) {\n\tt.Parallel()\n\tvar tests = []struct {\n\t\ts []int\n\t\texpected []int\n\t}{\n\t\t{[]int{0, 1, 2}, []int{0, 2, 4}},\n\t\t{[]int{-1}, []int{-2}},\n\t\t{[]int{}, []int{}},\n\t}\n\tfor _, test := range tests {\n\t\tactual := primitives.MapInt(test.s, func(i int) int {\n\t\t\treturn i * 2\n\t\t})\n\t\tassert.True(t, primitives.EqSlices(&actual, &test.expected), \"Expected MapInt(%q, fn) to be %q, got %v\", test.s, test.expected, actual)\n\t}\n}", "func reflect_mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer, hiter unsafe.Pointer)", "func (l *Lexer) run() {\n\tdefer close(l.items)\n\teor := len(l.rec.States) - 1\n\tfor {\n\t\tfor i, state := range l.rec.States {\n\t\t\tif !state.StateFn(l, state.ItemType, state.Emit) {\n\t\t\t\tl.rec.ErrorFn(l)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif i == eor || l.eof {\n\t\t\t\tl.Emit(ItemEOR)\n\t\t\t}\n\t\t}\n\t\tif l.Peek() == EOF {\n\t\t\tl.Emit(ItemEOF)\n\t\t\tbreak\n\t\t}\n\t}\n}", "func (r Range) iterate(fn func(*buffer.View)) {\n\tr.pk.buf.SubApply(r.offset, r.length, fn)\n}", "func run(name string, b *testing.B, count int, fn func(buf *Buffer, r *Reader)) {\r\n\tb.Run(name, func(b *testing.B) {\r\n\t\tbuf := NewBuffer(count * 20)\r\n\t\tr := NewReader()\r\n\t\tb.ReportAllocs()\r\n\t\tb.ResetTimer()\r\n\t\tfor n := 0; n < b.N; n++ {\r\n\t\t\tbuf.Reset(\"test\")\r\n\t\t\tfn(buf, r)\r\n\t\t}\r\n\t})\r\n}", "func (t *Subrogationcode) Run(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n\treturn t.Invoke(stub, function, args)\n}", "func (r 
*reducer) start() {\n\tfor _, m := range r.mappers {\n\t\tm.start()\n\t}\n\tgo r.run()\n}", "func (b *QuerySnipBroadcaster) Run() {\n\tfor {\n\t\ts := <-b.in\n\t\tfor _, recipient := range b.recipients {\n\t\t\trecipient <- s\n\t\t}\n\t}\n}", "func Run(policy policy.Policy, eventMapper func(events.Event) effects.Effect) error {\n\treturn ApplyPolicy(policy, eventMapper).Run()\n}", "func Map(ctx context.Context,\n\tf func(args ...interface{}) interface{},\n\tinStreams ...<-chan interface{}) <-chan interface{} {\n\ts := make(chan interface{})\n\tgo func() {\n\t\tdefer close(s)\n\t\tfor {\n\t\t\ta := []interface{}{}\n\t\t\tfor i := 0; i < len(inStreams); i++ {\n\t\t\t\tselect {\n\t\t\t\tcase x := <-inStreams[i]:\n\t\t\t\t\ta = append(a, x)\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\ts <- f(a...)\n\t\t}\n\t}()\n\treturn s\n}", "func (tq *Query) MapScan(m map[string]interface{}) error {\n\tspan := tq.newChildSpan(tq.ctx)\n\terr := tq.Query.MapScan(m)\n\ttq.finishSpan(span, err)\n\treturn err\n}", "func (s *Stream) Map(f interface{}) *Stream {\n\top, err := unary.MapFunc(f)\n\tif err != nil {\n\t\ts.drainErr(err)\n\t\treturn s\n\t}\n\treturn s.Transform(op)\n}", "func MapRawQuery(itr Iterator) interface{} {\n\tvar values []*rawQueryMapOutput\n\tfor k, v := itr.Next(); k != -1; k, v = itr.Next() {\n\t\tval := &rawQueryMapOutput{k, v}\n\t\tvalues = append(values, val)\n\t}\n\treturn values\n}", "func (c *Cuckoo) Map(iter func(c *Cuckoo, key Key, val Value) (stop bool)) {\n\tif c.emptyKeyValid {\n\t\titer(c, c.emptyKey, c.emptyValue)\n\t}\n\n\tfor _, t := range c.tables {\n\t\tfor _, s := range t.buckets {\n\t\t\tfor _, b := range s {\n\t\t\t\tif b.key != c.emptyKey {\n\t\t\t\t\tif iter(c, b.key, b.val) {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (req *QueryRequest) Run(\n\tapi *elasticsearch.Client,\n\to ...func(*esapi.SearchRequest),\n) (res *esapi.Response, err error) {\n\tvar b bytes.Buffer\n\terr = json.NewEncoder(&b).Encode(req.Map())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\topts := append([]func(*esapi.SearchRequest){api.Search.WithBody(&b)}, o...)\n\n\treturn api.Search(opts...)\n}", "func (self *averageCache) run() {\n\tvar flushLimit int\n\tvar dataPoints []*whisper.TimeSeriesPoint\n\tfor {\n\t\tselect {\n\t\tcase <- self.closeChan: // The cache is ordered to close\n\t\t\tlog.Debug(\"Close signal\")\n\t\t\tself.close()\n\t\tcase flushLimit = <- self.flushChan: // A flush is queued\n\t\t\tlog.Debug(\"Flush Signal\")\n\t\t\tself.flush(flushLimit)\n\t\tcase dataPoints = <- self.inputChan: // An insert is queued\n\t\t\tlog.Debug(\"Data Signal\")\n\t\t\tself.insert(dataPoints)\n\t\t}\n\t}\n}", "func (d *Dump) Map(f func(item Item) error) error {\n\td.mutex.Lock()\n\tdefer d.mutex.Unlock()\n\n\tvar err error\n\tfor _, i := range d.items {\n\t\tif err = f(i); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif d.persist == PERSIST_WRITES {\n\t\treturn d.save()\n\t}\n\n\treturn nil\n}", "func (tokens Tokens) Map(fn func(string) string) Tokens {\n\tres := make(Tokens, len(tokens))\n\tfor i := range tokens {\n\t\tres[i] = fn(tokens[i])\n\t}\n\treturn res\n}", "func (this *Connection) run() {\n\tgo this.routineMain()\n}" ]
[ "0.68111986", "0.66373616", "0.652552", "0.6495338", "0.6436401", "0.63472146", "0.63425344", "0.63047", "0.62997466", "0.61346835", "0.6010561", "0.58976614", "0.5892991", "0.5884971", "0.58690274", "0.5847752", "0.58225423", "0.5793468", "0.5769612", "0.5762774", "0.57327944", "0.57198346", "0.5705999", "0.5694741", "0.56913996", "0.56601334", "0.5642414", "0.5615923", "0.56003314", "0.55831283", "0.55787414", "0.55427617", "0.5498346", "0.54948485", "0.5447714", "0.54432887", "0.5434674", "0.5432373", "0.5428736", "0.54050255", "0.5369757", "0.53681475", "0.5358741", "0.5322334", "0.53064597", "0.53033984", "0.53008455", "0.5299517", "0.52989566", "0.5291884", "0.52871114", "0.52830386", "0.5279788", "0.5278636", "0.52628857", "0.5262695", "0.525263", "0.5250975", "0.52383214", "0.5237991", "0.52376676", "0.5235107", "0.5214063", "0.5211766", "0.5211595", "0.52042663", "0.5196575", "0.5195299", "0.51945394", "0.51902807", "0.51901585", "0.5184115", "0.51839703", "0.51819485", "0.5174873", "0.5162802", "0.51593804", "0.51588786", "0.5155498", "0.5155498", "0.51393443", "0.5124645", "0.51198125", "0.5092679", "0.5089343", "0.5088026", "0.5083985", "0.50819916", "0.5072578", "0.5071333", "0.50672203", "0.506065", "0.5056148", "0.5054239", "0.5052492", "0.5047695", "0.50428516", "0.50381076", "0.5035496", "0.50351113" ]
0.816427
0
emit sends a value to the mapper's output channel.
func (m *mapper) emit(key int64, value interface{}) {
	// Encode the timestamp to the beginning of the key.
	binary.BigEndian.PutUint64(m.key, uint64(key))

	// OPTIMIZE: Collect emit calls and flush all at once.
	m.c <- map[string]interface{}{string(m.key): value}
}
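A quick worked example of the key encoding above, assuming m.key is an 8-byte scratch buffer (binary.BigEndian.PutUint64 panics on anything shorter) and using only encoding/binary:

key := make([]byte, 8)
var ts int64 = 1000000000
binary.BigEndian.PutUint64(key, uint64(ts))
// key is now [0x00 0x00 0x00 0x00 0x3B 0x9A 0xCA 0x00]; string(key) becomes
// the map key, and a consumer recovers the timestamp with
// int64(binary.BigEndian.Uint64([]byte(k))).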
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *reducer) emit(key string, value interface{}) {\n\tr.c <- map[string]interface{}{key: value}\n}", "func (c channelConveyor) Emit(v interface{}) error {\n\tc.outputCh <- v\n\n\treturn nil\n}", "func (b Broadcaster) Write(v interface{}) {\n\tutils.Debugf(\"Sending %v\\n\", v)\n\tb.Sendc <- v // write value on send channel\n}", "func (p *blockParser) emit(b Block) {\n\tp.blockChan <- b\n\tp.start = p.cur\n}", "func (bus *EventBus) Emit(msg Message) {\n\tbus.input <- msg\n}", "func (s *Scanner) emit(t Token) {\n\ts.Items <- &Item{Lit: s.TokenText(), Pos: s.Position, Tok: t}\n}", "func (s *server) send(value interface{}) (interface{}, error) {\n\tevent := &ev{target: value, c: make(chan error, 1)}\n\ts.c <- event\n\terr := <-event.c\n\treturn event.returnValue, err\n}", "func (lx *Lexer) emit(t token.Type) {\n\tlx.tokens <- token.Token{\n\t\tType: t,\n\t\tVal: lx.input[lx.start:lx.pos],\n\t\tLine: lx.line,\n\t}\n\tlx.start = lx.pos\n}", "func emitOutput(ctx context.Context, n *node) stateFn {\n\tif n == nil || n.outputC == nil { // OMIT\n\t\treturn nil // OMIT\n\t} // OMIT\n\tselect {\n\tcase <-ctx.Done():\n\t\tn.err = ctx.Err()\n\t\treturn nil\n\tcase n.outputC <- n.output:\n\t}\n\treturn nil\n}", "func (c *Connection) Emit(eventName string, data interface{}) {\n\tvar ev event\n\tev.Name = eventName\n\tev.Data = data\n\tc.send <- ev\n}", "func (p *program) doWriteOutput(i *instruction) {\n if p.outChannel != nil {\n p.outChannel <- i.params[0].value\n } else {\n p.dataStack = append(p.dataStack, i.params[0].value)\n }\n p.position += i.length\n\n if p.haltOnOutput {\n p.halt = true\n }\n}", "func (s Sink) Output(o Output) {\n\ts(o)\n}", "func (e *encoder) emit(bits, nBits uint32) {\n\tnBits += e.nBits\n\tbits <<= 32 - nBits\n\tbits |= e.bits\n\tfor nBits >= 8 {\n\t\tb := uint8(bits >> 24)\n\t\te.writeByte(b)\n\t\tif b == 0xff {\n\t\t\te.writeByte(0x00)\n\t\t}\n\t\tbits <<= 8\n\t\tnBits -= 8\n\t}\n\te.bits, e.nBits = bits, nBits\n}", "func (r *Relay) Emit(ctx context.Context) {\n\n}", "func (s Sequence) Output(c SeqChan) {s.Do(func(el El){c <- el})}", "func (a *Actor) Send(m string) { a.input <- m }", "func (l *reader) emit(t itemType) {\n\tl.items <- item{t, l.current.String()}\n\tl.current.Reset()\n\tl.width = 0\n}", "func (l *Lexer) emit(t TokenType) {\n\tl.tokens <- Token{t, l.start, l.input[l.start:l.pos]}\n\tl.start = l.pos\n}", "func (ms *metricSender) SendValue(name string, value float64, unit string) error {\n\treturn ms.eventEmitter.Emit(&events.ValueMetric{Name: &name, Value: &value, Unit: &unit})\n}", "func (b *Broadcaster) Write(v interface{}) {\n\tc := make(chan message, 1)\n\tb.mx.Lock()\n\tdefer b.mx.Unlock()\n\n\tb.c <- message{v, c}\n\tb.c = c\n}", "func (this Client) emit(message interface{}) {\n mu.Lock()\n for _, client := range clients {\n websocket.JSON.Send(client.Websocket, message)\n }\n mu.Unlock()\n}", "func (fmp *FlatMap) OutputChan() <-chan interface{} {\n\treturn fmp.out\n}", "func (l *Lexer) emit(t tokenType) {\n\tl.tokens <- NewToken(t, l.start, l.input[l.start:l.pos])\n\tl.start = l.pos\n}", "func (l *lexer) emit(t tokenType) {\n\ti := token{t, l.start, l.input[l.start:l.pos]}\n\tl.start = l.pos\n\tl.items <- i\n}", "func (b *Broadcaster) Send(v interface{}) { b.Sendc <- v }", "func (l *Lexer) emit(t TokenType) {\n\tl.tokens <- Token{t, l.cache()}\n\tl.start = l.pos\n}", "func (s *scanner) emit(t token) {\n\ts.items <- tokenRef{t, s.start, s.input[s.start:s.pos]}\n\ts.start = s.pos\n}", "func (a aio) output() float64 {\n\tsCh := make(chan 
float64)\n\ta.oCh <- sCh\n\treturn <-sCh\n}", "func (l *lexer) emit(t TokenType) {\r\n\tl.tokens <- Token{t, l.start, l.input[l.start:l.pos], l.line}\r\n\tl.start = l.pos //move to current pos\r\n}", "func (l *Lexer) Emit(t Type, value interface{}) {\n\tl.q.push(Item{\n\t\tType: t,\n\t\tPos: l.S,\n\t\tValue: value,\n\t})\n\tl.updateStart()\n}", "func (l *lexer) emit(t itemType) {\n\tl.items <- item{t, l.input[l.start:l.pos]}\n\tl.start = l.pos\n}", "func (c *ClickhouseOutput) Emit(event map[string]interface{}) {\n\tc.mux.Lock()\n\tc.events = append(c.events, event)\n\tif len(c.events) < c.bulk_actions {\n\t\tc.mux.Unlock()\n\t\treturn\n\t}\n\n\tevents := c.events\n\tc.events = make([]map[string]interface{}, 0, c.bulk_actions)\n\tc.mux.Unlock()\n\n\tc.bulkChan <- events\n}", "func (m *metricBigipNodeDataTransmitted) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (l *lexer) emit(t itemType) {\n\tl.items <- item{t, l.pos, l.input[l.start:l.pos]}\n\tl.start = l.pos\n}", "func (p *spanParser) emit(s Span) {\n\tp.spanChan <- s\n\tp.start = p.cur\n}", "func (lex *Lexer) emit(it TokenType) {\n\tlex.tokens <- Token{it, lex.input[lex.start:lex.pos]}\n\tlex.start = lex.pos\n}", "func (l *Lexer) emit(t itemType) {\n\tl.items <- item{t, l.blob()}\n\tl.start = l.pos\n}", "func (l *lexer) emit(t itemType) {\n\tl.items <- item{t, l.start, l.input[l.start:l.pos]}\n\tl.start = l.pos\n}", "func (l *lexer) emit(t itemType) {\n\tl.items <- item{t, l.start, l.input[l.start:l.pos]}\n\tl.start = l.pos\n}", "func (l *lexer) emit(t itemType) {\n\tl.items <- item{t, l.start, l.input[l.start:l.pos]}\n\tl.start = l.pos\n}", "func (m *metricBigipVirtualServerDataTransmitted) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func writer(coord string) {\n\tbroadcast <- coord\n}", "func (m *metricRedisClientsMaxOutputBuffer) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (l *lexer) emit(k Token) {\n\ti := Item{T: k, Val: l.input[l.start:l.position]}\n\tl.items <- i\n\tl.ignore() // reset our scanner now that we've dispatched a segment\n}", "func (l *lexer) emit(k Token) {\n\ti := Item{T: k, Val: l.input[l.start:l.position]}\n\tl.items <- i\n\tl.ignore() // reset our scanner now that we've dispatched a segment\n}", "func (l *lexer) emit() string {\n ret := l.input[l.start:l.pos]\n l.start = l.pos\n return ret\n}", "func (l *lexer) emit(t token.ItemType) {\n\tl.Items <- token.Token{t, l.start, l.input[l.start:l.pos]}\n\tl.start = l.pos\n}", "func (l *lexer) emit(t itemType) {\n\tl.items <- item{t, l.start, l.input[l.start:l.pos], l.startLine}\n\tl.start = l.pos\n\tl.startLine = l.line\n}", "func (c *IChan) Send(value []byte) error {\n\tcheckState(c.input)\n\treturn c.send(value)\n}", "func (e *Stream) Emit(data string) error {\n\t_, err := fmt.Fprintf(e.writer, \"data: %s\\n\\n\", data)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te.flusher.Flush()\n\n\treturn nil\n}", "func Emit(conn *dbus.Conn, s Signal) error {\n\treturn conn.Emit(s.path(), s.Interface()+\".\"+s.Name(), s.values()...)\n}", "func (l *lexer) emit(t tokenType) {\n\tl.tokens <- token{t, l.start, l.runeCnt, 
string(l.input[l.start:l.pos])}\n\tl.start = l.pos\n\tl.runeCnt = 0\n}", "func (t *Tokeniser) emit(typ Type) {\n\ttknEmitted := t.token(typ)\n\tt.prevToken = &tknEmitted\n\tt.Tokens.Push(tknEmitted)\n\tt.ignore()\n}", "func (n *JsonStream) Send(v interface{}) error {\n\tif n.ctx.Err() != nil {\n\t\treturn n.ctx.Err()\n\t}\n\n\tvar buf bytes.Buffer\n\tenc := codec.NewEncoder(&buf, structs.JsonHandleWithExtensions)\n\terr := enc.Encode(v)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error marshaling json for stream: %w\", err)\n\t}\n\n\tselect {\n\tcase <-n.ctx.Done():\n\t\treturn fmt.Errorf(\"error stream is no longer running: %w\", err)\n\tcase n.outCh <- &structs.EventJson{Data: buf.Bytes()}:\n\t}\n\n\treturn nil\n}", "func (r *Room) SendEmit(senderId int, msg Message) {\n\tfor id, client := range r.clients {\n\t\tif id != senderId {\n\t\t\tclient.WriteMessage(msg)\n\t\t}\n\t}\n}", "func (w *Writer) Emit(typ byte, args ...uint64) {\n\tnargs := byte(len(args)) - 1\n\tif nargs > 3 {\n\t\tnargs = 3\n\t}\n\tbuf := []byte{typ | nargs<<6}\n\tif nargs == 3 {\n\t\tbuf = append(buf, 0)\n\t}\n\tfor _, a := range args {\n\t\tbuf = appendVarint(buf, a)\n\t}\n\tif nargs == 3 {\n\t\tbuf[1] = byte(len(buf) - 2)\n\t}\n\tn, err := w.Write(buf)\n\tif n != len(buf) || err != nil {\n\t\tpanic(\"failed to write\")\n\t}\n}", "func (c *Compiler) emit(op operation.Opcode, operands ...int) int {\n\tins := operation.NewInstruction(op, operands...)\n\tpos := c.addInstruction(ins)\n\tc.setEmitted(op, pos)\n\treturn pos\n}", "func (s *Socket) Emit(event event, msgType MessageType, args interface{}) (err error) {\n\tvar pktType PacketType\n\tswitch event {\n\tcase EventOpen:\n\t\tpktType = PacketTypeOpen\n\tcase EventMessage:\n\t\tpktType = PacketTypeMessage\n\tcase EventClose:\n\t\tpktType = PacketTypeClose\n\t// case EventError:\n\t// case EventUpgrade:\n\tcase EventPing:\n\t\tpktType = PacketTypePing\n\tcase EventPong:\n\t\tpktType = PacketTypePong\n\tdefault:\n\t\treturn\n\t}\n\tvar data []byte\n\tif d, ok := args.([]byte); ok {\n\t\tdata = d\n\t} else if s, ok := args.(string); ok {\n\t\tdata = []byte(s)\n\t} else {\n\t\tdata, err = json.Marshal(args)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn s.emitter.submit(&Packet{msgType: msgType, pktType: pktType, data: data})\n}", "func (r *Room) Emit(actionType string, data interface{}) {\n\tfor s := range r.sockets {\n\t\ts.Emit(actionType, data)\n\t}\n}", "func (m *metricBigipPoolMemberDataTransmitted) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func Emit(ctx context.Context, value []byte, to Recipient) error {\n\t// TombstonesDelay doesn't matter for Add.\n\td := dsset.Set{Parent: to.Key}\n\t// Keep IDs well distributed, but record creation time in it.\n\t// See also oldestEventAge().\n\tid := fmt.Sprintf(\"%s/%d\", uuid.New().String(), clock.Now(ctx).UnixNano())\n\tif err := d.Add(ctx, []dsset.Item{{ID: id, Value: value}}); err != nil {\n\t\treturn errors.Annotate(err, \"failed to send event\").Err()\n\t}\n\tmetricSent.Add(ctx, 1, to.MonitoringString)\n\treturn nil\n}", "func (c *Config) emitEvent(event Event) {\n\tevent.Timestamp = time.Now()\n\tselect {\n\tcase c.events <- event:\n\tdefault:\n\t}\n}", "func (m *metricMysqlIndexIoWaitTime) emit(metrics pmetric.MetricSlice) {\n\tif m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 
{\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (g *gaugeMetric) Emit(c LogClient) {\n\toptions := []loggregator.EmitGaugeOption{\n\t\tloggregator.WithGaugeValue(\n\t\t\tg.name,\n\t\t\ttoFloat64(atomic.LoadUint64(&g.value), 2),\n\t\t\tg.unit,\n\t\t),\n\t\tg.sourceIDOption,\n\t}\n\n\tfor k, v := range g.tags {\n\t\toptions = append(options, loggregator.WithEnvelopeTag(k, v))\n\t}\n\n\tc.EmitGauge(options...)\n}", "func (l *lexer) emit(typ tokenType) {\n\tl.tokens = append(l.tokens, token{\n\t\ttyp: typ,\n\t\tval: l.in[l.start:l.pos],\n\t})\n\tl.start = l.pos\n}", "func (tt *Tester) handleEmit(topic string, key string, value []byte, options ...EmitOption) *goka.Promise {\n\topts := new(emitOption)\n\topts.applyOptions(options...)\n\t_, finisher := goka.NewPromiseWithFinisher()\n\toffset := tt.pushMessage(topic, key, value, opts.headers)\n\treturn finisher(&sarama.ProducerMessage{Offset: offset}, nil)\n}", "func (op *output) Value() uint64 {\n\treturn op.value\n}", "func (op *output) Value() uint64 {\n\treturn op.value\n}", "func (op *output) Value() uint64 {\n\treturn op.value\n}", "func (out *TransferableOutput) Output() Transferable { return out.Out }", "func (m *metricRedisNetOutput) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricBigipPoolDataTransmitted) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricBigipNodeEnabled) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *Main) Send(event string, value interface{}) {\n\tfmt.Println(\"Send Event\")\n\tjsonString, err := json.Marshal(value)\n\n\tif err != nil {\n\t\tfmt.Printf(\"Error on sending value: %v\\n\", err)\n\t\treturn\n\t}\n\n\tjsString := fmt.Sprintf(`window.renderer.trigger(\"%s\", \"%s\")`, event, template.JSEscapeString(string(jsonString)))\n\tm.w.Eval(jsString)\n}", "func (m *metricRedisLatestFork) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricBigipVirtualServerEnabled) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricBigipPoolMemberEnabled) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricFlinkJvmMemoryMappedUsed) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (l *lexer) emit(t itemType) {\n\tl.items <- item{t, l.start, l.input[l.start:l.pos], l.line}\n\tl.prevItemType = t\n\tl.start = l.pos\n}", "func (p *EventEmitter) Emit(content interface{}, namaEvents ...string) {\n\tfor _, namaEvent := range namaEvents {\n\t\tif chanList, ok := p.mapClients[namaEvent]; ok {\n\t\t\t//Someone has subscribed this 
namaEvent\n\t\t\tfor _, channel := range chanList {\n\t\t\t\tchannel <- content\n\t\t\t}\n\t\t}\n\t}\n}", "func (m *metricAerospikeNodeConnectionOpen) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (o StreamOptimizer) Output(ctx context.Context, c StreamConsumer) (err error) {\n\tfor change := range o.changeQ {\n\n\t\t// There are two reasons we may want to abort early. Either the context has terminated, or there was an error\n\t\t// during processing\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// If error is non-nil, we'll abort upon next iteration.\n\t\terr = c.Send(change)\n\t}\n\n\treturn\n}", "func (m *metricRedisClientsMaxInputBuffer) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricRedisNetInput) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricBigipNodeConnectionCount) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (i *Input) SetValue(value Message) error {\n\t// we store the marshalled value in the Input so we can access it later\n\ti.Lock()\n\ti.Value = value\n\ti.Unlock()\n\n\t// then, to set an input to a particular value, we just push\n\t// that value to that input, as though we had a little pusher block.\n\n\t// first kill any existing value pusher\n\tstopValuePusher(i)\n\n\t// then set the pusher going\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase i.Connection <- value:\n\t\t\tcase <-i.quitChan:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn nil\n}", "func (m *metricFlinkJvmCPUTime) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (e *Emitter) Emit(topic string, value interface{}) (done chan struct{}) {\n\te.mu.RLock()\n\tdefer e.mu.RUnlock()\n\n\tdone = make(chan struct{})\n\n\tif e.topicListeners == nil {\n\t\tclose(done)\n\t\treturn done\n\t}\n\tlns, ok := e.topicListeners[topic]\n\tif !ok || len(lns) == 0 {\n\t\tclose(done)\n\t\treturn done\n\t}\n\n\tgo func() {\n\t\tdefer close(done)\n\t\tfor _, lnch := range lns {\n\t\t\tlnch <- value\n\t\t}\n\t}()\n\treturn done\n}", "func (m *metricBigipVirtualServerConnectionCount) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (c *Conn) Emit(name string, body interface{}) error {\n\tmsg := Message{\n\t\tName: name,\n\t\tBody: body,\n\t}\n\tb, _ := json.Marshal(msg)\n\n\th := ws.Header{\n\t\tFin: true,\n\t\tOpCode: ws.OpText,\n\t\tMasked: false,\n\t\tLength: int64(len(b)),\n\t}\n\n\treturn c.Write(h, b)\n}", "func (m *metricBigipVirtualServerPacketCount) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func 
(m *metricBigipPoolMemberCount) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (i *InfluxDBSender) Send(e metrics.Event) {\n\t// Converting to influxdb format\n\tbuf := bytes.NewBufferString(e.Metric)\n\tif len(e.Params) > 0 {\n\t\tfor _, param := range e.Params {\n\t\t\tbuf.WriteRune(',')\n\t\t\tbuf.WriteString(param)\n\t\t}\n\t}\n\tbuf.WriteString(\" value=\")\n\tbuf.WriteString(strconv.FormatInt(e.Value, 10))\n\tbuf.WriteRune('\\n')\n\n\ti.writer.Write(buf.Bytes())\n}", "func (m *metricActiveDirectoryDsNotificationQueued) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricBigipNodePacketCount) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricAerospikeNodeConnectionCount) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricBigipVirtualServerRequestCount) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricMysqlIndexIoWaitCount) emit(metrics pmetric.MetricSlice) {\n\tif m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricRedisCommandsProcessed) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricBigipPoolMemberConnectionCount) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}" ]
[ "0.71517444", "0.7032162", "0.6520443", "0.6100109", "0.6080597", "0.6074423", "0.60067284", "0.598246", "0.593429", "0.5927578", "0.59140766", "0.5908921", "0.5881568", "0.5873035", "0.58528805", "0.582588", "0.5802329", "0.5796881", "0.5786085", "0.5757521", "0.5737987", "0.5716691", "0.5705388", "0.5670479", "0.5656314", "0.56481683", "0.56294316", "0.5625927", "0.5608804", "0.5602092", "0.5582701", "0.5581985", "0.5576062", "0.5574771", "0.55675745", "0.556114", "0.555462", "0.5546502", "0.5546502", "0.5546502", "0.55176795", "0.5498366", "0.5484767", "0.54594487", "0.54594487", "0.54575807", "0.54530495", "0.5449534", "0.5447532", "0.5441897", "0.54373443", "0.54064333", "0.5403341", "0.540333", "0.53952277", "0.5391856", "0.5383085", "0.53820455", "0.53713596", "0.53633976", "0.53552383", "0.53362215", "0.5331421", "0.5330686", "0.5328555", "0.5327949", "0.5316939", "0.5316939", "0.5316939", "0.53087556", "0.52863276", "0.52706116", "0.5267056", "0.5254508", "0.5244605", "0.5237429", "0.5234555", "0.5227618", "0.5224966", "0.5222286", "0.5218428", "0.5214043", "0.52039224", "0.52033573", "0.5188875", "0.51799494", "0.5173733", "0.5173102", "0.51692075", "0.5158627", "0.5140463", "0.5140142", "0.51357394", "0.512928", "0.51261944", "0.51243705", "0.51230067", "0.51192796", "0.5115003", "0.51127684" ]
0.7160126
0
mapCount computes the number of values in an iterator.
func mapCount(itr Iterator, m *mapper) {
	n := 0
	for k, _ := itr.Next(); k != 0; k, _ = itr.Next() {
		n++
	}
	m.emit(itr.Time(), float64(n))
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func MapCount(itr Iterator) interface{} {\n\tn := float64(0)\n\tfor k, _ := itr.Next(); k != -1; k, _ = itr.Next() {\n\t\tn++\n\t}\n\tif n > 0 {\n\t\treturn n\n\t}\n\treturn nil\n}", "func Count(itr Iterator) int {\n\tconst mask = ^Word(0)\n\tcount := 0\n\tfor {\n\t\tw, n := itr.Next()\n\t\tif n == 0 {\n\t\t\tbreak\n\t\t}\n\t\tif n < bitLength-1 {\n\t\t\tw &= mask >> uint(bitLength-n)\n\t\t}\n\t\tcount += bits.OnesCount(uint(w))\n\t}\n\treturn count\n}", "func (m Map) Count() int {\n\treturn m.Imm.Len()\n}", "func (m *Cmap) Count() int {\n\treturn int(atomic.LoadInt64(&m.count))\n}", "func (m NMap) Count() int {\n\tcount := 0\n\tfor _, inMap := range m {\n\t\tinMap.RLock()\n\t\tcount += len(inMap.objs)\n\t\tinMap.RUnlock()\n\t}\n\treturn count\n}", "func MapCountDistinct(itr Iterator) interface{} {\n\tvar index = make(map[interface{}]struct{})\n\n\tfor time, value := itr.Next(); time != -1; time, value = itr.Next() {\n\t\tindex[value] = struct{}{}\n\t}\n\n\tif len(index) == 0 {\n\t\treturn nil\n\t}\n\n\treturn index\n}", "func countIter(n int) int {\n\tcount := 0\n\n\tfor n != 0 {\n\t\tcount += n & 1\n\t\tn >>= 1\n\t}\n\n\treturn count\n}", "func TestMapCount(t *testing.T) {\n\tm := map[Key]interface{}{}\n\ttestMapCountN(testN, m)\n}", "func Count(it *Iterator) (int, error) {\n\tcount := 0\n\tfor {\n\t\t_, err := it.Next()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn count, err\n\t\t}\n\t\tcount += 1\n\t}\n\treturn count, nil\n}", "func getTotalCount(a map[string]int) int {\n\tvar result int\n\tfor _, v := range a {\n\t\tresult += v\n\t}\n\treturn result\n}", "func (m *OMap) Count() int {\n\treturn len(m.keys)\n}", "func (self *SafeMap) Count() int {\n\tself.lock.RLock()\n\tdefer self.lock.RUnlock()\n\n\treturn len(self.sm)\n}", "func (j Json) IterMap(f func(key string, value Json) bool) int {\n\tm, ok := j.asMap()\n\tif !ok {\n\t\treturn 0\n\t}\n\n\tcount := 0\n\tfor k, v := range m {\n\t\tcount++\n\t\tif !f(k, Json{v, true}) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn count\n}", "func (h *HashMap) Count() int {\n\treturn h.Entries\n}", "func (p *SliceOfMap) Count(key interface{}) (cnt int) {\n\tk := ToString(key)\n\tcnt = p.CountW(func(x O) bool { return ToStringMap(x).Exists(k) })\n\treturn\n}", "func (m ConcurrentMap[K, V]) Count() int {\n\tcount := 0\n\tfor i := 0; i < SHARD_COUNT; i++ {\n\t\tshard := m.shards[i]\n\t\tshard.RLock()\n\t\tcount += len(shard.items)\n\t\tshard.RUnlock()\n\t}\n\treturn count\n}", "func (m *ConcurrentMap) Count() int {\n\tcount := 0\n\tfor i := 0; i < SHARD_COUNT; i++ {\n\t\tshard := m.shards[i]\n\t\tshard.RLock()\n\t\tcount += len(shard.items)\n\t\tshard.RUnlock()\n\t}\n\treturn count\n}", "func (w WaysMapping) Count() int {\n\treturn len(w)\n}", "func (x *intSet) count() int {\n\tn := 0\n\ty := *x\n\tfor y != 0 {\n\t\ty &= (y - 1)\n\t\tn++\n\t}\n\treturn n\n}", "func (m ConcurrentRoomInfoMap) Count() int {\n\tcount := 0\n\tfor i := 0; i < shardCount; i++ {\n\t\tshard := m[i]\n\t\tshard.RLock()\n\t\tcount += len(shard.items)\n\t\tshard.RUnlock()\n\t}\n\treturn count\n}", "func testMapCountN(n int, m map[Key]interface{}) {\n\tfor i := 0; i < n; i++ {\n\t\t_ = len(m)\n\t}\n}", "func testCountN(n int, hm HashMaper) {\n\tfor i := 0; i < n; i++ {\n\t\thm.Count()\n\t}\n}", "func (m *ConcurrentMap) Count() int {\n\tcount := 0\n\tfor i := 0; i < SHARD_COUNT; i++ {\n\t\tshard := m.Shareds[i]\n\t\tshard.RLock()\n\t\tcount += len(shard.items)\n\t\tshard.RUnlock()\n\t}\n\treturn count\n}", "func (s *CPUSet) Count() int {\n\tc := 
0\n\tfor _, b := range s {\n\t\tc += onesCount64(uint64(b))\n\t}\n\treturn c\n}", "func (set Int64Set) CountBy(predicate func(int64) bool) (result int) {\n\tfor v := range set {\n\t\tif predicate(v) {\n\t\t\tresult++\n\t\t}\n\t}\n\treturn\n}", "func calculateCount(cMap map[string]int) int {\n\tcount := 0\n\tfor _, val := range cMap {\n\t\tif val > 0 {\n\t\t\tcount++\n\t\t}\n\t\tif count == 5 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn count\n}", "func (m *TimeoutMap) Count() int {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\treturn len(m.elements)\n}", "func (sm sharedMap) Count() int {\n\tcallback := make(chan interface{})\n\tsm.c <- command{action: count, result: callback}\n\treturn (<-callback).(int)\n}", "func Count(array []interface{}, iterator ConditionIterator) int {\r\n\tcount := 0\r\n\tfor index, data := range array {\r\n\t\tif iterator(data, index) {\r\n\t\t\tcount = count + 1\r\n\t\t}\r\n\t}\r\n\treturn count\r\n}", "func (b *Array) Count() (c int) {\n\tfor _, v := range b.Bits() {\n\t\tc += nSetBits(uintptr(v))\n\t}\n\treturn\n}", "func (c *countHashReader) Count() int {\n\treturn c.n\n}", "func TestCount1024(t *testing.T) {\n\thm, _ := NewHashMap(1024)\n\ttestCountN(testN, hm)\n}", "func (s IntSet) Count() int {\n\treturn len(s)\n}", "func (ctx Context) Count(input chan float64) (n uint) {\n\tfor _ = range input {\n\t\tn++\n\t}\n\n\treturn n\n}", "func (sm *scoreMemberMap) count(min, max float64) int {\n\tn := 0\n\tfor cur := sm.head; cur != nil && cur.score <= max; cur = cur.next {\n\t\tif cur.score >= min {\n\t\t\tn += len(cur.members)\n\t\t}\n\t}\n\treturn n\n}", "func TestCount64(t *testing.T) {\n\thm, _ := NewHashMap(64)\n\ttestCountN(testN, hm)\n}", "func (c *countHashWriter) Count() int {\n\treturn c.n\n}", "func Count(max int, itr i.Forward) i.Forward {\n\treturn &count{max: max, itr: itr}\n}", "func (b *BitSet) Count() int {\n\tn := 0\n\tl := b.LowBit()\n\th := b.HighBit()\n\tfor i := l; i <= h; i++ { // for all values up to highest\n\t\tif b.Test(i) { // if this value is included\n\t\t\tn++ // count it\n\t\t}\n\t}\n\treturn n\n}", "func (o LookupGlossaryResultOutput) EntryCount() pulumi.IntOutput {\n\treturn o.ApplyT(func(v LookupGlossaryResult) int { return v.EntryCount }).(pulumi.IntOutput)\n}", "func (s Stream) Count() (int, error) {\n\tcounter := 0\n\n\terr := s.Iterate(func(r Record) error {\n\t\tcounter++\n\t\treturn nil\n\t})\n\n\treturn counter, err\n}", "func (r *linesIterator) Count() uint64 {\n\treturn uint64(r.linesCount)\n}", "func (this *Tuple) Count(item interface{}, start int) int {\n\tctr := 0\n\tfor i := start; i < this.Len(); i++ {\n\t\tif TupleElemEq(this.Get(i), item) {\n\t\t\tctr += 1\n\t\t}\n\t}\n\treturn ctr\n}", "func (bm BitMap) BitCount(ctx context.Context, start, end int64) (int64, error) {\n\treq := newRequest(\"*4\\r\\n$8\\r\\nBITCOUNT\\r\\n$\")\n\treq.addStringInt2(bm.name, start, end)\n\treturn bm.c.cmdInt(ctx, req)\n}", "func PopCount(x uint64) int {\n\tvar c uint64\n\tfor i := 0; i < 64; i++ {\n\t\tc += (x >> i) & 1\n\t}\n\treturn int(c)\n}", "func TestCount128(t *testing.T) {\n\thm, _ := NewHashMap(128)\n\ttestCountN(testN, hm)\n}", "func (m MultiSet) Count (val string) int {\n\tcount := 0\n\tfor _, num := range m {\n\t\tint_val, _ := strconv.Atoi(val)\n\t\tif num == int_val {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}", "func (c StringArrayCollection) CountBy(callback ...interface{}) map[interface{}]int {\n\tvalueCount := make(map[interface{}]int)\n\n\tif len(callback) > 0 {\n\t\tif cb, ok := callback[0].(FilterFun); ok {\n\t\t\tfor _, v 
:= range c.value {\n\t\t\t\tvalueCount[cb(v)]++\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfor _, v := range c.value {\n\t\t\tvalueCount[v]++\n\t\t}\n\t}\n\n\treturn valueCount\n}", "func (self Mset) Count (value interface{}) uint64 {\n\tk, v := self.h.Get(value)\n\tif k == nil {\n\t\treturn 0\n\t} else {\n\t\treturn (*v).(uint64)\n\t}\n}", "func (o UniformInt64RangePartitionSchemeDescriptionOutput) Count() pulumi.IntOutput {\n\treturn o.ApplyT(func(v UniformInt64RangePartitionSchemeDescription) int { return v.Count }).(pulumi.IntOutput)\n}", "func TileCounts(available string) (counts map[byte]int) {\n\tcounts = make(map[byte]int)\n\tfor _, c := range strings.ToUpper(available) {\n\t\tcounts[byte(c)]++\n\t}\n\treturn\n}", "func PopulationCounts(xs []byte) int {\n\tcount := 0\n\tfor _, x := range xs {\n\t\tcount += PopulationCount(x)\n\t}\n\treturn count\n}", "func (h HMSketch) Count(kvs map[string]string) float64 {\n\thist := h.Sketch(kvs)\n\treturn hist.Total()\n}", "func (b Bitmask) OnesCount() (n int) {\n\tfor _, v := range b {\n\t\tn += bits.OnesCount64(v)\n\t}\n\treturn\n}", "func (r *SlidingWindow) Count() int {return r.count}", "func count(rawdata string, c chan dict) { // mapfn\n\tm := make(dict)\n\twords := strings.Fields(rawdata)\n\n\tfor _, word := range words {\n\t\tm[word]++\n\t}\n\n\tc <- m\n}", "func (c *Counter) Count() int64 { return c.count }", "func MapSum(itr Iterator) interface{} {\n\tn := float64(0)\n\tcount := 0\n\tvar resultType NumberType\n\tfor k, v := itr.Next(); k != -1; k, v = itr.Next() {\n\t\tcount++\n\t\tswitch n1 := v.(type) {\n\t\tcase float64:\n\t\t\tn += n1\n\t\tcase int64:\n\t\t\tn += float64(n1)\n\t\t\tresultType = Int64Type\n\t\t}\n\t}\n\tif count > 0 {\n\t\tswitch resultType {\n\t\tcase Float64Type:\n\t\t\treturn n\n\t\tcase Int64Type:\n\t\t\treturn int64(n)\n\t\t}\n\t}\n\treturn nil\n}", "func Count(v uint64) (int, error) {\n\tsel := v >> 60\n\tif sel >= 16 {\n\t\treturn 0, fmt.Errorf(\"invalid selector value: %v\", sel)\n\t}\n\treturn selector[sel].n, nil\n}", "func (fm *FileMapMutex) Count() (size int) {\n\tfm.mu.RLock()\n\tdefer fm.mu.RUnlock()\n\treturn len(fm.Files)\n}", "func (c CounterSnapshot) Count() int64 { return int64(c) }", "func (store *EntryStore) Count() int64 {\n\tprop := store.db.GetProperty(\"rocksdb.estimate-num-keys\")\n\tc, _ := strconv.ParseInt(prop, 10, 64)\n\treturn c\n}", "func (is *InfoStore) infoCount() uint32 {\n\tcount := uint32(len(is.Infos))\n\tfor _, group := range is.Groups {\n\t\tcount += uint32(len(group.Infos))\n\t}\n\treturn count\n}", "func (l *Layer) Count(value int) int {\n\tcount := 0\n\tfor _, b := range l.Bytes {\n\t\tif b == value {\n\t\t\tcount++\n\t\t}\n\t}\n\n\treturn count\n}", "func UseCount(s string) map[string]int {\n\txs := strings.Fields(s)\n\tm := make(map[string]int)\n\tfor _, v := range xs {\n\t\tm[v]++\n\t}\n\treturn m\n}", "func UseCount(s string) map[string]int {\n\txs := strings.Fields(s)\n\tm := make(map[string]int)\n\tfor _, v := range xs {\n\t\tm[v]++\n\t}\n\treturn m\n}", "func UseCount(s string) map[string]int {\n\txs := strings.Fields(s)\n\tm := make(map[string]int)\n\tfor _, v := range xs {\n\t\tm[v]++\n\t}\n\treturn m\n}", "func CountIntArray(arr []int32) map[int32]int32 {\n\tmodel := make(map[int32]int32)\n\tfor _, elem := range arr {\n\t\t// first element is already initialized with 0\n\t\tmodel[elem] += 1\n\t}\n\treturn model\n}", "func (g *Grid) Count() int32 {\n\treturn int32(len(g.set))\n}", "func TestCount16(t *testing.T) {\n\thm, _ := NewHashMap(16)\n\ttestCountN(testN, hm)\n}", 
"func PopCount(num int) int {\n\tres := 0\n\n\tfor i := 0; i < 70; i++ {\n\t\tif ((num >> uint(i)) & 1) == 1 {\n\t\t\tres++\n\t\t}\n\t}\n\n\treturn res\n}", "func (s *Store) Count(key storage.Key) (int, error) {\n\tkeys := util.BytesPrefix([]byte(key.Namespace() + separator))\n\titer := s.db.NewIterator(keys, nil)\n\n\tvar c int\n\tfor iter.Next() {\n\t\tc++\n\t}\n\n\titer.Release()\n\n\treturn c, iter.Error()\n}", "func CountMatches(n int, a, b generator) int {\n\tvar count int // starts count as 0\n\n\t// loop through the values generated\n\tfor i := 0; i < n; i++ {\n\t\t// if the least significant bits match, increase count\n\t\tif a.NextValue()&mask == b.NextValue()&mask {\n\t\t\tcount++\n\t\t}\n\t}\n\n\treturn count\n}", "func count64(bitMask []uint64) (result uint64) {\n\n\tfor i := 0; i < len(bitMask); i++ {\n\t\tresult += uint64(bits.OnesCount64(bitMask[i]))\n\t}\n\treturn\n}", "func (o GetKubernetesClusterAgentPoolProfileOutput) Count() pulumi.IntOutput {\n\treturn o.ApplyT(func(v GetKubernetesClusterAgentPoolProfile) int { return v.Count }).(pulumi.IntOutput)\n}", "func (bits *BitArray) Count() int {\n\tlength := 0\n\n\tfor i := 0; i < bits.lenpad; i += _BytesPW {\n\t\tw := bytes2word(bits.bytes[i : i+_BytesPW])\n\t\tlength += countbits64(w)\n\t}\n\n\treturn length\n}", "func CountOf(val bool, slice []bool) int {\n\tcount := 0\n\tfor _, el := range slice {\n\t\tif val == el {\n\t\t\tcount = count + 1\n\t\t}\n\t}\n\treturn count\n}", "func (info Scalar) Count(e *Config, key label.Key) {\n\tdata := &Int64Data{Info: &info, key: nil}\n\te.subscribe(key, data.count)\n}", "func (s *SliceInt) Count() int {\n\treturn len(s.data)\n}", "func PopCount(x uint64) int {\n\tn := 0\n\tfor x != 0 {\n\t\tx = x & (x - 1)\n\t\tn++\n\t}\n\treturn n\n}", "func Zcount(key string, min, max float64) (int, error) {\n\tzset, err := zsetOf(key)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tif zset == nil {\n\t\treturn 0, nil\n\t}\n\treturn zset.count(min, max), nil\n}", "func CountAppInEachZone(mapping map[string]string, zoneNum int) map[string]int {\n\tvar zoneName string\n\tAppNumInEachZone := make(map[string]int, zoneNum)\n\tcount := 0\n\tfor i := 0; i < zoneNum; i++ {\n\t\tzoneName = \"zone\" + strconv.Itoa(i)\n\t\tcount = 0\n\t\tfor _, v := range mapping {\n\t\t\tif v == zoneName {\n\t\t\t\tcount++\n\t\t\t}\n\t\t}\n\n\t\tAppNumInEachZone[zoneName] = count\n\t}\n\treturn AppNumInEachZone\n}", "func (sw *scanWrap) Count() (int64, error) {\n\treturn sw.scan.Count()\n}", "func PopCount(number int) int {\n\tcounts := 0\n\tfor number > 0 {\n\t\tcounts += number & 1\n\t\tnumber = number >> 1\n\t}\n\t\n\treturn counts\n}", "func (b *Bitset) Count() int {\n\tbitlen := b.bitlength\n\tcount := 0\n\tfor i := 0; i < bitlen; i++ {\n\t\tif b.IsSet(i) {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}", "func (c *Aggregator) Count() (uint64, error) {\n\treturn c.state.count, nil\n}", "func Count(s beam.Scope, col beam.PCollection) beam.PCollection {\n\ts = s.Scope(\"stats.Count\")\n\n\tpre := beam.ParDo(s, keyedCountFn, col)\n\treturn SumPerKey(s, pre)\n}", "func (h *HashReader) Count() uint64 {\n\treturn h.count\n}", "func emitCount() int {\n\tlogn := math.Log(float64(knownNodes.length()))\n\tmult := (lambda * logn) + 0.5\n\n\treturn int(mult)\n}", "func population_count(n uint) (count uint) {\n\tfor i := n; i > 0; { // start from n and continue until zero\n\t\tcount += i & 1 // if the least significant bit is set then add it to the count\n\t\ti >>= 1 // bit shift right, discarding the bit we've just 
checked\n\t}\n\treturn\n}", "func countFreq(s string) map[rune]int {\n\tfreqMap := make(map[rune]int)\n\tfor _, c := range s {\n\t\tfreqMap[c] += 1\n\t}\n\treturn freqMap\n}", "func countFullModuleMap(fullModuleMap FullModuleMap) int {\n\tcount := 0\n\tfor _, s := range fullModuleMap {\n\t\tcount = count + len(s)\n\t}\n\n\treturn count\n}", "func Count() AggregateFunc {\n\treturn func(start, end string) (string, *dsl.Traversal) {\n\t\tif end == \"\" {\n\t\t\tend = DefaultCountLabel\n\t\t}\n\t\treturn end, __.As(start).Count(dsl.Local).As(end)\n\t}\n}", "func (store *db) ZCOUNT(key string, min int, max int) (int, error) { \n\tif store == nil {\n\t\tfmt.Println(\"ZCARD : store is nil\")\n\t\treturn 0, errors.New(fmt.Sprint(\"ZCOUNT : store is nil\"))\n\t}\n\n\tentry, ok := store.setmapEntry[key]\n\tvar count int = 0\n\n\tif ok == false {\n\t\treturn count, errors.New(fmt.Sprint(\"ZCOUNT : key \", key, \"not found\"))\n\t}\n\n\t/* Take DB entry Rlock before get, this is lock per key entry */\n\tentry.lock.RLock()\n\tdefer entry.lock.RUnlock()\n\t\n\tfor key,value := range entry.setEntry {\n\t\tif key >= min && key <= max {\n\t\t\tcount = count + value.Size() \n\t\t\tfmt.Println(\"set : \", value.String())\n\t\t}\n\t}\n\n\treturn count, nil\n}", "func (s *LinearState) Count(ctx *Context) int {\n\ts.slock(ctx, true)\n\tn := len(s.Facts)\n\ts.sunlock(ctx, true)\n\treturn n\n}", "func (b Bits) Count() int {\n\treturn bits.OnesCount64(uint64(b))\n}", "func LoopPopCount(x uint64) int {\n\tvar cnt int\n\tvar i byte\n\tfor i = 0; i < 64; i += 8 {\n\t\tcnt += int(pc[byte(x>>i)])\n\t}\n\treturn cnt\n}", "func (t *Types) Count() uint64 {\n\treturn uint64(len(t.entries))\n}", "func (v *Bitmap256) Count() int {\n\treturn bits.OnesCount64(v[0]) +\n\t\tbits.OnesCount64(v[1]) +\n\t\tbits.OnesCount64(v[2]) +\n\t\tbits.OnesCount64(v[3])\n}", "func PopCount(num int, ub int) int {\n\tres := 0\n\n\tfor i := 0; i < ub; i++ {\n\t\tif ((num >> uint(i)) & 1) == 1 {\n\t\t\tres++\n\t\t}\n\t}\n\n\treturn res\n}" ]
[ "0.8504498", "0.6812674", "0.67728424", "0.6678749", "0.66751015", "0.6630631", "0.66246307", "0.66168755", "0.65414244", "0.6417984", "0.6369643", "0.6345156", "0.6326942", "0.6302197", "0.6224095", "0.6205847", "0.6195354", "0.6176332", "0.6159858", "0.6153758", "0.6149981", "0.6131688", "0.6104737", "0.60962796", "0.60770285", "0.6076014", "0.60536146", "0.6042311", "0.602839", "0.60283697", "0.60058886", "0.6003758", "0.59958804", "0.59900045", "0.5979238", "0.59745675", "0.5947919", "0.5923746", "0.5891868", "0.5890683", "0.588439", "0.5867067", "0.5859457", "0.5851439", "0.58432174", "0.5839033", "0.5837654", "0.5834824", "0.58303523", "0.5814455", "0.58125424", "0.57929254", "0.57724905", "0.5743308", "0.5741818", "0.57273865", "0.57268727", "0.57128376", "0.57101953", "0.57032955", "0.5698376", "0.56824094", "0.56698155", "0.5652212", "0.56507397", "0.56507397", "0.56507397", "0.5631608", "0.5616411", "0.560981", "0.5608565", "0.5598797", "0.559686", "0.5587039", "0.5586231", "0.5571114", "0.5569845", "0.5561962", "0.55580366", "0.5538672", "0.55364233", "0.5529824", "0.55204475", "0.55182904", "0.55182654", "0.5513271", "0.550616", "0.55053383", "0.55042243", "0.55000687", "0.54996496", "0.5494978", "0.54859847", "0.5484503", "0.5471909", "0.5467615", "0.5467121", "0.54638654", "0.5458638", "0.5448594" ]
0.8270534
1
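Not a row of the dataset: the following is an illustrative, self-contained Go sketch of how a mapCount-style map function might be driven. The Iterator, sliceIterator, and mapper definitions are assumptions invented for this demo (the record above defines only mapCount itself); a zero key is assumed to signal iterator exhaustion, matching the loop condition in the record.

package main

import "fmt"

// Iterator is an assumed minimal interface matching how mapCount reads
// key/value pairs; a zero key signals exhaustion.
type Iterator interface {
	Next() (key int64, value interface{})
	Time() int64
}

// sliceIterator is a toy Iterator over parallel key/value slices.
type sliceIterator struct {
	keys []int64
	vals []interface{}
	pos  int
}

func (it *sliceIterator) Next() (int64, interface{}) {
	if it.pos >= len(it.keys) {
		return 0, nil // zero key terminates the loop in mapCount
	}
	k, v := it.keys[it.pos], it.vals[it.pos]
	it.pos++
	return k, v
}

// Time returns the timestamp attached to the emitted aggregate.
func (it *sliceIterator) Time() int64 { return it.keys[0] }

// mapper stands in for the emit target used by the map functions.
type mapper struct{}

func (m *mapper) emit(t int64, v float64) { fmt.Printf("t=%d value=%v\n", t, v) }

// mapCount mirrors the record above: it counts values and emits the total.
func mapCount(itr Iterator, m *mapper) {
	n := 0
	for k, _ := itr.Next(); k != 0; k, _ = itr.Next() {
		n++
	}
	m.emit(itr.Time(), float64(n))
}

func main() {
	it := &sliceIterator{
		keys: []int64{100, 200, 300},
		vals: []interface{}{1.5, 2.5, 3.0},
	}
	mapCount(it, &mapper{}) // prints: t=100 value=3
}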
mapSum computes the summation of values in an iterator.
func mapSum(itr Iterator, m *mapper) {
	n := float64(0)
	for k, v := itr.Next(); k != 0; k, v = itr.Next() {
		n += v.(float64)
	}
	m.emit(itr.Time(), n)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func MapSum(itr Iterator) interface{} {\n\tn := float64(0)\n\tcount := 0\n\tvar resultType NumberType\n\tfor k, v := itr.Next(); k != -1; k, v = itr.Next() {\n\t\tcount++\n\t\tswitch n1 := v.(type) {\n\t\tcase float64:\n\t\t\tn += n1\n\t\tcase int64:\n\t\t\tn += float64(n1)\n\t\t\tresultType = Int64Type\n\t\t}\n\t}\n\tif count > 0 {\n\t\tswitch resultType {\n\t\tcase Float64Type:\n\t\t\treturn n\n\t\tcase Int64Type:\n\t\t\treturn int64(n)\n\t\t}\n\t}\n\treturn nil\n}", "func (m AddressAmountMap) Sum() uint64 {\n\tvar sum uint64\n\tfor _, amount := range m {\n\t\tsum += amount\n\t}\n\treturn sum\n}", "func MapSum[T Number](slicesOfItems [][]T) []T {\n\tresult := make([]T, 0, len(slicesOfItems))\n\n\tfor _, items := range slicesOfItems {\n\t\tresult = append(result, Sum(items))\n\t}\n\treturn result\n}", "func TestSum(t *testing.T) {\n\ttestMap := [][]int{\n\t\t{1, 1, 2},\n\t\t{2, 2, 4},\n\t\t{4, 4, 8},\n\t\t{5, 15, 20},\n\t}\n\n\tfor _, v := range testMap {\n\t\tif i := Sum(v[0], v[1]); i != v[2] {\n\t\t\tt.Errorf(\"Error at Sum(%d, %d) returned %d\", v[0], v[1], i)\n\t\t}\n\t}\n}", "func Sum(items []Value) (op int) {\n\tfor _, item := range items {\n\t\top += item.Value()\n\t}\n\treturn\n}", "func Sum(sl []float64) float64 {\n\tres := float64(0)\n\tfor _, val := range sl {\n\t\tres += val\n\t}\n\treturn res\n}", "func reduceSum(key string, values []interface{}, r *reducer) {\n\tvar n float64\n\tfor _, v := range values {\n\t\tn += v.(float64)\n\t}\n\tr.emit(key, n)\n}", "func Sum(input []float64) (sum float64) {\n\tfor _, v := range input {\n\t\tvar fuel float64\n\t\tfuel += calculator(v, fuel)\n\t\tsum += fuel\n\t}\n\treturn sum\n}", "func (m *Map) ReduceIntSum(reduce func(map[interface{}]interface{}) int) int {\n\tresult := 0\n\tsplits := m.splits\n\tfor i := 0; i < len(splits); i++ {\n\t\tresult += splits[i].reduceInt(reduce)\n\t}\n\treturn result\n}", "func (m mathUtil) Sum(values ...float64) float64 {\n\tvar total float64\n\tfor _, v := range values {\n\t\ttotal += v\n\t}\n\treturn total\n}", "func Sum(d []float64) (sum float64) {\n\tfor x := 0; x < len(d); x++ {\n\t\tsum += d[x]\n\t}\n\treturn\n}", "func sum(b *bolt.Bucket, fn func([]byte) int) (int, error) {\n\tsum := 0\n\terr := b.ForEach(func(_, v []byte) error {\n\t\tsum += fn(v)\n\t\treturn nil\n\t})\n\treturn sum, err\n}", "func Sum(in []int) (total int) {\n\ttotal = 0\n\tfor _, v := range in {\n\t\ttotal += v\n\t}\n\treturn\n}", "func (set Int64Set) Sum() int64 {\n\tsum := int64(0)\n\tfor v, _ := range set {\n\t\tsum = sum + v\n\t}\n\treturn sum\n}", "func Sum(xs ...float64) float64 {\n\tvar s float64\n\tfor i := 0; i < len(xs); i++ {\n\t\ts += xs[i]\n\t}\n\n\treturn s\n}", "func DoSum() float64", "func Sum(values ...float64) float64 {\n\tres := 0.0\n\n\tfor _, v := range values {\n\t\tres += v\n\t}\n\n\treturn res\n}", "func Sum(by []string, input []*oproto.ValueStream) []*oproto.ValueStream {\n\toutput := []*oproto.ValueStream{{Variable: input[0].Variable}}\n\tiPos := make([]int, len(input))\n\tfor {\n\t\tvalues := []float64{}\n\t\ttimestamps := []uint64{}\n\t\tfor i := 0; i < len(input); i++ {\n\t\t\tif iPos[i] >= len(input[i].Value) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif input[i] != nil {\n\t\t\t\tvalues = append(values, input[i].Value[iPos[i]].GetDouble())\n\t\t\t\ttimestamps = append(timestamps, input[i].Value[iPos[i]].Timestamp)\n\t\t\t}\n\t\t\tiPos[i]++\n\t\t}\n\t\tif len(values) == 0 {\n\t\t\tbreak\n\t\t}\n\t\tvar total float64\n\t\tfor _, i := range values {\n\t\t\ttotal += i\n\t\t}\n\t\tvar tsTotal 
uint64\n\t\tfor _, i := range timestamps {\n\t\t\ttsTotal += i\n\t\t}\n\t\toutput[0].Value = append(output[0].Value, value.NewDouble(tsTotal/uint64(len(timestamps)), total))\n\t}\n\treturn output\n}", "func Sum(list []float64) float64 {\n\ttotal := 0.0\n\tfor _, item := range list {\n\t\ttotal += item\n\t}\n\n\treturn total\n}", "func (m Multiples) Sum() int {\n\tvar output int\n\tfor _, i := range m {\n\t\toutput += i\n\t}\n\n\treturn output\n}", "func Sum(xi ...int) int {\n\ts := 0\n\tfor _, v := range xi {\n\t\ts += v\n\t}\n\treturn s\n}", "func sum(nums []int64) (sum int64) {\n\tfor _, v := range nums {\n\t\tsum += v\n\t}\n\treturn\n}", "func Sum(v []float64) float64 {\n\ttotal := 0.0\n\tfor _, number := range v {\n\t\ttotal = total + number\n\t}\n\treturn total\n}", "func Sum(numbers []int) int {\n\tadd := func(acc, x int) int { return acc + x }\n\treturn Reduce(numbers, add, 0)\n}", "func sumResults(sumResult []float64) float64 {\n\tresult := 0.0\n\tfor _, s := range sumResult {\n\t\tresult += s\n\t}\n\treturn result\n}", "func sum(p []float64) []float64 {\n\tv := 0.0\n\tfor _, x := range p {\n\t\tv += x\n\t}\n\treturn []float64{v}\n}", "func Sum(numbers []int) (rs int) {\n\tfor _, number := range numbers {\n\t\trs += number\n\t}\n\treturn\n}", "func sum(xi ...int) int {\n\ttotal := 0\n\tfor _, v := range xi {\n\t\ttotal += v\n\t}\n\treturn total\n}", "func (r Result) Sum() int {\n\tvar s int\n\n\tfor _, n := range r.rolls {\n\t\ts += n.N\n\t}\n\n\treturn s\n}", "func sum(in []int) int {\n\tvar result int\n\tfor i := 0; i < len(in); i++ {\n\t\tresult += in[i]\n\t}\n\treturn result\n}", "func sum(numbers []int) (sum int) {\n\tfor _, number := range numbers {\n\t\tsum += number\n\t}\n\n\treturn\n}", "func Sum(args ...int) (res int) {\n\tfor _, v := range args {\n\t\tres += v\n\t}\n\n\treturn\n}", "func (m *Map) ReduceFloat64Sum(reduce func(map[interface{}]interface{}) float64) float64 {\n\tresult := float64(0)\n\tsplits := m.splits\n\tfor i := 0; i < len(splits); i++ {\n\t\tresult += splits[i].reduceFloat64(reduce)\n\t}\n\treturn result\n}", "func (fn *formulaFuncs) SUM(argsList *list.List) formulaArg {\n\tvar sum float64\n\tfor arg := argsList.Front(); arg != nil; arg = arg.Next() {\n\t\ttoken := arg.Value.(formulaArg)\n\t\tswitch token.Type {\n\t\tcase ArgError:\n\t\t\treturn token\n\t\tcase ArgString:\n\t\t\tif num := token.ToNumber(); num.Type == ArgNumber {\n\t\t\t\tsum += num.Number\n\t\t\t}\n\t\tcase ArgNumber:\n\t\t\tsum += token.Number\n\t\tcase ArgMatrix:\n\t\t\tfor _, row := range token.Matrix {\n\t\t\t\tfor _, value := range row {\n\t\t\t\t\tif num := value.ToNumber(); num.Type == ArgNumber {\n\t\t\t\t\t\tsum += num.Number\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn newNumberFormulaArg(sum)\n}", "func (m *Map) ParallelReduceIntSum(reduce func(map[interface{}]interface{}) int) int {\n\tvar recur func(splits []Split) int\n\trecur = func(splits []Split) int {\n\t\tif len(splits) < 2 {\n\t\t\t// NewMap and case 2 below ensure that len(splits) > 0\n\t\t\treturn splits[0].reduceInt(reduce)\n\t\t}\n\t\tvar left, right int\n\t\tvar p interface{}\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tswitch len(splits) {\n\t\tcase 2:\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = splits[1].reduceInt(reduce)\n\t\t\t}()\n\t\t\tleft = splits[0].reduceInt(reduce)\n\t\tdefault:\n\t\t\thalf := len(splits) / 2\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = 
internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(splits[half:])\n\t\t\t}()\n\t\t\tleft = recur(splits[:half])\n\t\t}\n\t\twg.Wait()\n\t\tif p != nil {\n\t\t\tpanic(p)\n\t\t}\n\t\treturn left + right\n\t}\n\treturn recur(m.splits)\n}", "func Sum(xs []int) int {\n\tsum := 0\n\tfor _, x := range xs {\n\t\tsum += x\n\t}\n\treturn sum\n}", "func Sum[T ifs.NumberTypes](numbers []T) T {\n\tvar total T = 0\n\tfor _, value := range numbers {\n\t\ttotal += value\n\t}\n\treturn total\n}", "func IntSummation(stream IntStream) IntStream {\n\treturn &intSumStream{stream: stream}\n}", "func Sum(pointsPerSide int, dimension int, fn BzFunc) float64 {\n\tc := 0.0\n\tadd := func(next, total float64) float64 {\n\t\t// add next to total; c holds error compensation information\n\t\ty := next - c\n\t\tt := total + y\n\t\tc = (t - total) - y\n\t\treturn t\n\t}\n\treturn bzReduce(add, 0.0, pointsPerSide, dimension, fn)\n}", "func SumNumbers[K comparable, V Number](m map[K]V) V {\n\tvar s V\n\tfor _, v := range m {\n\t\ts += v\n\t}\n\treturn s\n}", "func Sum[T Number](items []T) T {\n\tvar sum T\n\tfor _, item := range items {\n\t\tsum += item\n\t}\n\treturn sum\n}", "func Sum(vals ...float64) float64 {\n\tsum := 0.0\n\tfor _, v := range vals {\n\t\tsum += v\n\t}\n\treturn sum\n}", "func Sum(series []Series) (Series, error) {\n\treturn applyOperator(series, OperatorSum)\n}", "func (bit *BIT) Sum(k int) T {\n\tret := T{0}\n\tfor i := k; i > 0; i -= i & -i {\n\t\tret.val += bit.Bit[i].val\n\t}\n\treturn ret\n}", "func sum(arr []int) (result int) {\n\tfor _, each := range arr {\n\t\tresult += each\n\t}\n\treturn\n}", "func sum(xi ...int) int {\n\tfmt.Printf(\"%T\\n\", xi)\n\ttotal := 0\n\tfor _, v := range xi {\n\t\ttotal += v\n\t}\n\treturn total\n}", "func Sum(numbers []int) (total int) {\n\tfor i := 0; i < len(numbers); i++ {\n\t\ttotal += numbers[i]\n\t}\n\treturn\n}", "func (p *EdwardsPoint) Sum(values []*EdwardsPoint) *EdwardsPoint {\n\tp.Identity()\n\tfor _, v := range values {\n\t\tp.Add(p, v)\n\t}\n\n\treturn p\n}", "func (r *MFI) Sum() float64 {\n\treturn r.Value\n}", "func Sum(numbers []int) int {\n\tsum := 0\n\tfor _, value := range numbers {\n\t\tsum += value\n\t}\n\n\treturn sum\n}", "func (ms SummaryDataPoint) Sum() float64 {\n\treturn (*ms.orig).Sum\n}", "func (i *identity) Sum(b []byte) []byte {\n\treturn append(b, i.data...)\n}", "func sum(args ...float64) float64 {\n\ttotal := float64(0)\n\tfor _, arg := range args {\n\t\ttotal += arg\n\t}\n\treturn total\n}", "func sum(list []int) int {\n\tvar total int\n\tfor _, i := range list {\n\t\ttotal += i\n\t}\n\treturn total\n}", "func (v Vec) Sum() float64 {\n\treturn v.Reduce(func(a, e float64) float64 { return a + e }, 0.0)\n}", "func (list IntList) Map(fn unaryFunc) IntList {\n\tr := []int{}\n\tfor _, e := range list {\n\t\tr = append(r, fn(e))\n\t}\n\treturn IntList(r)\n}", "func Sum(numbers []int) int {\n\n\tsum := 0\n\tfor _, number := range numbers {\n\t\tsum += number\n\n\t}\n\treturn sum\n}", "func sum(arr []float64) float64 {\n\tif len(arr) == 0 {\n\t\treturn 0.0\n\t}\n\n\tresult := 0.0\n\tfor _, v := range arr {\n\t\tresult += v\n\t}\n\treturn result\n}", "func Sum(h *simulator.Handle, vecs ...[]float64) []float64 {\n\tfor _, v := range vecs[1:] {\n\t\tif len(v) != len(vecs[0]) {\n\t\t\tpanic(\"mismatching lengths\")\n\t\t}\n\t}\n\tres := make([]float64, len(vecs[0]))\n\tfor _, v := range vecs {\n\t\tfor i, x := range v {\n\t\t\tres[i] += x\n\t\t}\n\t}\n\n\t// Simulate computation time.\n\th.Sleep(FlopTime 
* float64(len(vecs)*len(vecs[0])))\n\n\treturn res\n}", "func SumIntsOrFloats[K comparable, V int64 | float64](m map[K]V) V {\n\tvar s V\n\tfor _, v := range m {\n\t\ts += v\n\t}\n\treturn s\n}", "func (ms HistogramDataPoint) Sum() float64 {\n\treturn (*ms.orig).Sum\n}", "func (q *Query) Sum(a interface{}) (int, int, error) {\n\tif !q.isIndexQuery || len(q.indexName) == 0 {\n\t\treturn 0, 0, errors.New(\"Aggregation must use an index Query\")\n\t}\n\n\tq.aggTarget = a\n\tq.isAggQuery = true\n\treturn q.execute()\n}", "func (m *Map) ReduceStringSum(reduce func(map[interface{}]interface{}) string) string {\n\tresult := \"\"\n\tsplits := m.splits\n\tfor i := 0; i < len(splits); i++ {\n\t\tresult += splits[i].reduceString(reduce)\n\t}\n\treturn result\n}", "func (s Series) Sum(force bool) (float64, error) {\n\tif s.elements.Len() == 0 || s.Type() == String || s.Type() == Bool {\n\t\treturn math.NaN(), nil\n\t}\n\tsFloat, err := s.Float(force)\n\tif err != nil {\n\t\treturn math.NaN(), err\n\t}\n\tsum := sFloat[0]\n\tfor i := 1; i < len(sFloat); i++ {\n\t\telem := sFloat[i]\n\t\tsum += elem\n\t}\n\treturn sum, nil\n}", "func Sum(nums []int) int {\n\tvar total int = 0\n\tfor _, n := range nums {\n\t\ttotal += n\n\t}\n\treturn total\n}", "func Sum(a ...int) int {\n\tsum := 0\n\tfor _, i := range a {\n\t\tsum += i\n\t}\n\treturn sum\n}", "func Sum(numbers []int) int {\n\tvar sum int = 0\n\tfor _, num := range numbers {\n\t\tsum += num\n\t}\n\treturn sum\n}", "func (list *IntList) Sum() int {\n\tif list == nil {\n\t\treturn 0\n\t}\n\treturn list.Value + list.Tail.Sum()\n}", "func (rm RowsMap) SumFloat(field string, multiple int) int {\n\tsum := 0\n\tfor _, v := range rm {\n\t\tsum += int(math.Round(v.Float64(field) * float64(multiple)))\n\t}\n\treturn sum\n}", "func Sum(numbers []int) int {\n\tsum := 0\n\tfor i := range numbers {\n\t\tsum += numbers[i]\n\t}\n\n\treturn sum\n}", "func (t Transaction) Sum() (map[string]Amounts, error) {\n\tsum := make(map[string]Amounts)\n\tfor _, posting := range t.Postings {\n\t\tweight, err := posting.Weight()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tv, ok := sum[posting.Account.String()]\n\t\tif !ok {\n\t\t\tv = Amounts{weight}\n\t\t} else {\n\t\t\tv = append(v, weight)\n\t\t}\n\t\tsum[posting.Account.String()] = v\n\t}\n\n\treturn sum, nil\n}", "func (f *fragment) sum(filter *Row, bitDepth uint) (sum int64, count uint64, err error) {\n\t// Compute count based on the existence row.\n\tconsider := f.row(bsiExistsBit)\n\tif filter != nil {\n\t\tconsider = consider.Intersect(filter)\n\t}\n\tcount = consider.Count()\n\n\t// Determine positive & negative sets.\n\tnrow := f.row(bsiSignBit)\n\tprow := consider.Difference(nrow)\n\n\t// Compute the sum based on the bit count of each row multiplied by the\n\t// place value of each row. For example, 10 bits in the 1's place plus\n\t// 4 bits in the 2's place plus 3 bits in the 4's place equals a total\n\t// sum of 30:\n\t//\n\t// 10*(2^0) + 4*(2^1) + 3*(2^2) = 30\n\t//\n\t// Execute once for positive numbers and once for negative. 
Subtract the\n\t// negative sum from the positive sum.\n\tfor i := uint(0); i < bitDepth; i++ {\n\t\trow := f.row(uint64(bsiOffsetBit + i))\n\n\t\tpsum := int64((1 << i) * row.intersectionCount(prow))\n\t\tnsum := int64((1 << i) * row.intersectionCount(nrow))\n\n\t\t// Squash to reduce the possibility of overflow.\n\t\tsum += psum - nsum\n\t}\n\n\treturn sum, count, nil\n}", "func Sum(nums []int) int {\n\tsum := 0\n\tfor _, num := range nums {\n\t\tsum += num\n\t}\n\treturn sum\n}", "func Sum(nums []int) int {\n\tvar sum int\n\tfor _, num := range nums {\n\t\tsum += num\n\t}\n\treturn sum\n}", "func Sum(numbers ...int) (sum int) {\n\tfor _, n := range numbers {\n\t\tsum += n\n\t}\n\n\treturn\n}", "func (d *state) Sum(in []byte) []byte {\n\t// Make a copy of the original hash so that caller can keep writing\n\t// and summing.\n\tdup := d.clone()\n\thash := make([]byte, dup.outputLen)\n\tdup.Read(hash)\n\treturn append(in, hash...)\n}", "func Sum(numbers ...int) (result int) {\n\n\tfor _, number := range numbers {\n\t\tresult += number\n\t}\n\n\treturn result\n}", "func (a SumAggregator) Aggregate(values []float64) float64 {\n\tresult := 0.0\n\tfor _, v := range values {\n\t\tresult += v\n\t}\n\treturn result\n}", "func Sum(n ...int) int {\n\ts := 0\n\tfor _, v := range n {\n\t\ts += v\n\t}\n\treturn s\n}", "func MapMean(itr Iterator) interface{} {\n\tout := &meanMapOutput{}\n\n\tfor k, v := itr.Next(); k != -1; k, v = itr.Next() {\n\t\tout.Count++\n\t\tswitch n1 := v.(type) {\n\t\tcase float64:\n\t\t\tout.Mean += (n1 - out.Mean) / float64(out.Count)\n\t\tcase int64:\n\t\t\tout.Mean += (float64(n1) - out.Mean) / float64(out.Count)\n\t\t\tout.ResultType = Int64Type\n\t\t}\n\t}\n\n\tif out.Count > 0 {\n\t\treturn out\n\t}\n\n\treturn nil\n}", "func (p *FloatPrice) Sum(x ...FloatPrice) *FloatPrice {\n\tfor _, y := range x {\n\t\tp.Add(y)\n\t}\n\treturn p\n}", "func (l IntList) Map(fn unaryFunc) IntList {\n\tfor i, v := range l {\n\t\tl[i] = fn(v)\n\t}\n\treturn l\n}", "func SumInts(input []int) int {\n\tvar sum int\n\tfor _, value := range input {\n\t\tsum += value\n\t}\n\treturn sum\n}", "func Sum(field string) AggregateFunc {\n\treturn func(start, end string) (string, *dsl.Traversal) {\n\t\tif end == \"\" {\n\t\t\tend = DefaultSumLabel\n\t\t}\n\t\treturn end, __.As(start).Unfold().Values(field).Sum().As(end)\n\t}\n}", "func Sum(vals ...int) int {\n\ttotal := 0\n\n\tfor _, val := range vals {\n\t\ttotal += val\n\t}\n\n\treturn total\n}", "func ExampleIntSet_Sum() {\n\ts1 := gset.NewIntSet()\n\ts1.Add([]int{1, 2, 3, 4}...)\n\tfmt.Println(s1.Sum())\n\n\t// Output:\n\t// 10\n}", "func (c *Aggregator) Sum() (number.Number, error) {\n\treturn c.state.sum, nil\n}", "func Sum(sum int) (out []Triplet) {\n\tfor _, x := range Range(1, sum/2) {\n\t\tif x[0]+x[1]+x[2] == sum {\n\t\t\tout = append(out, x)\n\t\t}\n\t}\n\treturn\n}", "func (i income) sumTotal() float64 {\n\tvar sum float64\n\tfor j := 0; j < len(i); j++ {\n\t\tsum += i[j]\n\t}\n\treturn sum\n}", "func sum(arr []int) int {\n\tvar res int\n\tfor _, v := range arr {\n\t\tres += v\n\t}\n\treturn res\n}", "func (c *Aggregator) Sum() (number.Number, error) {\n\treturn c.value, nil\n}", "func (m *Map) ParallelReduceFloat64Sum(reduce func(map[interface{}]interface{}) float64) float64 {\n\tvar recur func(splits []Split) float64\n\trecur = func(splits []Split) float64 {\n\t\tif len(splits) < 2 {\n\t\t\t// NewMap and case 2 below ensure that len(splits) > 0\n\t\t\treturn splits[0].reduceFloat64(reduce)\n\t\t}\n\t\tvar left, right float64\n\t\tvar p 
interface{}\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tswitch len(splits) {\n\t\tcase 2:\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = splits[1].reduceFloat64(reduce)\n\t\t\t}()\n\t\t\tleft = splits[0].reduceFloat64(reduce)\n\t\tdefault:\n\t\t\thalf := len(splits) / 2\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(splits[half:])\n\t\t\t}()\n\t\t\tleft = recur(splits[:half])\n\t\t}\n\t\twg.Wait()\n\t\tif p != nil {\n\t\t\tpanic(p)\n\t\t}\n\t\treturn left + right\n\t}\n\treturn recur(m.splits)\n}", "func (d *digest) Sum(in []byte) []byte {\r\n\ts := d.Sum64()\r\n\treturn append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s))\r\n}", "func (s *NumSeries) Sum() float64 { return s.sum }", "func (s *server) Sum(ctx context.Context, request *proto.SumRequest) (*proto.SumResult, error) {\n\t// Receive array of ints from request and add them up\n\tresult, err := sum.Calculate(request.Numbers)\n\tif err != nil {\n\t\tos.Exit(1)\n\t}\n\t// Send back result in response\n\treturn &proto.SumResult{Result: result}, nil\n}", "func Sum(a float64, b float64) float64 {\n\treturn a + b\n}", "func (pc *PriceCalculator) Sum(items []Item) int {\n\tsum := 0\n\t// New item can be added without modifying this method\n\tfor _, item := range items {\n\t\tsum += item.GetPrice()\n\t}\n\treturn sum\n}", "func sum(ar []int) int {\n\tsum := 0\n\tfor i := range ar {\n\t\tsum += ar[i]\n\t}\n\treturn sum\n}", "func (d *pmac) Sum(in []byte) []byte {\n\tif d.finished {\n\t\tpanic(\"pmac: already finished\")\n\t}\n\n\tif d.pos == block.Size {\n\t\txor(d.digest[:], d.buf[:])\n\t\txor(d.digest[:], d.lInv[:])\n\t} else {\n\t\txor(d.digest[:], d.buf[:d.pos])\n\t\td.digest[d.pos] ^= 0x80\n\t}\n\n\td.digest.Encrypt(d.c)\n\td.finished = true\n\n\treturn append(in, d.digest[:]...)\n}", "func MapCount(itr Iterator) interface{} {\n\tn := float64(0)\n\tfor k, _ := itr.Next(); k != -1; k, _ = itr.Next() {\n\t\tn++\n\t}\n\tif n > 0 {\n\t\treturn n\n\t}\n\treturn nil\n}" ]
[ "0.82898897", "0.70834136", "0.7009675", "0.6268281", "0.6250325", "0.62207806", "0.6212783", "0.6186013", "0.6122088", "0.6097039", "0.6060919", "0.60580987", "0.60206395", "0.5946613", "0.5935301", "0.5899053", "0.58673143", "0.585274", "0.5808694", "0.5794331", "0.5778898", "0.57704675", "0.5766776", "0.5761855", "0.5739988", "0.57275444", "0.572211", "0.57146156", "0.5713583", "0.56904423", "0.5682613", "0.5676483", "0.5671693", "0.5648764", "0.5637188", "0.5634014", "0.5618402", "0.56172746", "0.56140536", "0.55781686", "0.5570512", "0.5558368", "0.55367965", "0.5515409", "0.5503786", "0.5495262", "0.5494542", "0.5473554", "0.54658794", "0.54614896", "0.5461168", "0.54557747", "0.5443939", "0.5425102", "0.5425011", "0.5418088", "0.53862345", "0.5384914", "0.53819424", "0.5379433", "0.5363043", "0.53627604", "0.5359612", "0.5354294", "0.5333879", "0.5326256", "0.53255606", "0.53168344", "0.5313711", "0.53094625", "0.5299972", "0.5289179", "0.5284556", "0.52807003", "0.52777386", "0.5274981", "0.52738833", "0.5273586", "0.527124", "0.52667135", "0.52596796", "0.52515906", "0.5250481", "0.5246063", "0.52456784", "0.5240783", "0.5233729", "0.5232912", "0.52266467", "0.52237725", "0.5220294", "0.5213921", "0.52063274", "0.520351", "0.52018887", "0.52004963", "0.51945746", "0.5187521", "0.5182761", "0.5180869" ]
0.8373971
0
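Again for illustration only, not dataset content: a short Go sketch pairing a mapSum-style emitter with a reduce step, in the spirit of the reduceSum negative listed in the record above. The reducer type, its emit signature, and the hard-coded partial sums are assumptions for the demo.

package main

import "fmt"

// reducer is an assumed stand-in: it collects emitted sums per key and
// accumulates them, echoing the reduceSum pattern from the negatives above.
type reducer struct {
	total float64
}

func (r *reducer) emit(key string, v float64) {
	r.total += v
	fmt.Printf("key=%s partial=%v total=%v\n", key, v, r.total)
}

// reduceSum mirrors the reduceSum negative: it folds mapper outputs together.
func reduceSum(key string, values []interface{}, r *reducer) {
	var n float64
	for _, v := range values {
		n += v.(float64)
	}
	r.emit(key, n)
}

func main() {
	// Pretend two mappers each ran mapSum over their own shard and emitted
	// these partial sums; the reducer combines them into one value.
	partials := []interface{}{float64(6), float64(4)}
	reduceSum("cpu", partials, &reducer{}) // prints: key=cpu partial=10 total=10
}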
newReducer returns a new instance of reducer.
func newReducer(e *Executor) *reducer {
	return &reducer{
		executor: e,
		c:        make(chan map[string]interface{}, 0),
		done:     make(chan chan struct{}, 0),
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewReducer(f func(*Ctx) *State) *ReduceFunc {\n\treturn &ReduceFunc{Func: f}\n}", "func newReconciler(mgr manager.Manager) *ReconcileCluster {\n\treturn &ReconcileCluster{\n\t\tClient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\trecorder: mgr.GetEventRecorderFor(controllerName),\n\t}\n}", "func newReconciler(mgr manager.Manager, actuator Actuator) reconcile.Reconciler {\n\treturn &ReconcileCluster{Client: mgr.GetClient(), scheme: mgr.GetScheme(), actuator: actuator}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileKedaController{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileFunction{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileHive{\n\t\tclient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n }\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileDolevOp{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileNode{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileSdewan{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileJedyKind{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileRestaurant{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileHiveConfig{Client: mgr.GetClient(), scheme: mgr.GetScheme(), restConfig: mgr.GetConfig()}\n}", "func newCompactionState(maxFileSize uint32, snapshot version.Snapshot, compaction *version.Compaction) *compactionState {\n\treturn &compactionState{\n\t\tmaxFileSize: maxFileSize,\n\t\tsnapshot: snapshot,\n\t\tcompaction: compaction,\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileKubemanager{Client: mgr.GetClient(), Scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileCollectd{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileAPIScheme{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileControlPlane{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileCanary{Client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileMattermostRestoreDB{client: mgr.GetClient(), scheme: mgr.GetScheme(), state: mattermostv1alpha1.Restoring}\n}", "func (s *Store) ReplaceReducer(r Reducer) {\n\ts.reducer = r\n\ts.Dispatch(INITAction())\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileChe{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tscheme := mgr.GetScheme()\n\toappsv1.AddToScheme(scheme)\n\troutev1.AddToScheme(scheme)\n\t// Best practices\n\treturn &ReconcileCanary{client: mgr.GetClient(), scheme: scheme, recorder: 
mgr.GetRecorder(controllerName)}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileDfJob{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(\n\tmgr manager.Manager,\n\tmutationSystem *mutation.System,\n\ttracker *readiness.Tracker,\n\tgetPod func(context.Context) (*corev1.Pod, error),\n\tkind string,\n\tnewMutationObj func() client.Object,\n\tmutatorFor func(client.Object) (types.Mutator, error),\n\tevents chan event.GenericEvent,\n) *Reconciler {\n\tr := &Reconciler{\n\t\tsystem: mutationSystem,\n\t\tClient: mgr.GetClient(),\n\t\ttracker: tracker,\n\t\tgetPod: getPod,\n\t\tscheme: mgr.GetScheme(),\n\t\treporter: ctrlmutators.NewStatsReporter(),\n\t\tcache: ctrlmutators.NewMutationCache(),\n\t\tgvk: mutationsv1.GroupVersion.WithKind(kind),\n\t\tnewMutationObj: newMutationObj,\n\t\tmutatorFor: mutatorFor,\n\t\tlog: logf.Log.WithName(\"controller\").WithValues(logging.Process, fmt.Sprintf(\"%s_controller\", strings.ToLower(kind))),\n\t\tevents: events,\n\t}\n\tif getPod == nil {\n\t\tr.getPod = r.defaultGetPod\n\t}\n\treturn r\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tr := &Reconciler{\n\t\tClient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\tkubeclient: kubernetes.NewForConfigOrDie(mgr.GetConfig()),\n\t\trecorder: mgr.GetRecorder(controllerName),\n\t}\n\tr.validate = r._validate\n\treturn r\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileStack{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileCockroachDB{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileApplication{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileApplication{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileTaskRun{\n\t\tclient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\tscmFactory: tracker.CreateSCMClient,\n\t\ttaskRuns: make(taskRunTracker),\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileExample{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileGrafana{\n\t\tclient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\thelper: common.NewKubeHelper(),\n\t\tplugins: newPluginsHelper(),\n\t\tconfig: common.GetControllerConfig(),\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileUnitedDeployment{\n\t\tClient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\n\t\trecorder: mgr.GetRecorder(controllerName),\n\t\tsubSetControls: map[subSetType]ControlInterface{\n\t\t\tstatefulSetSubSetType: &StatefulSetControl{Client: mgr.GetClient(), scheme: mgr.GetScheme()},\n\t\t},\n\t}\n}", "func NewStoreWithFakeReducer() (st *Store, getActions func() []Action) {\n\tvar mu sync.Mutex\n\tactions := []Action{}\n\treducer := Reducer(func(ctx context.Context, s *EngineState, action Action) {\n\t\tmu.Lock()\n\t\tdefer mu.Unlock()\n\t\tactions = append(actions, action)\n\n\t\terrorAction, isErrorAction := action.(ErrorAction)\n\t\tif isErrorAction {\n\t\t\ts.FatalError = errorAction.Error\n\t\t}\n\t})\n\n\tgetActions = func() []Action {\n\t\tmu.Lock()\n\t\tdefer 
mu.Unlock()\n\t\treturn append([]Action{}, actions...)\n\t}\n\treturn NewStore(reducer, false), getActions\n}", "func newReconciler(mgr manager.Manager, resourceManager resources.ResourceManager, clusterFactory clusterFactory.ClusterFactory) reconcile.Reconciler {\n\treturn &ReconcileServiceInstance{\n\t\tClient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\tclusterFactory: clusterFactory,\n\t\tresourceManager: resourceManager,\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileConfigMap{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(cl client.Client, scheme *runtime.Scheme, eventRecorder record.EventRecorder, namespaceReconcilerFactory NamespaceReconcilerFactory, kialiReconciler KialiReconciler, cniConfig cni.Config) *MemberRollReconciler {\n\treturn &MemberRollReconciler{\n\t\tControllerResources: common.ControllerResources{\n\t\t\tClient: cl,\n\t\t\tScheme: scheme,\n\t\t\tEventRecorder: eventRecorder,\n\t\t},\n\t\tcniConfig: cniConfig,\n\t\tnamespaceReconcilerFactory: namespaceReconcilerFactory,\n\t\tkialiReconciler: kialiReconciler,\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileRokku{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) (reconcile.Reconciler, error) {\n\tawsSession, err := aws.GetAwsSessionFromEnv()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &ReconcileNode{\n\t\tclient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\tnodeTagger: aws.NewNodeInstanceTagger(ec2.New(awsSession)),\n\t}, nil\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileDatabase{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileMinecraft{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileMinecraft{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileIntegration{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tctx := context.Background()\n\tctx, cancel := context.WithCancel(ctx)\n\n\treturn &ReconcileKeycloakClient{\n\t\tclient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\tcancel: cancel,\n\t\tcontext: ctx,\n\t\trecorder: mgr.GetEventRecorderFor(ControllerName),\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileApplicationMonitoring{\n\t\tclient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\thelper: NewKubeHelper(),\n\t\textraParams: make(map[string]string),\n\t}\n}", "func newReconciler(mgr manager.Manager, channelDescriptor *utils.ChannelDescriptor, logger logr.Logger) reconcile.Reconciler {\n\treturn &ReconcileDeployable{\n\t\tKubeClient: mgr.GetClient(),\n\t\tChannelDescriptor: channelDescriptor,\n\t\tLog: logger,\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &JenkinsReconciler{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileGitSource{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileForwarding{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr 
manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileCollection{client: mgr.GetClient(), scheme: mgr.GetScheme(), indexResolver: ResolveIndex}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileTemplate{\n\t\tClient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\tindicesWatchingCancellers: make(map[string]context.CancelFunc),\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileJenkinsInstance{\n\t\tClient: mgr.GetClient(),\n\t\tEventRecorder: mgr.GetRecorder(\"JenkinsInstanceController\"),\n\t\tscheme: mgr.GetScheme(),\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileSporos{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func New() Action {\n\treturn &action{}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileRethinkDBCluster{client: mgr.GetClient(), config: mgr.GetConfig(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tlogger := logrus.WithFields(logrus.Fields{\"controller\": \"controller_cloudmetrics\"})\n\n\treturn &ReconcileCloudMetrics{\n\t\tclient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\tlogger: logger,\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileMysql{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager, c utils.Config) reconcile.Reconciler {\n\tscheme := mgr.GetScheme()\n\treturn &ReconcileReleaseManager{\n\t\tclient: mgr.GetClient(),\n\t\tscheme: scheme,\n\t\tconfig: c,\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileGatlingTask{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tr := &Reconciler{\n\t\tClient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\tkubeclient: kubernetes.NewForConfigOrDie(mgr.GetConfig()),\n\t\trecorder: mgr.GetRecorder(controllerName),\n\t}\n\tr.provision = r._provision\n\tr.bind = r._bind\n\tr.delete = r._delete\n\treturn r\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileAerospikeCluster{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileAppService{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileClusterSync{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileContainerJFR{scheme: mgr.GetScheme(), client: mgr.GetClient(),\n\t\tReconcilerTLS: common.NewReconcilerTLS(&common.ReconcilerTLSConfig{\n\t\t\tClient: mgr.GetClient(),\n\t\t}),\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileDescheduler{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcilerPolkadot{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newLexer(r io.Reader) *lexer {\n\ts := bufio.NewScanner(r)\n\n\treturn &lexer{\n\t\tscanner: s,\n\t\tindexPosition: 0,\n\t}\n\n}", "func newLexer(r io.Reader) *lexer {\n\treturn &lexer{\n\t\tscanner: newScanner(r),\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileZdyfapi{client: mgr.GetClient(), scheme: 
mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager, managedClient client.Client,\n\tmanagedRecorder record.EventRecorder) reconcile.Reconciler {\n\treturn &ReconcilePolicy{hubClient: mgr.GetClient(), managedClient: managedClient,\n\t\tmanagedRecorder: managedRecorder, scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager, certs *certs.Certs) reconcile.Reconciler {\n\treturn &ReconcileOutgoingPortal{client: mgr.GetClient(), scheme: mgr.GetScheme(), certs: certs}\n}", "func newReconciler(mgr manager.Manager, certs *certs.Certs) reconcile.Reconciler {\n\treturn &ReconcileOutgoingPortal{client: mgr.GetClient(), scheme: mgr.GetScheme(), certs: certs}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileLegacyHeader{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager, clustersetToSubject *helpers.ClustersetSubjectsMapper) reconcile.Reconciler {\n\tvar clusterroleToClusterset = make(map[string]sets.String)\n\treturn &Reconciler{\n\t\tclient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\tclustersetToSubject: clustersetToSubject,\n\t\tclusterroleToClusterset: clusterroleToClusterset,\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileServer{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(name string, controllerConfig config.DNSServiceConfig) *reconciler {\n\treturn &reconciler{\n\t\tEnv: common.NewEnv(name, controllerConfig),\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileDeploymentConfig{Client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func New(grammarString string) (*grammar.Grammar, tree.Reducer, error) {\n\tparseTree, err := runner.Run(grammarString)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tg, r := evalGrammar(parseTree.(*tree.PN))\n\treturn g, r, nil\n}", "func newLexer(input string) *lexer {\n\tl := &lexer{input: input}\n\tl.readChar()\n\treturn l\n}", "func NewDirectoryDiffMapReducer(dirs int, split []byte) Reducer {\n\treturn &DirectoryDiffMapReducer{\n\t\tDirs: dirs,\n\t\tSplit: split,\n\t}\n}", "func newReconciler(mgr manager.Manager) *ReconcileCertMerge {\n\treturn &ReconcileCertMerge{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileNameService{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileNameService{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tr := &ReconcileRedisClusterBackup{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n\tr.crController = k8sutil.NewCRControl(r.client)\n\tr.directClient = newDirectClient(mgr.GetConfig())\n\tr.jobController = k8sutil.NewJobController(r.directClient)\n\tr.recorder = mgr.GetEventRecorderFor(\"redis-cluster-operator-backup\")\n\treturn r\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tsubscriberMap := make(map[string]appv1alpha1.Subscriber)\n\treturn &ReconcileSubscription{client: mgr.GetClient(), scheme: mgr.GetScheme(), subscriberMap: subscriberMap}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileInfluxdb{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func New(mgr manager.Manager) *Reconciler {\n\treturn &Reconciler{\n\t\tclient: mgr.GetClient(),\n\t\tscheme: 
mgr.GetScheme(),\n\t\trecorder: mgr.GetEventRecorderFor(\"controller.fluentbit\"),\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileWordpress{Client: mgr.GetClient(), scheme: mgr.GetScheme(), recorder: mgr.GetRecorder(controllerName)}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileHostOperatorConfig{client: mgr.GetClient()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileSqlDB{Client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileWebApp{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileVirtualcluster{\n\t\tClient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t}\n}", "func NewSimpleMapReduce(mappers int, mapQueueSize int, reduceQueueSize int) *SimpleMapReduce {\n return &SimpleMapReduce{\n mappers: mappers,\n hasStarted: false,\n mapFn: func (item interface{}) interface{} {\n return item\n },\n reduceFn: nil,\n workQueue: make(chan interface{}, mapQueueSize),\n reduceQueue: make(chan interface{}, reduceQueueSize),\n mappersFinished: make([]chan bool, mappers),\n reducedFinished: make(chan bool),\n }\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcilePodHealth{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tfactory := &helmreconciler.Factory{CustomizerFactory: &IstioRenderingCustomizerFactory{}}\n\treturn &ReconcileIstioControlPlane{client: mgr.GetClient(), scheme: mgr.GetScheme(), factory: factory}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileFloatingIPSet{\n\t\tClient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\trecorder: mgr.GetRecorder(\"floatingipassociate-controller\"),\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileCronner{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileWavefrontProxy{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileBlackboxTarget{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileParameterStore{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}" ]
[ "0.78301185", "0.5775491", "0.5751547", "0.57451856", "0.56922376", "0.5683029", "0.563016", "0.55568194", "0.554427", "0.55182046", "0.5517022", "0.5494841", "0.54838514", "0.54738325", "0.5473216", "0.5470393", "0.54662704", "0.5458371", "0.5450969", "0.5445709", "0.5444212", "0.544182", "0.54265404", "0.5408893", "0.5377157", "0.5358093", "0.5354041", "0.53534466", "0.53534466", "0.53391016", "0.5333875", "0.5329414", "0.5319035", "0.53146064", "0.5311404", "0.5302855", "0.5294525", "0.52882344", "0.5262478", "0.52590126", "0.52564114", "0.52564114", "0.5254817", "0.5250024", "0.524179", "0.52390605", "0.5238699", "0.5233068", "0.52303237", "0.52190953", "0.52131027", "0.52089375", "0.5205734", "0.5199314", "0.51895976", "0.51866865", "0.51810575", "0.5180884", "0.51785165", "0.5169807", "0.5167439", "0.51671445", "0.51610065", "0.5158189", "0.5150919", "0.5149376", "0.5132912", "0.5132802", "0.5118297", "0.5116093", "0.5095044", "0.5095044", "0.5093959", "0.5092498", "0.5088482", "0.5087094", "0.5081479", "0.5078389", "0.50737995", "0.5071266", "0.5067892", "0.5065908", "0.5065908", "0.5059468", "0.5042368", "0.5040231", "0.5034377", "0.5031436", "0.5029922", "0.5021737", "0.5017798", "0.5014669", "0.5011328", "0.5005961", "0.5003814", "0.49974543", "0.49860126", "0.4976943", "0.49757886", "0.4969488" ]
0.8353731
0
start begins streaming values from the mappers and reducing them.
func (r *reducer) start() {
	for _, m := range r.mappers {
		m.start()
	}
	go r.run()
}
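A minimal, self-contained sketch of the fan-in pattern the document above implements; the concrete mapper/reducer types, channel shapes, and the summing fold are illustrative assumptions, not code from the original codebase:

package main

import "fmt"

// mapper emits one key/value batch on its own channel, then closes it.
// (Illustrative type: the real mappers stream many intervals.)
type mapper struct {
	data map[string]int
	c    chan map[string]int
}

func (m *mapper) start() {
	go func() {
		m.c <- m.data
		close(m.c)
	}()
}

// reducer fans in every mapper's channel and folds the batches together.
type reducer struct {
	mappers []*mapper
	out     chan map[string]int
}

// start launches each mapper, then reduces their output in a goroutine —
// the same shape as the document field above.
func (r *reducer) start() {
	for _, m := range r.mappers {
		m.start()
	}
	go r.run()
}

func (r *reducer) run() {
	total := make(map[string]int)
	for _, m := range r.mappers {
		for kv := range m.c { // drain this mapper's channel
			for k, v := range kv {
				total[k] += v // assumed reduce fn: sum per key
			}
		}
	}
	r.out <- total
	close(r.out)
}

func main() {
	ms := []*mapper{
		{data: map[string]int{"a": 1, "b": 2}, c: make(chan map[string]int)},
		{data: map[string]int{"a": 3}, c: make(chan map[string]int)},
	}
	r := &reducer{mappers: ms, out: make(chan map[string]int)}
	r.start()
	fmt.Println(<-r.out) // map[a:4 b:2]
}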
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *mapper) start() {\n\tm.itr = m.executor.db.CreateIterator(m.seriesID, m.fieldID, m.typ,\n\t\tm.executor.min, m.executor.max, m.executor.interval)\n\tgo m.run()\n}", "func (w *SimpleMapReduce) Start() *SimpleMapReduce {\n if (w.hasStarted) {\n return w\n }\n\n w.hasStarted = true\n\n for i := 0; i < w.mappers; i++ {\n mapFn := w.mapFn\n mapperFinished := make(chan bool)\n w.mappersFinished[i] = mapperFinished\n\n // Parallel function which performs the map and adds the result to the reduction queue\n go func() {\n for item := range w.workQueue {\n res := mapFn(item)\n w.reduceQueue <- res\n }\n close(mapperFinished)\n }()\n }\n\n // If a reduction function is specified, start it. Otherwise, simply close the reducedFinish\n // channel.\n if (w.reduceFn != nil) {\n go func() {\n w.reduceFn(w.reduceQueue)\n close(w.reducedFinished)\n }()\n } else {\n close(w.reducedFinished)\n }\n\n return w\n}", "func (r *reducer) run() {\nloop:\n\tfor {\n\t\t// Combine all data from the mappers.\n\t\tdata := make(map[string][]interface{})\n\t\tfor _, m := range r.mappers {\n\t\t\tkv, ok := <-m.C()\n\t\t\tif !ok {\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t\tfor k, v := range kv {\n\t\t\t\tdata[k] = append(data[k], v)\n\t\t\t}\n\t\t}\n\n\t\t// Reduce each key.\n\t\tfor k, v := range data {\n\t\t\tr.fn(k, v, r)\n\t\t}\n\t}\n\n\t// Mark the channel as complete.\n\tclose(r.c)\n}", "func (m *mapper) run() {\n\tfor m.itr.NextIterval() {\n\t\tm.fn(m.itr, m)\n\t}\n\tclose(m.c)\n}", "func (l *LocalMapper) Begin(c *influxql.Call, startingTime int64, chunkSize int) error {\n\t// set up the buffers. These ensure that we return data in time order\n\tmapFunc, err := influxql.InitializeMapFunc(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.mapFunc = mapFunc\n\tl.keyBuffer = make([]int64, len(l.cursors))\n\tl.valueBuffer = make([][]byte, len(l.cursors))\n\tl.chunkSize = chunkSize\n\tl.tmin = startingTime\n\n\tvar isCountDistinct bool\n\n\t// determine if this is a raw data query with a single field, multiple fields, or an aggregate\n\tvar fieldName string\n\tif c == nil { // its a raw data query\n\t\tl.isRaw = true\n\t\tif len(l.selectFields) == 1 {\n\t\t\tfieldName = l.selectFields[0]\n\t\t}\n\n\t\t// if they haven't set a limit, just set it to the max int size\n\t\tif l.limit == 0 {\n\t\t\tl.limit = math.MaxUint64\n\t\t}\n\t} else {\n\t\t// Check for calls like `derivative(mean(value), 1d)`\n\t\tvar nested *influxql.Call = c\n\t\tif fn, ok := c.Args[0].(*influxql.Call); ok {\n\t\t\tnested = fn\n\t\t}\n\n\t\tswitch lit := nested.Args[0].(type) {\n\t\tcase *influxql.VarRef:\n\t\t\tfieldName = lit.Val\n\t\tcase *influxql.Distinct:\n\t\t\tif c.Name != \"count\" {\n\t\t\t\treturn fmt.Errorf(\"aggregate call didn't contain a field %s\", c.String())\n\t\t\t}\n\t\t\tisCountDistinct = true\n\t\t\tfieldName = lit.Val\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"aggregate call didn't contain a field %s\", c.String())\n\t\t}\n\n\t\tisCountDistinct = isCountDistinct || (c.Name == \"count\" && nested.Name == \"distinct\")\n\t}\n\n\t// set up the field info if a specific field was set for this mapper\n\tif fieldName != \"\" {\n\t\tfid, err := l.decoder.FieldIDByName(fieldName)\n\t\tif err != nil {\n\t\t\tswitch {\n\t\t\tcase c != nil && c.Name == \"distinct\":\n\t\t\t\treturn fmt.Errorf(`%s isn't a field on measurement %s; to query the unique values for a tag use SHOW TAG VALUES FROM %[2]s WITH KEY = \"%[1]s`, fieldName, l.job.MeasurementName)\n\t\t\tcase isCountDistinct:\n\t\t\t\treturn fmt.Errorf(\"%s isn't a field on measurement %s; 
count(distinct) on tags isn't yet supported\", fieldName, l.job.MeasurementName)\n\t\t\t}\n\t\t}\n\t\tl.fieldID = fid\n\t\tl.fieldName = fieldName\n\t}\n\n\t// seek the bolt cursors and fill the buffers\n\tfor i, c := range l.cursors {\n\t\t// this series may have never been written in this shard group (time range) so the cursor would be nil\n\t\tif c == nil {\n\t\t\tl.keyBuffer[i] = 0\n\t\t\tl.valueBuffer[i] = nil\n\t\t\tcontinue\n\t\t}\n\t\tk, v := c.Seek(u64tob(uint64(l.job.TMin)))\n\t\tif k == nil {\n\t\t\tl.keyBuffer[i] = 0\n\t\t\tl.valueBuffer[i] = nil\n\t\t\tcontinue\n\t\t}\n\t\tl.cursorsEmpty = false\n\t\tt := int64(btou64(k))\n\t\tl.keyBuffer[i] = t\n\t\tl.valueBuffer[i] = v\n\t}\n\treturn nil\n}", "func (s *JsonEntryCounter) Mapper(r io.Reader, w io.Writer) error {\n\tlog.Printf(\"map_input_file %s\", os.Getenv(\"map_input_file\"))\n\twg, out := mrproto.JsonInternalOutputProtocol(w)\n\n\t// for efficient counting, use an in-memory counter that flushes the least recently used item\n\t// less Mapper output makes for faster sorting and reducing.\n\tcounter := lru.NewLRUCounter(func(k interface{}, v int64) {\n\t\tout <- mrproto.KeyValue{k, v}\n\t}, 100)\n\n\tfor line := range mrproto.RawInputProtocol(r) {\n\t\tvar record map[string]json.RawMessage\n\t\tif err := json.Unmarshal(line, &record); err != nil {\n\t\t\tgomrjob.Counter(\"example_mr\", \"Unmarshal Error\", 1)\n\t\t\tlog.Printf(\"%s\", err)\n\t\t\tcontinue\n\t\t}\n\t\tgomrjob.Counter(\"example_mr\", \"Map Lines Read\", 1)\n\t\tcounter.Incr(\"lines_read\", 1)\n\t\tfor k, _ := range record {\n\t\t\tcounter.Incr(k, 1)\n\t\t}\n\t}\n\tcounter.Flush()\n\tclose(out)\n\twg.Wait()\n\treturn nil\n}", "func (graphMinion *graphMinion) start() {\n\tgo func() {\n\t\tdefer graphMinion.wg.Done()\n\t\tfor {\n\n\t\t\t// pull reads from queue until done\n\t\t\tmappingData, ok := <-graphMinion.inputChannel\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif mappingData == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// increment the nodes contained in the mapping window\n\t\t\tmisc.ErrorCheck(graphMinion.graph.IncrementSubPath(mappingData.ContainedNodes, mappingData.Freq))\n\t\t}\n\t}()\n}", "func (r *reducer) stop() {\n\tfor _, m := range r.mappers {\n\t\tm.stop()\n\t}\n\tsyncClose(r.done)\n}", "func (job *MapOnlyJob) Run() error {\n\tif job.NewMapperF == nil {\n\t\treturn errors.New(\"MapOnlyJob: NewMapperF undefined!\")\n\t}\n\tif job.Source == nil {\n\t\treturn errors.New(\"MapOnlyJob: Source undefined!\")\n\t}\n\ttotalPart := 0\n\tendss := make([][]chan error, 0, len(job.Source))\n\tfor i := range job.Source {\n\t\tpartCount, err := job.Source[i].PartCount()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tends := make([]chan error, 0, partCount)\n\t\tfor part := 0; part < partCount; part++ {\n\t\t\tend := make(chan error, 1)\n\t\t\tends = append(ends, end)\n\t\t\tgo func(i, part, totalPart int, end chan error) {\n\t\t\t\tend <- func() error {\n\t\t\t\t\tmapper := job.NewMapperF(i, part)\n\t\t\t\t\tkey, val := mapper.NewKey(), mapper.NewVal()\n\t\t\t\t\tcs := make([]sophie.Collector, 0, len(job.Dest))\n\t\t\t\t\tfor _, dst := range job.Dest {\n\t\t\t\t\t\tc, err := dst.Collector(totalPart)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn errorsp.WithStacksAndMessage(err, \"open collector for source %d part %d failed\", i, part)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdefer c.Close()\n\t\t\t\t\t\tcs = append(cs, c)\n\t\t\t\t\t}\n\t\t\t\t\titer, err := job.Source[i].Iterator(part)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn 
errorsp.WithStacksAndMessage(err, \" open source %d part %d failed\", i, part)\n\t\t\t\t\t}\n\t\t\t\t\tdefer iter.Close()\n\n\t\t\t\t\tfor {\n\t\t\t\t\t\tif err := iter.Next(key, val); err != nil {\n\t\t\t\t\t\t\tif errorsp.Cause(err) != io.EOF {\n\t\t\t\t\t\t\t\treturn errorsp.WithStacksAndMessage(err, \"next failed\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif err := mapper.Map(key, val, cs); err != nil {\n\t\t\t\t\t\t\tif errorsp.Cause(err) == EOM {\n\t\t\t\t\t\t\t\tlog.Print(\"EOM returned, exit early\")\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\treturn errorsp.WithStacksAndMessage(err, \"mapping %v %v failed\", key, val)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn errorsp.WithStacksAndMessage(mapper.MapEnd(cs), \"map end failed\")\n\t\t\t\t}()\n\t\t\t}(i, part, totalPart, end)\n\t\t\ttotalPart++\n\t\t}\n\t\tendss = append(endss, ends)\n\t}\n\tvar errReturned error\n\tfor _, ends := range endss {\n\t\tfor part, end := range ends {\n\t\t\tlog.Printf(\"Waiting for mapper %d...\", part)\n\t\t\tif err := <-end; err != nil {\n\t\t\t\tlog.Printf(\"Error returned for part %d: %v\", part, err)\n\t\t\t\terrReturned = err\n\t\t\t}\n\t\t\tlog.Printf(\"No error for mapper %d...\", part)\n\t\t}\n\t}\n\treturn errReturned\n}", "func (w *noAggregationStreamWorker) run() {\n\tlog.Debugf(\"Starting streaming routine for the no-aggregation pipeline\")\n\n\tticker := time.NewTicker(noAggWorkerStreamCheckFrequency)\n\tdefer ticker.Stop()\n\tlogPayloads := config.Datadog.GetBool(\"log_payloads\")\n\tw.seriesSink, w.sketchesSink = createIterableMetrics(w.flushConfig, w.serializer, logPayloads, false)\n\n\tstopped := false\n\tvar stopBlockChan chan struct{}\n\tvar lastStream time.Time\n\n\tfor !stopped {\n\t\tstart := time.Now()\n\t\tserializedSamples := 0\n\n\t\tmetrics.Serialize(\n\t\t\tw.seriesSink,\n\t\t\tw.sketchesSink,\n\t\t\tfunc(seriesSink metrics.SerieSink, sketchesSink metrics.SketchesSink) {\n\t\t\tmainloop:\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\n\t\t\t\t\t// stop signal\n\t\t\t\t\tcase trigger := <-w.stopChan:\n\t\t\t\t\t\tstopped = true\n\t\t\t\t\t\tstopBlockChan = trigger.blockChan\n\t\t\t\t\t\tbreak mainloop // end `Serialize` call and trigger a flush to the forwarder\n\n\t\t\t\t\tcase <-ticker.C:\n\t\t\t\t\t\tn := time.Now()\n\t\t\t\t\t\tif serializedSamples > 0 && lastStream.Before(n.Add(-time.Second*1)) {\n\t\t\t\t\t\t\tlog.Debug(\"noAggregationStreamWorker: triggering an automatic payloads flush to the forwarder (no traffic since 1s)\")\n\t\t\t\t\t\t\tbreak mainloop // end `Serialize` call and trigger a flush to the forwarder\n\t\t\t\t\t\t}\n\n\t\t\t\t\t// receiving samples\n\t\t\t\t\tcase samples := <-w.samplesChan:\n\t\t\t\t\t\tlog.Debugf(\"Streaming %d metrics from the no-aggregation pipeline\", len(samples))\n\t\t\t\t\t\tfor _, sample := range samples {\n\t\t\t\t\t\t\t// enrich metric sample tags\n\t\t\t\t\t\t\tsample.GetTags(w.taggerBuffer, w.metricBuffer)\n\t\t\t\t\t\t\tw.metricBuffer.AppendHashlessAccumulator(w.taggerBuffer)\n\n\t\t\t\t\t\t\t// turns this metric sample into a serie\n\t\t\t\t\t\t\tvar serie metrics.Serie\n\t\t\t\t\t\t\tserie.Name = sample.Name\n\t\t\t\t\t\t\tserie.Points = []metrics.Point{{Ts: sample.Timestamp, Value: sample.Value}}\n\t\t\t\t\t\t\tserie.Tags = tagset.CompositeTagsFromSlice(w.metricBuffer.Copy())\n\t\t\t\t\t\t\tserie.Host = sample.Host\n\t\t\t\t\t\t\t// ignored when late but mimic dogstatsd traffic here anyway\n\t\t\t\t\t\t\tserie.Interval = 
10\n\t\t\t\t\t\t\tw.seriesSink.Append(&serie)\n\n\t\t\t\t\t\t\tw.taggerBuffer.Reset()\n\t\t\t\t\t\t\tw.metricBuffer.Reset()\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tlastStream = time.Now()\n\n\t\t\t\t\t\tserializedSamples += len(samples)\n\t\t\t\t\t\tif serializedSamples > w.maxMetricsPerPayload {\n\t\t\t\t\t\t\tbreak mainloop // end `Serialize` call and trigger a flush to the forwarder\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}, func(serieSource metrics.SerieSource) {\n\t\t\t\tsendIterableSeries(w.serializer, start, serieSource)\n\t\t\t}, func(sketches metrics.SketchesSource) {\n\t\t\t\t// noop: we do not support sketches in the no-agg pipeline.\n\t\t\t})\n\n\t\tif stopped {\n\t\t\tbreak\n\t\t}\n\n\t\tw.seriesSink, w.sketchesSink = createIterableMetrics(w.flushConfig, w.serializer, logPayloads, false)\n\t}\n\n\tif stopBlockChan != nil {\n\t\tclose(stopBlockChan)\n\t}\n}", "func (s *seriesValueGenerator) Start() error { return nil }", "func (theBoss *theBoss) mapReads() error {\n\ttheBoss.alignments = make(chan *sam.Record, BUFFERSIZE)\n\n\t// set up the BAM if exact alignment is requested\n\tif !theBoss.info.Sketch.NoExactAlign {\n\t\tif err := theBoss.setupBAM(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// setup the waitgroups for the sketching and graphing minions\n\tvar wg1 sync.WaitGroup\n\tvar wg2 sync.WaitGroup\n\n\t// launch the graph minions (one minion per graph in the index)\n\ttheBoss.graphMinionRegister = make([]*graphMinion, len(theBoss.info.Store))\n\tfor _, graph := range theBoss.info.Store {\n\n\t\t// create, start and register the graph minion\n\t\tminion := newGraphMinion(theBoss, graph)\n\t\twg2.Add(1)\n\t\tminion.start(&wg2)\n\t\ttheBoss.graphMinionRegister[graph.GraphID] = minion\n\t}\n\n\t// launch the sketching minions (one per CPU)\n\tfor i := 0; i < theBoss.info.NumProc; i++ {\n\t\twg1.Add(1)\n\t\tgo func(workerNum int) {\n\t\t\tdefer wg1.Done()\n\n\t\t\t// keep a track of what this minion does\n\t\t\treceivedReads := 0\n\t\t\tmappedCount := 0\n\t\t\tmultimappedCount := 0\n\n\t\t\t// start the main processing loop\n\t\t\tfor {\n\n\t\t\t\t// pull reads from queue until done\n\t\t\t\tread, ok := <-theBoss.reads\n\t\t\t\tif !ok {\n\n\t\t\t\t\t// update the counts\n\t\t\t\t\ttheBoss.Lock()\n\t\t\t\t\ttheBoss.receivedReadCount += receivedReads\n\t\t\t\t\ttheBoss.mappedCount += mappedCount\n\t\t\t\t\ttheBoss.multimappedCount += multimappedCount\n\t\t\t\t\ttheBoss.Unlock()\n\n\t\t\t\t\t// end the sketching minion\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// get sketch for read\n\t\t\t\treadSketch, err := read.RunMinHash(theBoss.info.KmerSize, theBoss.info.SketchSize, false, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\t// get the number of k-mers in the sequence\n\t\t\t\tkmerCount := (len(read.Seq) - theBoss.info.KmerSize) + 1\n\n\t\t\t\t// query the LSH ensemble\n\t\t\t\tresults, err := theBoss.info.db.Query(readSketch, kmerCount, theBoss.info.ContainmentThreshold)\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\t// if multiple graphs are returned, we need to deep copy the read\n\t\t\t\tdeepCopy := false\n\t\t\t\tif len(results) > 1 {\n\t\t\t\t\tdeepCopy = true\n\t\t\t\t}\n\n\t\t\t\t// augment graphs and optionally perform exact alignment\n\t\t\t\tfor graphID, hits := range results {\n\t\t\t\t\tif deepCopy {\n\t\t\t\t\t\treadCopy := *read.DeepCopy()\n\t\t\t\t\t\ttheBoss.graphMinionRegister[graphID].inputChannel <- &graphMinionPair{hits, readCopy}\n\t\t\t\t\t} else 
{\n\t\t\t\t\t\ttheBoss.graphMinionRegister[graphID].inputChannel <- &graphMinionPair{hits, *read}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// update counts\n\t\t\t\treceivedReads++\n\t\t\t\tif len(results) > 0 {\n\t\t\t\t\tmappedCount++\n\t\t\t\t}\n\t\t\t\tif len(results) > 1 {\n\t\t\t\t\tmultimappedCount++\n\t\t\t\t}\n\t\t\t}\n\t\t}(i)\n\t}\n\n\t// control the channels\n\tgo func() {\n\n\t\t// wait for the sketching minions to finish\n\t\twg1.Wait()\n\n\t\t// shut down the graph minions input channels\n\t\tfor _, minion := range theBoss.graphMinionRegister {\n\t\t\tclose(minion.inputChannel)\n\t\t}\n\n\t\t// wait for the graph minions to finish\n\t\twg2.Wait()\n\n\t\t// end the alignment writer\n\t\tclose(theBoss.alignments)\n\n\t}()\n\n\t// collect the alignments and write them\n\tfor record := range theBoss.alignments {\n\t\t// check the record is valid\n\t\t//if sam.IsValidRecord(record) == false {\n\t\t//\tos.Exit(1)\n\t\t//}\n\t\ttheBoss.alignmentCount++\n\t\tif err := theBoss.bamwriter.Write(record); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// close the bam writer and return to the completed boss to the pipeline\n\tvar err error\n\tif !theBoss.info.Sketch.NoExactAlign {\n\t\terr = theBoss.bamwriter.Close()\n\t}\n\treturn err\n}", "func (s *Stream) FlatMap(name string, mapper FlatMapper) *Stream {\n\tp := NewFlatMapProcessor(mapper)\n\tn := s.tp.AddProcessor(name, p, s.parents)\n\n\treturn newStream(s.tp, []Node{n})\n}", "func mapSum(itr Iterator, m *mapper) {\n\tn := float64(0)\n\tfor k, v := itr.Next(); k != 0; k, v = itr.Next() {\n\t\tn += v.(float64)\n\t}\n\tm.emit(itr.Time(), n)\n}", "func MapReduce(works chan MapReducable, workers int) {\n\tserials := make(chan chan Reducable, workers)\n\tgo func() {\n\t\tfor work := range works {\n\t\t\twork := work\n\t\t\tserial := make(chan Reducable)\n\t\t\tserials <- serial\n\t\t\tgo func() {\n\t\t\t\tserial <- work.Map()\n\t\t\t}()\n\t\t}\n\t\tclose(serials)\n\t}()\n\tfor serial := range serials {\n\t\tr := <-serial\n\t\tr.Reduce()\n\t}\n}", "func (p *Pipe) start() {\n\tp.cancel = make(chan struct{})\n\terrcList := make([]<-chan error, 0, 1+len(p.processors)+len(p.sinks))\n\t// start pump\n\tout, errc := p.pump.run(p.cancel, p.ID(), p.provide, p.consume, p.sampleRate, p.metric)\n\terrcList = append(errcList, errc)\n\n\t// start chained processesing\n\tfor _, proc := range p.processors {\n\t\tout, errc = proc.run(p.cancel, p.ID(), out, p.sampleRate, p.metric)\n\t\terrcList = append(errcList, errc)\n\t}\n\n\tsinkErrcList := p.broadcastToSinks(out)\n\terrcList = append(errcList, sinkErrcList...)\n\tp.errc = mergeErrors(errcList...)\n}", "func (ns *NsonSerializer) startMap(field string) {\n\tif field != \"\" {\n\t\tns.startField(field)\n\t}\n\tns.writer.WriteByte(byte(types.Map))\n\toff := ns.writer.Size()\n\tns.writer.WriteInt(0) // size in bytes\n\tns.writer.WriteInt(0) // number of elements\n\tns.offsetStack = append(ns.offsetStack, off)\n\tns.sizeStack = append(ns.sizeStack, 0)\n}", "func (rb *routerBase) Start(ctx context.Context, wg *sync.WaitGroup, ctxCancel context.CancelFunc) {\n\twg.Add(len(rb.outputs))\n\tfor i := range rb.outputs {\n\t\tgo func(ctx context.Context, rb *routerBase, ro *routerOutput, wg *sync.WaitGroup) {\n\t\t\tvar span *tracing.Span\n\t\t\tif rb.statsCollectionEnabled {\n\t\t\t\tctx, span = execinfra.ProcessorSpan(ctx, \"router output\")\n\t\t\t\tspan.SetTag(execinfrapb.StreamIDTagKey, ro.streamID)\n\t\t\t\tro.stats.Inputs = make([]execinfrapb.InputStats, 1)\n\t\t\t}\n\n\t\t\tdrain := false\n\t\t\tstreamStatus := 
execinfra.NeedMoreRows\n\t\t\tro.mu.Lock()\n\t\t\tfor {\n\t\t\t\t// Send any metadata that has been buffered. Note that we are not\n\t\t\t\t// maintaining the relative ordering between metadata items and rows\n\t\t\t\t// (but it doesn't matter).\n\t\t\t\tif len(ro.mu.metadataBuf) > 0 {\n\t\t\t\t\tm := ro.mu.metadataBuf[0]\n\t\t\t\t\t// Reset the value so any objects it refers to can be garbage\n\t\t\t\t\t// collected.\n\t\t\t\t\tro.mu.metadataBuf[0] = nil\n\t\t\t\t\tro.mu.metadataBuf = ro.mu.metadataBuf[1:]\n\n\t\t\t\t\tro.mu.Unlock()\n\n\t\t\t\t\trb.semaphore <- struct{}{}\n\t\t\t\t\tstatus := ro.stream.Push(nil /*row*/, m)\n\t\t\t\t\t<-rb.semaphore\n\n\t\t\t\t\trb.updateStreamState(&streamStatus, status)\n\t\t\t\t\tro.mu.Lock()\n\t\t\t\t\tro.mu.streamStatus = streamStatus\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif !drain {\n\t\t\t\t\t// Send any rows that have been buffered. We grab multiple rows at a\n\t\t\t\t\t// time to reduce contention.\n\t\t\t\t\tif rows, err := ro.popRowsLocked(ctx); err != nil {\n\t\t\t\t\t\tro.mu.Unlock()\n\t\t\t\t\t\trb.fwdMetadata(&execinfrapb.ProducerMetadata{Err: err})\n\t\t\t\t\t\tro.mu.Lock()\n\t\t\t\t\t\tatomic.StoreUint32(&rb.aggregatedStatus, uint32(execinfra.DrainRequested))\n\t\t\t\t\t\tdrain = true\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t} else if len(rows) > 0 {\n\t\t\t\t\t\tro.mu.Unlock()\n\t\t\t\t\t\trb.semaphore <- struct{}{}\n\t\t\t\t\t\tfor _, row := range rows {\n\t\t\t\t\t\t\tstatus := ro.stream.Push(row, nil)\n\t\t\t\t\t\t\trb.updateStreamState(&streamStatus, status)\n\t\t\t\t\t\t}\n\t\t\t\t\t\t<-rb.semaphore\n\t\t\t\t\t\tif rb.statsCollectionEnabled {\n\t\t\t\t\t\t\tro.stats.Inputs[0].NumTuples.Add(int64(len(rows)))\n\t\t\t\t\t\t}\n\t\t\t\t\t\tro.mu.Lock()\n\t\t\t\t\t\tro.mu.streamStatus = streamStatus\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// No rows or metadata buffered; see if the producer is done.\n\t\t\t\tif ro.mu.producerDone {\n\t\t\t\t\tif rb.statsCollectionEnabled {\n\t\t\t\t\t\tro.stats.Exec.MaxAllocatedMem.Set(uint64(ro.memoryMonitor.MaximumBytes()))\n\t\t\t\t\t\tro.stats.Exec.MaxAllocatedDisk.Set(uint64(ro.diskMonitor.MaximumBytes()))\n\t\t\t\t\t\tspan.RecordStructured(&ro.stats)\n\t\t\t\t\t\tspan.Finish()\n\t\t\t\t\t\tif trace := execinfra.GetTraceData(ctx); trace != nil {\n\t\t\t\t\t\t\tro.mu.Unlock()\n\t\t\t\t\t\t\trb.semaphore <- struct{}{}\n\t\t\t\t\t\t\tstatus := ro.stream.Push(nil, &execinfrapb.ProducerMetadata{TraceData: trace})\n\t\t\t\t\t\t\trb.updateStreamState(&streamStatus, status)\n\t\t\t\t\t\t\t<-rb.semaphore\n\t\t\t\t\t\t\tro.mu.Lock()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tro.stream.ProducerDone()\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\t// Nothing to do; wait.\n\t\t\t\tro.mu.cond.Wait()\n\t\t\t}\n\t\t\tro.mu.rowContainer.Close(ctx)\n\t\t\tro.mu.Unlock()\n\n\t\t\tro.rowBufToPushFromAcc.Close(ctx)\n\t\t\tro.memoryMonitor.Stop(ctx)\n\t\t\tro.diskMonitor.Stop(ctx)\n\t\t\tro.rowBufToPushFromMon.Stop(ctx)\n\n\t\t\twg.Done()\n\t\t}(ctx, rb, &rb.outputs[i], wg)\n\t}\n}", "func (this *Worker) streamedResult(line string) StreamResult {\n if this.mapReader == nil || this.mapWriter == nil {\n return emptyStreamResult\n }\n\n _, err := this.mapWriter.WriteString(line)\n this.mapWriter.Flush() // must flush, else script will not get this line\n if err != nil {\n if err != io.EOF {\n panic(err)\n }\n }\n\n mapperLine, err := this.mapReader.ReadString(EOL)\n if err != nil {\n panic(err)\n }\n return StreamResult(mapperLine)\n}", "func (g *Gosmonaut) Start(\n\ttypes OSMTypeSet,\n\tfuncEntityNeeded func(OSMType, 
OSMTags) bool,\n) {\n\t// Block until previous run finished\n\tg.lock.Lock()\n\tg.stream = make(chan osmPair, entitiesPerPrimitiveBlock)\n\n\t// Init vars\n\tg.funcEntityNeeded = funcEntityNeeded\n\tg.types = types\n\n\tgo func() {\n\t\t// Decode\n\t\tg.decode()\n\n\t\t// Finish\n\t\tclose(g.stream)\n\t\tg.lock.Unlock()\n\t}()\n}", "func (w *SimpleMapReduce) Reduce (reduceFn ReduceFn) *SimpleMapReduce {\n w.reduceFn = reduceFn\n return w\n}", "func (s *streamStrategy) Start() {\n\tgo func() {\n\t\tfor msg := range s.inputChan {\n\t\t\tif msg.Origin != nil {\n\t\t\t\tmsg.Origin.LogSource.LatencyStats.Add(msg.GetLatency())\n\t\t\t}\n\t\t\ts.outputChan <- &message.Payload{Messages: []*message.Message{msg}, Encoded: msg.Content, UnencodedSize: len(msg.Content)}\n\t\t}\n\t\ts.done <- struct{}{}\n\t}()\n}", "func (w *Walker) startProcessing() {\n\tdoStart := false\n\tw.pipe.RLock()\n\tif w.pipe.filters == nil { // no processing up to now => start with initial node\n\t\tw.pipe.pushSync(w.initial, 0) // input is buffered, will return immediately\n\t\tdoStart = true // yes, we will have to start the pipeline\n\t}\n\tw.pipe.RUnlock()\n\tif doStart { // ok to be outside mutex as other goroutines will check pipe.empty()\n\t\tw.pipe.startProcessing() // must be outside of mutex lock\n\t}\n}", "func (p *literalProcessor) start() { go p.run() }", "func (w *SimpleMapReduce) Map(mapFn MapFn) *SimpleMapReduce {\n w.mapFn = mapFn\n return w\n}", "func (s *Stream) FlatMap(f interface{}) *Stream {\n\top, err := unary.FlatMapFunc(f)\n\tif err != nil {\n\t\ts.drainErr(err)\n\t\treturn s\n\t}\n\ts.Transform(op) // add flatmap as unary op\n\ts.ReStream() // add streamop to unpack flatmap result\n\treturn s\n}", "func (builder *streamBuilder[K, F]) Map(mapper Mapper[K, F]) *streamBuilder[K, F] {\n\tbuilder.stream.Mappers = append(builder.stream.Mappers, mapper)\n\treturn builder\n}", "func _reduce(fn redfn, total int, c chan dict) dict {\n\tfinalMap := make(dict)\n\n\tfor worker := 0; worker < total; worker++ {\n\t\tm := <-c\n\t\tfn(finalMap, m)\n\t}\n\n\treturn finalMap\n}", "func (collector *Collector) Start() {\n\t// Begin our internal processing first\n\tgo collector.process()\n\n\t// Start the prospector to start collecting data\n\tcollector.prospector.Start()\n}", "func (c *Collector) Start() {\n\tgo c.Source.Start()\n\tc.collect()\n}", "func (phStats *passwordHasherStats) startAccumulating() {\n\tgo phStats.accumulateStats()\n}", "func (jr *joinReader) mainLoop() error {\n\tprimaryKeyPrefix := sqlbase.MakeIndexKeyPrefix(&jr.desc, jr.index.ID)\n\n\tvar alloc sqlbase.DatumAlloc\n\tspans := make(sqlbase.Spans, 0, joinReaderBatchSize)\n\n\tif log.V(2) {\n\t\tlog.Infof(jr.ctx, \"starting (filter: %s)\", jr.filter)\n\t\tdefer log.Infof(jr.ctx, \"exiting\")\n\t}\n\n\tfor {\n\t\t// TODO(radu): figure out how to send smaller batches if the source has\n\t\t// a soft limit (perhaps send the batch out if we don't get a result\n\t\t// within a certain amount of time).\n\t\tfor spans = spans[:0]; len(spans) < joinReaderBatchSize; {\n\t\t\trow, err := jr.input.NextRow()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif row == nil {\n\t\t\t\tif len(spans) == 0 {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tkey, err := jr.generateKey(row, &alloc, primaryKeyPrefix)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tspans = append(spans, sqlbase.Span{\n\t\t\t\tStart: key,\n\t\t\t\tEnd: key.PrefixEnd(),\n\t\t\t})\n\t\t}\n\n\t\terr := jr.fetcher.StartScan(jr.flowCtx.txn, spans, 0)\n\t\tif 
err != nil {\n\t\t\tlog.Errorf(jr.ctx, \"scan error: %s\", err)\n\t\t\treturn err\n\t\t}\n\n\t\t// TODO(radu): we are consuming all results from a fetch before starting\n\t\t// the next batch. We could start the next batch early while we are\n\t\t// outputting rows.\n\t\tfor {\n\t\t\toutRow, err := jr.nextRow()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif outRow == nil {\n\t\t\t\t// Done.\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif log.V(3) {\n\t\t\t\tlog.Infof(jr.ctx, \"pushing row %s\\n\", outRow)\n\t\t\t}\n\t\t\t// Push the row to the output RowReceiver; stop if they don't need more\n\t\t\t// rows.\n\t\t\tif !jr.output.PushRow(outRow) {\n\t\t\t\tif log.V(2) {\n\t\t\t\t\tlog.Infof(jr.ctx, \"no more rows required\")\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\tif len(spans) != joinReaderBatchSize {\n\t\t\t// This was the last batch.\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func (d *dataUpdateTracker) startCollector(ctx context.Context) {\n\tfor in := range d.input {\n\t\tbucket, _ := path2BucketObjectWithBasePath(\"\", in)\n\t\tif bucket == \"\" {\n\t\t\tif d.debug && len(in) > 0 {\n\t\t\t\tconsole.Debugf(color.Green(\"dataUpdateTracker:\")+\" no bucket (%s)\\n\", in)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif isReservedOrInvalidBucket(bucket, false) {\n\t\t\tcontinue\n\t\t}\n\t\tsplit := splitPathDeterministic(in)\n\n\t\t// Add all paths until done.\n\t\td.mu.Lock()\n\t\tfor i := range split {\n\t\t\td.Current.bf.AddString(hashPath(path.Join(split[:i+1]...)).String())\n\t\t}\n\t\td.dirty = d.dirty || len(split) > 0\n\t\td.mu.Unlock()\n\t}\n}", "func (m *mapper) stop() { syncClose(m.done) }", "func (a *Accumulator)Start(){\n\tgo func() {\n\t\tfor stats := range a.StatsChan {\n\t\t\ta.mu.Lock()\n\t\t\ta.Stats = append(a.Stats, stats)\n\t\t\ta.mu.Unlock()\n\t\t\tif ( len(a.Stats) >= a.MaxResponses) {\n\t\t\t\tLog(\"top\", \"All requests received\")\n\t\t\t\ta.Done <- true\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tfor stats := range a.OverallStatsChan {\n\t\t\ta.mu.Lock()\n\t\t\ta.OverallStats = append(a.OverallStats, stats)\n\t\t\ta.mu.Unlock()\n\t\t}\n\t}()\n}", "func (m *SequentialMaster) Start() {\n\tm.active = true\n\n\tw := *NewWorker(m.JobName, m.MapF, m.ReduceF)\n\n\tfor i, file := range m.InputFileNames {\n\t\tw.DoMap(file, uint(i), m.NumReducers);\n\t}\n\n\tfor i := uint(0); i < m.NumReducers; i++ {\n\t\tw.DoReduce(i, uint(len(m.InputFileNames)))\n\t}\n}", "func (a *aggregator) Run() {\n\tgo a.submitter()\n\n\tfor m := range a.in {\n\t\tfor _, out_m := range a.process(m) {\n\t\t\ta.out <- out_m\n\t\t}\n\t}\n}", "func (l *lexer) run() {\n\tfor state := lexSchema; state != nil; {\n\t\tstate = state(l)\n\t}\n\tclose(l.items) // No more tokens will be delivered.\n}", "func (s Stream) Map(fn func(r Record) (Record, error)) Stream {\n\treturn s.Pipe(func() func(r Record) (Record, error) {\n\t\treturn fn\n\t})\n}", "func MapReduce(input int, reduce func(results []int) int, tasks ...Task) (int, error) {\n\t// 1. tasks => taskCh (chan Task)\n\t// 2. Limited number of gophers work on tasks\n\t// - output => output channel\n\t// 3. goroutine pull data from output channel\n\t// - get a slice of output\n\t// 4. 
feed reduce func with the slice\n\t// * if any error occured, abort & clean goroutines\n\t// - taskCh = drain\n\t// - outCh = close & drain\n\ttype taskResult struct {\n\t\to int\n\t\te error\n\t}\n\t// for abort\n\tabort := make(chan struct{})\n\n\tabortSwicher := func() bool {\n\t\tselect {\n\t\tcase <-abort:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// step 1\n\ttaskCh := make(chan Task)\n\tgo func() {\n\t\tfor _, t := range tasks {\n\t\t\ttaskCh <- t\n\t\t}\n\t\tclose(taskCh)\n\t}()\n\t// step 2\n\t// in order to close channel(outCh), use WaitGroup\n\tvar wg sync.WaitGroup\n\tworkerNum := 4\n\toutCh := make(chan taskResult)\n\terrCh := make(chan taskResult, 10) // blocked without a buffer\n\tfor i := 0; i < workerNum; i++ {\n\t\tgo func() {\n\t\t\tfor t := range taskCh {\n\t\t\t\tif abortSwicher() {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\twg.Add(1)\n\t\t\t\to, e := t.Execute(input)\n\t\t\t\tif e != nil && !abortSwicher() {\n\t\t\t\t\terrCh <- taskResult{o, e}\n\t\t\t\t\tclose(abort)\n\t\t\t\t} else {\n\t\t\t\t\toutCh <- taskResult{o, e}\n\t\t\t\t}\n\t\t\t\twg.Done()\n\t\t\t}\n\t\t}()\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(outCh)\n\t\tclose(errCh)\n\t}()\n\n\t// step 3\n\tres := []int{}\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tfor o := range outCh {\n\t\t\tif abortSwicher() {\n\t\t\t\tfor range outCh {\n\t\t\t\t} // drain outCh\n\t\t\t\tfor range taskCh {\n\t\t\t\t} // drain taskCh\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tres = append(res, o.o)\n\t\t}\n\t\tclose(done)\n\t}()\n\t// step 4\n\t<-done\n\n\tif abortSwicher() {\n\t\to := <-errCh\n\t\tfor range errCh {\n\t\t} // drain errCh\n\t\treturn o.o, o.e\n\t} else {\n\t\treturn reduce(res), nil\n\t}\n}", "func (b *Basic) start() {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tfmt.Fprintln(os.Stderr, \"Basic.start -- \", r)\n\t\t\tgo b.start()\n\t\t}\n\t}()\n\n\tfor rec := range b.in {\n\t\tif rec.flush != nil {\n\t\t\tb.flush(rec.flush)\n\t\t} else {\n\t\t\terr := b.w.Write(rec)\n\t\t\tif err != nil {\n\t\t\t\tb.incErrorCounter()\n\t\t\t\trec.Logger().Logr().ReportError(err)\n\t\t\t} else {\n\t\t\t\tb.incLoggedCounter()\n\t\t\t}\n\t\t}\n\t}\n\tclose(b.done)\n}", "func (r *streamRangeVectorIterator) load(start, end int64) {\n\tfor lbs, sample, hasNext := r.iter.Peek(); hasNext; lbs, sample, hasNext = r.iter.Peek() {\n\t\tif sample.Timestamp > end {\n\t\t\t// not consuming the iterator as this belong to another range.\n\t\t\treturn\n\t\t}\n\t\t// the lower bound of the range is not inclusive\n\t\tif sample.Timestamp <= start {\n\t\t\t_ = r.iter.Next()\n\t\t\tcontinue\n\t\t}\n\t\t// adds the sample.\n\t\tvar rangeAgg RangeStreamingAgg\n\t\tvar ok bool\n\t\trangeAgg, ok = r.windowRangeAgg[lbs]\n\t\tif !ok {\n\t\t\tvar metric labels.Labels\n\t\t\tif _, ok = r.metrics[lbs]; !ok {\n\t\t\t\tvar err error\n\t\t\t\tmetric, err = promql_parser.ParseMetric(lbs)\n\t\t\t\tif err != nil {\n\t\t\t\t\t_ = r.iter.Next()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tr.metrics[lbs] = metric\n\t\t\t}\n\n\t\t\t// never err here ,we have check error at evaluator.go rangeAggEvaluator() func\n\t\t\trangeAgg, _ = streamingAggregator(r.r)\n\t\t\tr.windowRangeAgg[lbs] = rangeAgg\n\t\t}\n\t\tp := promql.Point{\n\t\t\tT: sample.Timestamp,\n\t\t\tV: sample.Value,\n\t\t}\n\t\trangeAgg.agg(p)\n\t\t_ = r.iter.Next()\n\t}\n}", "func (ctx Context) Map(input chan float64, f MapFunc) (output chan float64) {\n\toutput = make(chan float64, ctx.StreamBufferSize)\n\n\tgo func() {\n\t\tdefer close(output)\n\n\t\tfor x := range input 
{\n\t\t\toutput <- f(x)\n\t\t}\n\t}()\n\n\treturn output\n}", "func (g Gen) FlatMap(f func(interface{}) Gen) Gen {\n\treturn func(genParams *GenParameters) *GenResult {\n\t\tresult := g(genParams)\n\t\tvalue, ok := result.Retrieve()\n\t\tif ok {\n\t\t\treturn f(value)(genParams)\n\t\t}\n\t\tmappedZero := f(reflect.Zero(result.ResultType).Interface())(genParams)\n\t\treturn &GenResult{\n\t\t\tShrinker: NoShrinker,\n\t\t\tresult: nil,\n\t\t\tLabels: result.Labels,\n\t\t\tResultType: mappedZero.ResultType,\n\t\t}\n\t}\n}", "func MapSpread(itr Iterator) interface{} {\n\tout := &spreadMapOutput{}\n\tpointsYielded := false\n\tvar val float64\n\n\tfor k, v := itr.Next(); k != -1; k, v = itr.Next() {\n\t\tswitch n := v.(type) {\n\t\tcase float64:\n\t\t\tval = n\n\t\tcase int64:\n\t\t\tval = float64(n)\n\t\t\tout.Type = Int64Type\n\t\t}\n\n\t\t// Initialize\n\t\tif !pointsYielded {\n\t\t\tout.Max = val\n\t\t\tout.Min = val\n\t\t\tpointsYielded = true\n\t\t}\n\t\tout.Max = math.Max(out.Max, val)\n\t\tout.Min = math.Min(out.Min, val)\n\t}\n\tif pointsYielded {\n\t\treturn out\n\t}\n\treturn nil\n}", "func (l *TimestreamBulkLoad) RunScanner(r io.Reader, syncChanDone chan int) {\n\tl.scanFinished = false\n\tl.itemsRead = 0\n\tl.bytesRead = 0\n\tl.valuesRead = 0\n\n\tvar n int\n\tvar deadline time.Time\n\tif bulk_load.Runner.TimeLimit > 0 {\n\t\tdeadline = time.Now().Add(bulk_load.Runner.TimeLimit)\n\t}\n\n\tstart := time.Now()\n\tbr := bufio.NewReaderSize(r, 32<<20)\n\tdec := gob.NewDecoder(br)\n\nouter:\n\tfor {\n\t\tif l.itemsRead == bulk_load.Runner.ItemLimit {\n\t\t\tbreak\n\t\t}\n\n\t\tinput := l.bufPool.Get().(*timestreamwrite.WriteRecordsInput)\n\t\terr := dec.Decode(input)\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"decoder error: %v\", err)\n\t\t}\n\n\t\tvar batch Batch\n\t\tif bulk_load.Runner.BatchSize > 1 {\n\t\t\tbatch =\tl.aggregate(input)\n\t\t\tl.reuse(input)\n\t\t} else {\n\t\t\tbatch = input\n\t\t}\n\n\t\tl.itemsRead++\n\t\tn = len(batch.Records)\n\n\t\tif n >= bulk_load.Runner.BatchSize {\n\t\t\t//l.bytesRead += int64(len(input)) // TODO\n\t\t\tl.batchChan <- batch\n\t\t\tn = 0\n\t\t\tif bulk_load.Runner.TimeLimit > 0 && time.Now().After(deadline) {\n\t\t\t\tbulk_load.Runner.SetPrematureEnd(\"Timeout elapsed\")\n\t\t\t\tbreak outer\n\t\t\t}\n\t\t}\n\n\t\t_ = start\n\t\tselect {\n\t\tcase <-syncChanDone:\n\t\t\tbreak outer\n\t\tdefault:\n\t\t}\n\t}\n\n\t// send outstanding batches\n\tif bulk_load.Runner.BatchSize > 1 {\n\t\t//fmt.Println(\"send outstanding batches\")\n\t\tfor _, c := range l.aggregates {\n\t\t\tif len(c.Records) > 0 {\n\t\t\t\tl.batchChan <- c\n\t\t\t}\n\t\t}\n\t}\n\n\t// Closing inputDone signals to the application that we've read everything and can now shut down.\n\tclose(l.inputDone)\n\tl.scanFinished = true\n}", "func (cMap *MyStruct)Reduce(functor ReduceFunc, accum_str string, accum_int int) (string, int){\n\tres := new(ReduceStruct)\n\tres.accum_str = accum_str\n\tres.accum_int = accum_int\n\tcMap.reducing <- *res\n\n\tminPair := <- cMap.reduceTemp\n\n\treturn minPair.accum_str,minPair.accum_int\n}", "func (f genHelperDecoder) DecReadMapStart() int { return f.d.mapStart(f.d.d.ReadMapStart()) }", "func (dec *Decoder) Start(n int) error {\n\tif n < 1 {\n\t\tn = 1\n\t}\n\n\tif err := dec.readOSMHeader(); err != nil {\n\t\treturn err\n\t}\n\n\t// start data decoders\n\tfor i := 0; i < n; i++ {\n\t\tinput := make(chan pair)\n\t\toutput := make(chan pair)\n\t\tgo func() {\n\t\t\tdd := new(dataDecoder)\n\t\t\tfor p := range 
input {\n\t\t\t\tif p.e == nil {\n\t\t\t\t\t// send decoded objects or decoding error\n\t\t\t\t\tobjects, err := dd.Decode(p.i.(*OSMPBF.Blob))\n\t\t\t\t\toutput <- pair{objects, err}\n\t\t\t\t} else {\n\t\t\t\t\t// send input error as is\n\t\t\t\t\toutput <- pair{nil, p.e}\n\t\t\t\t}\n\t\t\t}\n\t\t\tclose(output)\n\t\t}()\n\n\t\tdec.inputs = append(dec.inputs, input)\n\t\tdec.outputs = append(dec.outputs, output)\n\t}\n\n\t// start reading OSMData\n\tgo func() {\n\t\tvar inputIndex int\n\t\tfor {\n\t\t\tinput := dec.inputs[inputIndex]\n\t\t\tinputIndex = (inputIndex + 1) % n\n\n\t\t\tblobHeader, blob, err := dec.readFileBlock()\n\t\t\tif err == nil && blobHeader.GetType() != \"OSMData\" {\n\t\t\t\terr = fmt.Errorf(\"unexpected fileblock of type %s\", blobHeader.GetType())\n\t\t\t}\n\t\t\tif err == nil {\n\t\t\t\t// send blob for decoding\n\t\t\t\tinput <- pair{blob, nil}\n\t\t\t} else {\n\t\t\t\t// send input error as is\n\t\t\t\tinput <- pair{nil, err}\n\t\t\t\tfor _, input := range dec.inputs {\n\t\t\t\t\tclose(input)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tvar outputIndex int\n\t\tfor {\n\t\t\toutput := dec.outputs[outputIndex]\n\t\t\toutputIndex = (outputIndex + 1) % n\n\n\t\t\tp := <-output\n\t\t\tif p.i != nil {\n\t\t\t\t// send decoded objects one by one\n\t\t\t\tfor _, o := range p.i.([]interface{}) {\n\t\t\t\t\tdec.serializer <- pair{o, nil}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif p.e != nil {\n\t\t\t\t// send input or decoding error\n\t\t\t\tdec.serializer <- pair{nil, p.e}\n\t\t\t\tclose(dec.serializer)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}", "func main() {\n\tctx, cancelFunc := context.WithCancel(context.Background())\n\n\ttimer := time.NewTimer(time.Minute * 30)\n\tgo func() {\n\t\t<-timer.C\n\t\tcancelFunc()\n\t}()\n\n\tconfig := &redis.Options{\n\t\tAddr: \"localhost:6379\", // use default Addr\n\t\tPassword: \"\", // no password set\n\t\tDB: 0, // use default DB\n\t}\n\n\tredisClient := redis.NewClient(config)\n\n\treadGroupArgs := &redis.XReadGroupArgs{\n\t\tGroup: \"group1\",\n\t\tConsumer: \"consumer1\",\n\t\tStreams: []string{\"stream1\", \">\"},\n\t}\n\t// groupCreateArgs := &rs.XGroupCreateArgs{\n\t// \tStream: \"stream1\",\n\t// \tGroup: \"group1\",\n\t// \tStartID: \"$\",\n\t// \tMkStream: true,\n\t// }\n\tsource, err := rs.NewStreamSource(ctx, redisClient, readGroupArgs, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ttoUpperMapFlow := flow.NewMap(toUpper, 1)\n\tsink := rs.NewStreamSink(ctx, redisClient, \"stream2\")\n\n\tsource.\n\t\tVia(toUpperMapFlow).\n\t\tTo(sink)\n}", "func mapData(is types.ImportSource) ([]types.ImportSource, error) {\n\tvar rr []types.ImportSource\n\tif is.DataMap == nil {\n\t\trr = append(rr, is)\n\t\treturn rr, nil\n\t}\n\n\t// unpack the map\n\t// @todo provide a better structure!!\n\tvar dataMap []map[string]interface{}\n\tsrc, _ := ioutil.ReadAll(is.DataMap)\n\terr := json.Unmarshal(src, &dataMap)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// get header fields\n\tr := csv.NewReader(is.Source)\n\theader, err := r.Read()\n\tif err == io.EOF {\n\t\treturn rr, nil\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// maps { header field: field index } for a nicer lookup\n\thMap := make(map[string]int)\n\tfor i, h := range header {\n\t\thMap[h] = i\n\t}\n\n\tbufs := make(map[string]*MapBuffer)\n\n\t// data mapping\n\tfor {\n\t\trecord, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// on next row, currently 
acquired headers are marked as final\n\t\tfor _, b := range bufs {\n\t\t\tb.hasHeader = true\n\t\t}\n\n\t\t// find applicable maps, that can be used for the given row.\n\t\t// the system allows composition, so all applicable maps are used.\n\t\tfor _, strmp := range dataMap {\n\t\t\tif ok, err := checkWhere(strmp[\"where\"], record, hMap); ok && err == nil {\n\t\t\t\tmaps, ok := strmp[\"map\"].([]interface{})\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil, errors.New(\"dataMap.invalidMap \" + is.Name)\n\t\t\t\t}\n\n\t\t\t\t// handle current record and it's values\n\t\t\t\tfor _, mp := range maps {\n\t\t\t\t\tmm, ok := mp.(map[string]interface{})\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\treturn nil, errors.New(\"dataMap.map.invalidEntry \" + is.Name)\n\t\t\t\t\t}\n\n\t\t\t\t\tfrom, ok := mm[\"from\"].(string)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\treturn nil, errors.New(\"dataMap.map.entry.invalidFrom \" + is.Name)\n\t\t\t\t\t}\n\n\t\t\t\t\tto, ok := mm[\"to\"].(string)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\treturn nil, errors.New(\"dataMap.map.invalidTo \" + is.Name)\n\t\t\t\t\t}\n\n\t\t\t\t\tvv := strings.Split(to, \".\")\n\t\t\t\t\tnm := vv[0]\n\t\t\t\t\tnmF := vv[1]\n\n\t\t\t\t\tif bufs[nm] == nil {\n\t\t\t\t\t\tvar bb bytes.Buffer\n\t\t\t\t\t\tww := csv.NewWriter(&bb)\n\t\t\t\t\t\tdefer ww.Flush()\n\t\t\t\t\t\tbufs[nm] = &MapBuffer{\n\t\t\t\t\t\t\tbuffer: &bb,\n\t\t\t\t\t\t\twriter: ww,\n\t\t\t\t\t\t\tname: nm,\n\t\t\t\t\t\t\thasHeader: false,\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tval := record[hMap[from]]\n\n\t\t\t\t\t// handle data join\n\t\t\t\t\tif strings.Contains(from, \".\") {\n\t\t\t\t\t\t// construct a `alias.joinOnID` value, so we can perform a simple map lookup\n\t\t\t\t\t\tpts := strings.Split(from, \".\")\n\t\t\t\t\t\tbaseFieldAlias := pts[0]\n\t\t\t\t\t\toriginalOn := is.AliasMap[baseFieldAlias]\n\t\t\t\t\t\tjoinField := pts[1]\n\n\t\t\t\t\t\too := []string{}\n\t\t\t\t\t\tfor _, ff := range originalOn {\n\t\t\t\t\t\t\too = append(oo, record[hMap[ff]])\n\t\t\t\t\t\t}\n\t\t\t\t\t\tval = baseFieldAlias + \".\" + strings.Join(oo[:], \".\")\n\n\t\t\t\t\t\t// modify header field to specify what joined node field to use\n\t\t\t\t\t\tnmF += \":\" + joinField\n\t\t\t\t\t}\n\n\t\t\t\t\tbufs[nm].row = append(bufs[nm].row, val)\n\t\t\t\t\tif !bufs[nm].hasHeader {\n\t\t\t\t\t\tbufs[nm].header = append(bufs[nm].header, nmF)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else if err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\t// write csv rows\n\t\tfor _, v := range bufs {\n\t\t\tif len(v.row) > 0 {\n\t\t\t\tv.writer.Write(v.row)\n\t\t\t\tv.row = []string{}\n\t\t\t}\n\t\t}\n\t}\n\n\t// construct output import source nodes\n\tfor _, v := range bufs {\n\t\trr = append(rr, types.ImportSource{\n\t\t\tName: v.name,\n\t\t\tSource: v.buffer,\n\t\t\tHeader: &v.header,\n\t\t\tFieldMap: is.FieldMap,\n\t\t\tAliasMap: is.AliasMap,\n\t\t\tValueMap: is.ValueMap,\n\t\t})\n\t}\n\n\treturn rr, nil\n}", "func mapCount(itr Iterator, m *mapper) {\n\tn := 0\n\tfor k, _ := itr.Next(); k != 0; k, _ = itr.Next() {\n\t\tn++\n\t}\n\tm.emit(itr.Time(), float64(n))\n}", "func stream(key string, fields map[string]bool, ring *[100]*NodeMembership) {\n fmt.Println(\"Streaming these fields from \" + key + \": \", fields)\n\n hashKey := hash(key)\n\n var request Request\n request.requestType = \"stream\"\n request.key = hashKey\n request.fields = fields\n\n pos := hashKey\n for {\n nodeMembership := ring[pos]\n if nodeMembership != nil {\n nodeMembership.requestReceiver <- request\n break\n } else {\n pos = (pos + 1) % len(ring)\n }\n }\n}", 
"func fanOutData() (output chan []mapreduce.KeyValue, done chan bool) {\n\tvar (\n\t\terr error\n\t\tfile *os.File\n\t\tfileEncoder *json.Encoder\n\t\treduceCounter int\n\t)\n\n\toutput = make(chan []mapreduce.KeyValue, REDUCE_BUFFER_SIZE)\n\tdone = make(chan bool)\n\n\tgo func() {\n\t\tfor v := range output {\n\t\t\tlog.Println(\"Fanning out file\", resultFileName(reduceCounter))\n\t\t\tif file, err = os.Create(resultFileName(reduceCounter)); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\tfileEncoder = json.NewEncoder(file)\n\n\t\t\tfor _, value := range v {\n\t\t\t\tfileEncoder.Encode(value)\n\t\t\t}\n\n\t\t\tfile.Close()\n\t\t\treduceCounter++\n\t\t}\n\n\t\tdone <- true\n\t}()\n\n\treturn output, done\n}", "func(oj *outerJoin)ProcessStreamFirst(msg content.IContent,fieldsFromStream1 []string){\n\t//for join using hash\n\tif oj.JoinStrategy == HASH{\n\t\tvar joinFieldsVal []interface{}\n\t\t//get all the field values of stream 1 like age =18 name=ram kumar etc\n\t\tfor _,field := range fieldsFromStream1{\n\t\t\tjoinFieldsVal= append(joinFieldsVal,msg.Values()[strings.TrimSpace(field)]) //eats any unwanted white spaces..Note:may be the code is reduandant and may require cleaning in later version\n\t\t}\n\t\tkey := concatKeys(joinFieldsVal) //concats the fields '18 ram kumar' is obtained\n\t\toj.hashTable.Set(msg,key)// inserts the concat values as key and msg as the value in the hash table\n\t}\n}", "func (l *lex) run() {\n\tfor state := lexMapKey; state != nil; {\n\t\tstate = state(l)\n\t}\n\tclose(l.tokens)\n}", "func ReduceSpread(values []interface{}) interface{} {\n\tresult := &spreadMapOutput{}\n\tpointsYielded := false\n\n\tfor _, v := range values {\n\t\tif v == nil {\n\t\t\tcontinue\n\t\t}\n\t\tval := v.(*spreadMapOutput)\n\t\t// Initialize\n\t\tif !pointsYielded {\n\t\t\tresult.Max = val.Max\n\t\t\tresult.Min = val.Min\n\t\t\tresult.Type = val.Type\n\t\t\tpointsYielded = true\n\t\t}\n\t\tresult.Max = math.Max(result.Max, val.Max)\n\t\tresult.Min = math.Min(result.Min, val.Min)\n\t}\n\tif pointsYielded {\n\t\tswitch result.Type {\n\t\tcase Float64Type:\n\t\t\treturn result.Max - result.Min\n\t\tcase Int64Type:\n\t\t\treturn int64(result.Max - result.Min)\n\t\t}\n\t}\n\treturn nil\n}", "func (tr *tableReader) Start(ctx context.Context) {\n\tif tr.FlowCtx.Txn == nil {\n\t\tlog.Fatalf(ctx, \"tableReader outside of txn\")\n\t}\n\n\tctx = tr.StartInternal(ctx, tableReaderProcName)\n\n\tlimitBatches := !tr.parallelize\n\tlog.VEventf(ctx, 1, \"starting scan with limitBatches %t\", limitBatches)\n\tvar err error\n\tif tr.maxTimestampAge == 0 {\n\t\terr = tr.fetcher.StartScan(\n\t\t\tctx, tr.FlowCtx.Txn, tr.spans, limitBatches, tr.limitHint,\n\t\t\ttr.FlowCtx.TraceKV,\n\t\t\ttr.EvalCtx.TestingKnobs.ForceProductionBatchSizes,\n\t\t)\n\t} else {\n\t\tinitialTS := tr.FlowCtx.Txn.ReadTimestamp()\n\t\terr = tr.fetcher.StartInconsistentScan(\n\t\t\tctx, tr.FlowCtx.Cfg.DB, initialTS, tr.maxTimestampAge, tr.spans,\n\t\t\tlimitBatches, tr.limitHint, tr.FlowCtx.TraceKV,\n\t\t\ttr.EvalCtx.TestingKnobs.ForceProductionBatchSizes,\n\t\t)\n\t}\n\n\tif err != nil {\n\t\ttr.MoveToDraining(err)\n\t}\n}", "func (f genHelperEncoder) EncWriteMapStart(length int) { f.e.mapStart(length) }", "func TestMapReduce(t *testing.T) {\n\t// Start data producer.\n\n\torderChan := generateTestOrders(2000)\n\n\t// Define map and reduce functions.\n\n\tmapFunc := func(in *KeyValue, mapEmitChan KeyValueChan) {\n\t\to := in.Value.(*Order)\n\n\t\t// Emit analysis data for each item.\n\n\t\tfor _, i := range o.Items 
{\n\t\t\tunitDiscount := (i.UnitPrice / 100.0) * i.DiscountPerc\n\t\t\ttotalDiscount := unitDiscount * float64(i.Count)\n\t\t\ttotalAmount := (i.UnitPrice - unitDiscount) * float64(i.Count)\n\t\t\tanalysis := &OrderItemAnalysis{i.ArticleNo, i.Count, totalAmount, totalDiscount}\n\t\t\tarticleNo := strconv.Itoa(i.ArticleNo)\n\n\t\t\tmapEmitChan <- &KeyValue{articleNo, analysis}\n\t\t}\n\t}\n\n\treduceFunc := func(inChan KeyValueChan, reduceEmitChan KeyValueChan) {\n\t\tmemory := make(map[string]*OrderItemAnalysis)\n\n\t\t// Collect emitted analysis data.\n\n\t\tfor kv := range inChan {\n\t\t\tanalysis := kv.Value.(*OrderItemAnalysis)\n\n\t\t\tif existing, ok := memory[kv.Key]; ok {\n\t\t\t\texisting.Quantity += analysis.Quantity\n\t\t\t\texisting.Amount += analysis.Amount\n\t\t\t\texisting.Discount += analysis.Discount\n\t\t\t} else {\n\t\t\t\tmemory[kv.Key] = analysis\n\t\t\t}\n\t\t}\n\n\t\t// Emit it to map/reduce caller.\n\n\t\tfor articleNo, analysis := range memory {\n\t\t\treduceEmitChan <- &KeyValue{articleNo, analysis}\n\t\t}\n\t}\n\n\t// Now call MapReduce.\n\n\tfor result := range SortedMapReduce(orderChan, mapFunc, 100, reduceFunc, 20, KeyLessFunc) {\n\t\tt.Logf(\"%v\\n\", result.Value)\n\t}\n}", "func reduceFunc(input []mapreduce.KeyValue) (result []mapreduce.KeyValue) {\r\n\t// \tMaybe it's easier if we have an auxiliary structure? Which one?\r\n\t//\r\n\t// \tYou can check if a map have a key as following:\r\n\t// \t\tif _, ok := myMap[myKey]; !ok {\r\n\t//\t\t\t// Don't have the key\r\n\t//\t\t}\r\n\t//\r\n\t// \tReduce will receive KeyValue pairs that have string values, you may need\r\n\t// \tconvert those values to int before being able to use it in operations.\r\n\t// \tpackage strconv: func Atoi(s string) (int, error)\r\n\t//\r\n\t// \tIt's also possible to receive a non-numeric value (i.e. \"+\"). You can check the\r\n\t// \terror returned by Atoi and if it's not 'nil', use 1 as the value.\r\n\r\n\t/////////////////////////\r\n\t// YOUR CODE GOES HERE //\r\n\t/////////////////////////\r\n\r\n\tresult = make([]mapreduce.KeyValue, 0)\r\n\r\n\t// This auxiliary map is used to count the number of occurrences of a certain key\r\n\tauxiliaryMap := make(map[string]int)\r\n\r\n\tfor _, keyValuePair := range input {\r\n\r\n\t\t// If the value in the input key-value pair is numeric, then parse the value and update\r\n\t\t// the corresponding value in the auxiliary map.\r\n\t\t// If a key is not in the auxiliary map, 0 is returned as the corresponding value.\r\n\t\t// Using that fact, it's not necessary to check for the key.\r\n\t\tif value, err := strconv.Atoi(keyValuePair.Value); err == nil {\r\n\t\t\tauxiliaryMap[keyValuePair.Key] += value\r\n\r\n\t\t// If it's a non-numeric value, count as 1 occurrence\r\n\t\t// This considers that all possible non-numeric values are equivalent \r\n\t\t// (e.g. 
\"-\" or \"+\" have the same meaning when used as values)\r\n\t\t} else {\r\n\t\t\tauxiliaryMap[keyValuePair.Key] += 1\r\n\t\t}\r\n\t}\r\n\r\n\t// Convert the key-value pairs in auxiliary map to the output format (array of mapreduce.KeyValue structs)\r\n\tfor key, value := range auxiliaryMap {\r\n\t\tresult = append(result, mapreduce.KeyValue{key, strconv.Itoa(value)})\r\n\t}\r\n\treturn result\r\n}", "func main() {\n\tctx := pipeliner.FirstError()\n\tsourceCh := source(ctx, 0, 9000)\n\tbatches := batchInts(ctx, 320, sourceCh)\n\tfor batch := range batches {\n\t\tfmt.Printf(\"received batch of length: %d\\n\", len(batch))\n\t}\n}", "func Map(ctx context.Context,\n\tf func(args ...interface{}) interface{},\n\tinStreams ...<-chan interface{}) <-chan interface{} {\n\ts := make(chan interface{})\n\tgo func() {\n\t\tdefer close(s)\n\t\tfor {\n\t\t\ta := []interface{}{}\n\t\t\tfor i := 0; i < len(inStreams); i++ {\n\t\t\t\tselect {\n\t\t\t\tcase x := <-inStreams[i]:\n\t\t\t\t\ta = append(a, x)\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\ts <- f(a...)\n\t\t}\n\t}()\n\treturn s\n}", "func (a *Agent) startProcessors(\n\tdst chan<- telegraf.Metric,\n\tprocessors models.RunningProcessors,\n) (chan<- telegraf.Metric, []*processorUnit, error) {\n\tvar units []*processorUnit\n\n\t// Sort from last to first\n\tsort.SliceStable(processors, func(i, j int) bool {\n\t\treturn processors[i].Config.Order > processors[j].Config.Order\n\t})\n\n\tvar src chan telegraf.Metric\n\tfor _, processor := range processors {\n\t\tsrc = make(chan telegraf.Metric, 100)\n\t\tacc := NewAccumulator(processor, dst)\n\n\t\terr := processor.Start(acc)\n\t\tif err != nil {\n\t\t\tfor _, u := range units {\n\t\t\t\tu.processor.Stop()\n\t\t\t\tclose(u.dst)\n\t\t\t}\n\t\t\treturn nil, nil, fmt.Errorf(\"starting processor %s: %w\", processor.LogName(), err)\n\t\t}\n\n\t\tunits = append(units, &processorUnit{\n\t\t\tsrc: src,\n\t\t\tdst: dst,\n\t\t\tprocessor: processor,\n\t\t})\n\n\t\tdst = src\n\t}\n\n\treturn src, units, nil\n}", "func (f *FakeOutput) Start(_ operator.Persister) error { return nil }", "func (w *Worker) startReader() {\n\tdump, err := os.Open(w.InputFile)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdecoder := xml.NewDecoder(dump)\n\n\tfor {\n\t\tt, _ := decoder.Token()\n\t\tif t == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t// Inspect the type of the token just read.\n\t\tswitch se := t.(type) {\n\t\tcase xml.StartElement:\n\t\t\tif se.Name.Local == \"page\" {\n\t\t\t\tvar p Page\n\t\t\t\tdecoder.DecodeElement(&p, &se)\n\n\t\t\t\tfound := find(seen, p.Title)\n\t\t\t\tif found {\n\t\t\t\t\tlog.Printf(\"Duplicate title: %s. 
Skipping...\", p.Title)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tw.InPage <- &p\n\t\t\t}\n\t\t}\n\t}\n\n\t// Close the channels associated with reading/writing\n\tclose(w.InPage)\n\tlog.Println(\"Reader done\")\n}", "func consumeStart() {\n\tif *c.Zookeeper == \"\" {\n\t\tlogger.Fatalln(\"Zookeeper Config is invalid.\")\n\t\tos.Exit(1)\n\t}\n\n\tconfig := consumergroup.NewConfig()\n\tconfig.Offsets.Initial = sarama.OffsetNewest\n\n\tif c.OffsetStart > 0 {\n\t\tconfig.Offsets.Initial = c.OffsetStart\n\t}\n\n\tconfig.Offsets.ProcessingTimeout = 10 * time.Second\n\n\tzookeeperNodes, config.Zookeeper.Chroot = kazoo.ParseConnectionString(*c.Zookeeper)\n\n\tkafkaTopics := strings.Split(*c.Topics, \",\")\n\n\tconsumer, consumerErr := consumergroup.JoinConsumerGroup(c.ConsumerGroup, kafkaTopics, zookeeperNodes, config)\n\tif consumerErr != nil {\n\t\tlogger.Fatalln(consumerErr)\n\t}\n\n\tsig := make(chan os.Signal, 1)\n\tsignal.Notify(sig, os.Interrupt)\n\tgo func() {\n\t\t<-sig\n\t\tif err := consumer.Close(); err != nil {\n\t\t\tsarama.Logger.Println(\"Error closing the consumer\", err)\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tfor err := range consumer.Errors() {\n\t\t\tlogger.Fatalln(err)\n\t\t}\n\t}()\n\n\teventCount := 0\n\toffsets := make(map[string]map[int32]int64)\n\n\tfor message := range consumer.Messages() {\n\t\t// if offset beyond the offsetEnd number , exit.\n\t\tif c.OffsetEnd > 0 && message.Offset > c.OffsetEnd {\n\t\t\tlogger.Println(\"Offset beyond the end offset point.\")\n\t\t\tos.Exit(1)\n\t\t}\n\t\tif offsets[message.Topic] == nil {\n\t\t\toffsets[message.Topic] = make(map[int32]int64)\n\t\t}\n\n\t\teventCount += 1\n\t\tif offsets[message.Topic][message.Partition] != 0 && offsets[message.Topic][message.Partition] != message.Offset-1 {\n\t\t\tlogger.Printf(\"Unexpected offset on %s:%d. 
Expected %d, found %d, diff %d.\\n\", message.Topic, message.Partition, offsets[message.Topic][message.Partition]+1, message.Offset, message.Offset-offsets[message.Topic][message.Partition]+1)\n\t\t}\n\n\t\toffsets[message.Topic][message.Partition] = message.Offset\n\t\tmsg <- string(message.Value)\n\t\tconsumer.CommitUpto(message)\n\t\tlogger.Printf(\"Value:%s,Partition:%v, Offset:%v\", message.Value, message.Partition, message.Offset)\n\t}\n\n\tlogger.Printf(\"Processed %d events.\", eventCount)\n}", "func (a *prUserIdMapperApp) getUserIdMappers(w http.ResponseWriter, r *http.Request) {\n\tUserIdMapper := prUserIdMapper{}\n\n\t//vars := mux.Vars(r)\n\t//fmt.Println(\"list tokens: \", vars)\n\n\tcount, _ := strconv.Atoi(r.FormValue(\"count\"))\n\tstart, _ := strconv.Atoi(r.FormValue(\"start\"))\n\n\tif count > 10 || count < 1 {\n\t\tcount = 10\n\t}\n\tif start < 0 {\n\t\tstart = 0\n\t}\n\n\tmappings, err := UserIdMapper.getUserIdMappers(a.DB, start, count)\n\tif err != nil {\n\t\trespondWithError(w, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\n\trespondWithJSON(w, http.StatusOK, mappings)\n}", "func (r *RecordStream) Start() {\n\tif r.state == idle {\n\t\tr.err = nil\n\t\tr.c.c.Request(&proto.FlushRecordStream{StreamIndex: r.index}, nil)\n\t\tr.c.c.Request(&proto.CorkRecordStream{StreamIndex: r.index, Corked: false}, nil)\n\t\tr.state = running\n\t}\n}", "func (s *scanner) run() {\n\tfor state := scanMain; state != nil; {\n\t\tstate = state(s)\n\t}\n\tclose(s.items)\n}", "func loadPartiallyStreamedDBS() ([]map[string]*dynamodb.AttributeValue, error) {\n\n\treq := &dynamodb.ScanInput{\n\t\tTableName: aws.String(stateTableName),\n\t}\n\tres, err := dySvc.Scan(req)\n\tif err != nil {\n\t\tlog.Println(\"loadPartiallyStreamedDBS Error\", err)\n\t\treturn make([]map[string]*dynamodb.AttributeValue, 0), err\n\t}\n\treturn res.Items, nil\n}", "func (p *literalProcessor) run() {\n\tfor {\n\t\tselect {\n\t\tcase ch := <-p.done:\n\t\t\tclose(ch)\n\t\t\treturn\n\t\tcase p.c <- map[string]interface{}{\"\": p.val}:\n\t\t}\n\t}\n}", "func (ss *StreamerServer) handleStartStreams(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"POST\" {\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\tb, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tglog.Infof(\"Got request: '%s'\", string(b))\n\tssr := &model.StartStreamsReq{}\n\terr = json.Unmarshal(b, ssr)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\t}\n\tglog.Infof(\"Start streams request %+v\", *ssr)\n\tif ssr.Host == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(\"Should specify 'host' field\"))\n\t\treturn\n\t}\n\tif ssr.Repeat <= 0 {\n\t\tssr.Repeat = 1\n\t}\n\tif ssr.Simultaneous <= 0 {\n\t\tssr.Simultaneous = 1\n\t}\n\tif ssr.FileName == \"\" {\n\t\tssr.FileName = \"BigBuckBunny.mp4\"\n\n\t}\n\tif ssr.RTMP == 0 {\n\t\tssr.RTMP = 1935\n\t}\n\tif ssr.Media == 0 {\n\t\tssr.Media = 8935\n\t}\n\tif ssr.ProfilesNum != 0 {\n\t\tmodel.ProfilesNum = ssr.ProfilesNum\n\t}\n\tif _, err := os.Stat(ssr.FileName); os.IsNotExist(err) {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(`File ` + ssr.FileName + ` does not exists`))\n\t\treturn\n\t}\n\tglog.Infof(\"Get request: %+v\", ssr)\n\tif !ssr.DoNotClearStats {\n\t\tss.streamer = testers.NewStreamer(ss.wowzaMode)\n\t}\n\tvar streamDuration time.Duration\n\tif ssr.Time != \"\" {\n\t\tif streamDuration, err = 
ParseStreamDurationArgument(ssr.Time); err != nil {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tw.Write([]byte(err.Error()))\n\t\t\treturn\n\t\t}\n\t}\n\n\tbaseManifestID, err := ss.streamer.StartStreams(ssr.FileName, ssr.Host, strconv.Itoa(ssr.RTMP), strconv.Itoa(ssr.Media), ssr.Simultaneous,\n\t\tssr.Repeat, streamDuration, true, ssr.MeasureLatency, true, 3, 5*time.Second, 0)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tres, err := json.Marshal(\n\t\t&model.StartStreamsRes{\n\t\t\tSuccess: true,\n\t\t\tBaseManifestID: baseManifestID,\n\t\t},\n\t)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\t}\n\tw.Write(res)\n}", "func (a *Agent) startReadingMetrics(ID string, data *ContainerData) {\n\tstream := data.metricsStream\n\tlog.Infof(\"start reading metrics on container: %s\\n\", data.name)\n\tdecoder := json.NewDecoder(stream)\n\tstatsData := new(types.StatsJSON)\n\tvar previous, now int64\n\tmetricsEntry := &data.squashedMetricsMessage\n\tdata.squashedMetricsMessage.ContainerId = ID\n\tdata.squashedMetricsMessage.ContainerName = data.name\n\tdata.squashedMetricsMessage.ContainerShortName = data.shortName\n\tdata.squashedMetricsMessage.ContainerState = data.state\n\tdata.squashedMetricsMessage.ServiceName = data.serviceName\n\tdata.squashedMetricsMessage.ServiceId = data.serviceID\n\tdata.squashedMetricsMessage.TaskId = data.taskID\n\tdata.squashedMetricsMessage.StackName = data.stackName\n\tdata.squashedMetricsMessage.NodeId = data.nodeID\n\tdata.squashedMetricsMessage.Labels = data.labels\n\tfor err := decoder.Decode(statsData); err != io.EOF; err = decoder.Decode(statsData) {\n\t\tif err != nil {\n\t\t\tif err.Error() == \"EOF\" {\n\t\t\t\tlog.Infof(\"Stream metrics EOF container terminated: %s\\n\", data.name)\n\t\t\t} else {\n\t\t\t\tlog.Errorf(\"error reading metrics, closing metrics stream on container %s (%v)\\n\", data.name, err)\n\t\t\t}\n\t\t\tdata.metricsReadError = true\n\t\t\t_ = stream.Close()\n\t\t\ta.removeContainer(ID)\n\t\t\treturn\n\t\t}\n\t\tnow = time.Now().UnixNano()\n\t\tif now <= previous {\n\t\t\tnow = previous + 1\n\t\t}\n\t\tprevious = now\n\t\tdata.squashedMetricsMessage.Timestamp = statsData.Read.Format(time.RFC3339Nano)\n\t\tdata.squashedMetricsMessage.TimeId = fmt.Sprintf(\"%016X\", now)\n\t\ta.setMemMetrics(statsData, metricsEntry)\n\t\ta.setIOMetrics(data, statsData, metricsEntry)\n\t\ta.setNetMetrics(data, statsData, metricsEntry)\n\t\ta.setCPUMetrics(statsData, metricsEntry)\n\t\tdata.squashedMetricsMessageNb++\n\t\ta.nbMetricsComputed++\n\t}\n}", "func (l *LocalMapper) Next() (seriesKey string, timestamp int64, value interface{}) {\n\tfor {\n\t\t// if it's a raw query and we've hit the limit of the number of points to read in\n\t\t// for either this chunk or for the absolute query, bail\n\t\tif l.isRaw && (l.limit == 0 || l.perIntervalLimit == 0) {\n\t\t\treturn \"\", int64(0), nil\n\t\t}\n\n\t\t// find the minimum timestamp\n\t\tmin := -1\n\t\tminKey := int64(math.MaxInt64)\n\t\tfor i, k := range l.keyBuffer {\n\t\t\tif k != 0 && k <= l.tmax && k < minKey && k >= l.tmin {\n\t\t\t\tmin = i\n\t\t\t\tminKey = k\n\t\t\t}\n\t\t}\n\n\t\t// return if there is no more data in this group by interval\n\t\tif min == -1 {\n\t\t\treturn \"\", 0, nil\n\t\t}\n\n\t\t// set the current timestamp and seriesID\n\t\ttimestamp = l.keyBuffer[min]\n\t\tseriesKey = 
l.seriesKeys[min]\n\n\t\t// decode either the value, or values we need. Also filter if necessary\n\t\tvar value interface{}\n\t\tvar err error\n\t\tif l.isRaw && len(l.selectFields) > 1 {\n\t\t\tif fieldsWithNames, err := l.decoder.DecodeFieldsWithNames(l.valueBuffer[min]); err == nil {\n\t\t\t\tvalue = fieldsWithNames\n\n\t\t\t\t// if there's a where clause, make sure we don't need to filter this value\n\t\t\t\tif l.filters[min] != nil {\n\t\t\t\t\tif !matchesWhere(l.filters[min], fieldsWithNames) {\n\t\t\t\t\t\tvalue = nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tvalue, err = l.decoder.DecodeByID(l.fieldID, l.valueBuffer[min])\n\n\t\t\t// if there's a where clase, see if we need to filter\n\t\t\tif l.filters[min] != nil {\n\t\t\t\t// see if the where is only on this field or on one or more other fields. if the latter, we'll have to decode everything\n\t\t\t\tif len(l.whereFields) == 1 && l.whereFields[0] == l.fieldName {\n\t\t\t\t\tif !matchesWhere(l.filters[min], map[string]interface{}{l.fieldName: value}) {\n\t\t\t\t\t\tvalue = nil\n\t\t\t\t\t}\n\t\t\t\t} else { // decode everything\n\t\t\t\t\tfieldsWithNames, err := l.decoder.DecodeFieldsWithNames(l.valueBuffer[min])\n\t\t\t\t\tif err != nil || !matchesWhere(l.filters[min], fieldsWithNames) {\n\t\t\t\t\t\tvalue = nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// advance the cursor\n\t\tnextKey, nextVal := l.cursors[min].Next()\n\t\tif nextKey == nil {\n\t\t\tl.keyBuffer[min] = 0\n\t\t} else {\n\t\t\tl.keyBuffer[min] = int64(btou64(nextKey))\n\t\t}\n\t\tl.valueBuffer[min] = nextVal\n\n\t\t// if the value didn't match our filter or if we didn't find the field keep iterating\n\t\tif err != nil || value == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// if it's a raw query, we always limit the amount we read in\n\t\tif l.isRaw {\n\t\t\tl.limit--\n\t\t\tl.perIntervalLimit--\n\t\t}\n\n\t\treturn seriesKey, timestamp, value\n\t}\n}", "func (pr *PeriodicReader) start(ctx context.Context) {\n\tdefer pr.wait.Done()\n\tticker := time.NewTicker(pr.interval)\n\tfor {\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tif err := pr.collectWithTimeout(ctx, pr.exporter.ExportMetrics); err != nil {\n\t\t\t\totel.Handle(err)\n\t\t\t}\n\t\t}\n\t}\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\n\tcallMs := true\n\n\ttfl := make([]string, 0)\n\tfor callMs {\n\t\tcallMs, _ = callMaster(mapf, &tfl)\n\t\t//time.Sleep(5 * time.Second)\n\t}\n\n\t//\tsort.Sort(ByKey(intermediate))\n\trand.Seed(time.Now().UnixNano())\n\tred := rand.Intn(1000)\n\tfmt.Printf(\"Reducer filename %d \\n\", red)\n\toname := fmt.Sprintf(\"mr-out-%d.txt\", red)\n\n\tofile, _ := os.Create(oname)\n\tintermediate1 := []KeyValue{}\n\tvar fm sync.Mutex\n\tfm.Lock()\n\tfor _, tf := range tfl {\n\t\tfile, err := os.Open(tf)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"cannot open %v\", tf)\n\t\t}\n\t\tdec := json.NewDecoder(file)\n\t\tfor {\n\t\t\tvar kv KeyValue\n\t\t\tif err := dec.Decode(&kv); err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tintermediate1 = append(intermediate1, kv)\n\t\t}\n\t}\n\tsort.Sort(ByKey(intermediate1))\n\n\tfm.Unlock()\n\ti := 0\n\tfor i < len(intermediate1) {\n\t\tj := i + 1\n\t\tfor j < len(intermediate1) && intermediate1[j].Key == intermediate1[i].Key {\n\t\t\tj++\n\t\t}\n\t\tvalues := []string{}\n\t\tfor k := i; k < j; k++ {\n\t\t\tvalues = append(values, 
intermediate1[k].Value)\n\t\t}\n\t\toutput := reducef(intermediate1[i].Key, values)\n\n\t\t// this is the correct format for each line of Reduce output.\n\t\tfmt.Fprintf(ofile, \"%v %v\\n\", intermediate1[i].Key, output)\n\n\t\ti = j\n\t}\n\tfor _, f := range tfl {\n\t\tos.Remove(f)\n\t}\n\tofile.Close()\n\tCallNotify(\"wc\", 0)\n\n}", "func (r *Transformer) flushUncombined(ctx context.Context) {\n\tfor source := range r.batchMap {\n\t\tfor _, entry := range r.batchMap[source].entries {\n\t\t\tr.Write(ctx, entry)\n\t\t}\n\t\tr.removeBatch(source)\n\t}\n\tr.ticker.Reset(r.forceFlushTimeout)\n}", "func (tr *tableReader) Run(wg *sync.WaitGroup) {\n\tif wg != nil {\n\t\tdefer wg.Done()\n\t}\n\n\tif log.V(2) {\n\t\tlog.Infof(tr.ctx, \"starting (filter: %s)\", tr.filter)\n\t\tdefer log.Infof(tr.ctx, \"exiting\")\n\t}\n\n\tif err := tr.fetcher.StartScan(tr.flowCtx.txn, tr.spans, tr.getLimitHint()); err != nil {\n\t\tlog.Errorf(tr.ctx, \"scan error: %s\", err)\n\t\ttr.output.Close(err)\n\t\treturn\n\t}\n\tvar rowIdx int64\n\tfor {\n\t\toutRow, err := tr.nextRow()\n\t\tif err != nil || outRow == nil {\n\t\t\ttr.output.Close(err)\n\t\t\treturn\n\t\t}\n\t\tif log.V(3) {\n\t\t\tlog.Infof(tr.ctx, \"pushing row %s\\n\", outRow)\n\t\t}\n\t\t// Push the row to the output RowReceiver; stop if they don't need more\n\t\t// rows.\n\t\tif !tr.output.PushRow(outRow) {\n\t\t\tif log.V(2) {\n\t\t\t\tlog.Infof(tr.ctx, \"no more rows required\")\n\t\t\t}\n\t\t\ttr.output.Close(nil)\n\t\t\treturn\n\t\t}\n\t\trowIdx++\n\t\tif tr.hardLimit != 0 && rowIdx == tr.hardLimit {\n\t\t\t// We sent tr.hardLimit rows.\n\t\t\ttr.output.Close(nil)\n\t\t\treturn\n\t\t}\n\t}\n}", "func (mi *MinerIndex) start() {\n\tdefer func() { mi.finished <- struct{}{} }()\n\n\tif err := mi.updateOnChainIndex(); err != nil {\n\t\tlog.Errorf(\"error on initial updating miner index: %s\", err)\n\t}\n\tmi.chMeta <- struct{}{}\n\tfor {\n\t\tselect {\n\t\tcase <-mi.ctx.Done():\n\t\t\tlog.Info(\"graceful shutdown of background miner index\")\n\t\t\treturn\n\t\tcase <-time.After(metadataRefreshInterval):\n\t\t\tselect {\n\t\t\tcase mi.chMeta <- struct{}{}:\n\t\t\tdefault:\n\t\t\t\tlog.Info(\"skipping meta index update since it's busy\")\n\t\t\t}\n\t\tcase <-time.After(util.AvgBlockTime):\n\t\t\tif err := mi.updateOnChainIndex(); err != nil {\n\t\t\t\tlog.Errorf(\"error when updating miner index: %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}", "func consume(ctx context.Context, count int, p *payload) []int {\n\taccumulator := make([]int, 0)\n\tvisited := make(map[int]struct{})\n\tfor i := 0; i < count; i++ {\n\t\tselect {\n\t\tcase res := <-p.res:\n\t\t\tfor _, val := range res.Numbers {\n\t\t\t\tif _, ok := visited[val]; !ok {\n\t\t\t\t\taccumulator = append(accumulator, val)\n\t\t\t\t\tvisited[val] = struct{}{}\n\t\t\t\t}\n\t\t\t}\n\t\tcase err := <-p.err:\n\t\t\tlog.Println(err)\n\t\tcase <-ctx.Done():\n\t\t\tlog.Println(ctx.Err())\n\t\t\tsort.Ints(accumulator)\n\t\t\treturn accumulator\n\t\t}\n\t}\n\tsort.Ints(accumulator)\n\treturn accumulator\n}", "func main() {\n\n //bring up the services\n\tmasterSrvAddr := master.StartMasterSrv(9090) //9090\n\tworkerSrvAddr1 := worker.StartWorkerSrv(9091); //9091 ,9092, 9093\n\tworkerSrvAddr2 := worker.StartWorkerSrv(9092);\n\tworker.StartWorkerCli(masterSrvAddr, []string{workerSrvAddr1,workerSrvAddr2});\n\tmaster.StartMasterCli();\n\n\t//distributed map-reduce flow\n\tmapOutput,err := master.DoOperation([]string{\"/Users/k0c00nc/go/src/MapReduce/res/input.txt\", \"/Users/k0c00nc/go/src/distributedDb\" 
+\n\t\t\"/res/input1.txt\"},\"Map\")\n\tif err !=nil{\n\t\tfmt.Printf(\"map phase failed with err %s \", err.Error())\n\t}\n\n\tlocalAggregation,err :=master.DoOperation(mapOutput,\"LocalAggregation\")\n\tif err !=nil{\n\t\tfmt.Printf(\"localAggregation phase failed with err %s \", err.Error())\n\t}\n\n\tshuffing,err :=master.DoOperation(localAggregation,\"Shuffing\")\n\tif err !=nil{\n\t\tfmt.Printf(\"shuffing phase failed with err %s \", err.Error())\n\t}\n\n\treduce,err :=master.DoOperation(shuffing,\"Reduce\")\n\tif err !=nil{\n\t\tfmt.Printf(\"reduce phase failed with err %s \", err.Error())\n\t}\n\n fmt.Println(\"MR output are in file\", reduce[0])\n\n}", "func (op *compose) run(s stream) stream {\n\tif err := op.validate(op.streams); err != nil {\n\t\ts.err = err\n\t\treturn s\n\t}\n\tif s.streams == nil {\n\t\ts.streams = make([]stream, 0)\n\t}\n\tfor _, str := range op.streams {\n\t\ts.streams = append(s.streams, str.(stream))\n\t}\n\treturn s\n}", "func Reduce(parts int, r Reducer) {\n\tfiles := make([]*os.File, parts)\n\n\tless := func(left []byte, right []byte) bool {\n\t\treturn bytes.Compare(countedEntry(left).bytes(), countedEntry(right).bytes()) < 0\n\t}\n\th := newStreamAggregator(less)\n\tfor i := 0; i < parts; i++ {\n\t\tf, err := os.Open(fmt.Sprintf(\"part%v.dat\", i))\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tfiles[i] = f\n\t\tif sr := newStreamReader(bufio.NewReaderSize(f, 1<<20)); sr != nil {\n\t\t\theap.Push(h, sr)\n\t\t}\n\t}\n\n\tfor h.Len() > 0 {\n\t\tsr := heap.Pop(h).(*streamReader)\n\t\tr.Reduce(sr.head())\n\t\tif sr.next() {\n\t\t\theap.Push(h, sr)\n\t\t}\n\t}\n\tr.End()\n\n\tfor _, f := range files[:] {\n\t\tif err := f.Close(); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n}", "func (this *KxiWorker) Reduce(key interface{}, values []interface{}) (kv mr.KeyValue) {\n kv = mr.NewKeyValue()\n switch key.(mr.GroupKey).Group() {\n case GROUP_URL_SERV:\n vals := mr.ConvertAnySliceToFloat(values)\n kv[TIME_ALL] = stats.StatsSum(vals)\n kv[TIME_MAX] = stats.StatsMax(vals)\n kv[TIME_TOP] = stats.StatsSumTopN(vals, topsum)\n kv[TIME_AVG] = stats.StatsMean(vals)\n kv[TIME_STD] = stats.StatsSampleStandardDeviationCoefficient(vals)\n kv[CALL_ALL] = float64(stats.StatsCount(vals))\n case GROUP_KXI:\n vals := mr.ConvertAnySliceToFloat(values)\n kv[TIME_ALL] = stats.StatsSum(vals)\n kv[TIME_MIN] = stats.StatsMin(vals)\n kv[TIME_TOP] = stats.StatsSumTopN(vals, topsum)\n kv[TIME_MAX] = stats.StatsMax(vals)\n kv[TIME_AVG] = stats.StatsMean(vals)\n kv[TIME_STD] = stats.StatsSampleStandardDeviationCoefficient(vals)\n kv[CALL_ALL] = float64(stats.StatsCount(vals))\n case GROUP_URL_RID:\n vals := mr.ConvertAnySliceToFloat(values)\n kv[CALL_ALL] = float64(stats.StatsCount(vals))\n kv[TIME_ALL] = stats.StatsSum(vals)\n case GROUP_URL_SQL:\n vals := mr.ConvertAnySliceToFloat(values)\n kv[CALL_ALL] = float64(stats.StatsCount(vals))\n kv[TIME_MAX] = stats.StatsMax(vals)\n kv[TIME_AVG] = stats.StatsMean(vals)\n case GROUP_URL:\n vals := mr.ConvertAnySliceToString(values) // rids of this url\n c := stats.NewCounter(vals)\n kv[REQ_ALL] = float64(len(c))\n }\n\n return\n}", "func MapSum(itr Iterator) interface{} {\n\tn := float64(0)\n\tcount := 0\n\tvar resultType NumberType\n\tfor k, v := itr.Next(); k != -1; k, v = itr.Next() {\n\t\tcount++\n\t\tswitch n1 := v.(type) {\n\t\tcase float64:\n\t\t\tn += n1\n\t\tcase int64:\n\t\t\tn += float64(n1)\n\t\t\tresultType = Int64Type\n\t\t}\n\t}\n\tif count > 0 {\n\t\tswitch resultType {\n\t\tcase Float64Type:\n\t\t\treturn 
n\n\t\tcase Int64Type:\n\t\t\treturn int64(n)\n\t\t}\n\t}\n\treturn nil\n}", "func (m *Mare) Map(mapFunc func(input interface{}) []MapOutput) *Mare {\n\tif m.mapWorkerCnt == 0 {\n\t\tm.mapWorkerCnt++\n\t}\n\tif m.mapOutChan != nil {\n\t\tpanic(\"Map already in progress !\")\n\t}\n\n\t// Start the map\n\tm.mapOutWorkers.Add(m.mapWorkerCnt)\n\tm.mapOutChan = make(chan MapOutput, m.mapWorkerCnt)\n\tfor i := 0; i < m.mapWorkerCnt; i++ {\n\t\tgo func() {\n\t\t\tdefer m.mapOutWorkers.Done()\n\t\t\tfor item := range m.mapInChan {\n\t\t\t\tfor _, output := range mapFunc(item) {\n\t\t\t\t\tif m.trace {\n\t\t\t\t\t\tlog.Printf(\"Emit %v with key %v\", output.Key, output.Value)\n\t\t\t\t\t}\n\t\t\t\t\tm.mapOutChan <- output\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\t// Wait for end of work and close\n\tgo func() {\n\t\tm.mapOutWorkers.Wait()\n\t\tclose(m.mapOutChan)\n\t}()\n\n\treturn m\n}", "func (l *Service) Iterate(bytes []byte, stream primitive.Stream) {\n\tdefer stream.Close()\n\tfor _, value := range l.values {\n\t\tstream.Result(proto.Marshal(&IterateResponse{\n\t\t\tValue: value,\n\t\t}))\n\t}\n}", "func startPipelineFunction(numbers chan<- int) {\n\tfor i := 1; i <= 10; i++ {\n\t\tnumbers <- i\n\t}\n\tclose(numbers)\n}", "func ReduceFirst(values []interface{}) interface{} {\n\tout := &firstLastMapOutput{}\n\tpointsYielded := false\n\n\tfor _, v := range values {\n\t\tif v == nil {\n\t\t\tcontinue\n\t\t}\n\t\tval := v.(*firstLastMapOutput)\n\t\t// Initialize first\n\t\tif !pointsYielded {\n\t\t\tout.Time = val.Time\n\t\t\tout.Val = val.Val\n\t\t\tpointsYielded = true\n\t\t}\n\t\tif val.Time < out.Time {\n\t\t\tout.Time = val.Time\n\t\t\tout.Val = val.Val\n\t\t}\n\t}\n\tif pointsYielded {\n\t\treturn out.Val\n\t}\n\treturn nil\n}", "func (m *Map) Reduce(\n\treduce func(map[interface{}]interface{}) interface{},\n\tjoin func(x, y interface{}) interface{},\n) interface{} {\n\tsplits := m.splits\n\t// NewMap ensures that len(splits) > 0\n\tresult := splits[0].reduce(reduce)\n\tfor i := 1; i < len(splits); i++ {\n\t\tresult = join(result, splits[i].reduce(reduce))\n\t}\n\treturn result\n}", "func (*filterMetricProcessor) Start(ctx context.Context, host component.Host) error {\n\treturn nil\n}", "func (s *Stream) Map(f interface{}) *Stream {\n\top, err := unary.MapFunc(f)\n\tif err != nil {\n\t\ts.drainErr(err)\n\t\treturn s\n\t}\n\treturn s.Transform(op)\n}", "func generateMR(in *pb.SliceJobRequest) (ch chan *bvInfo, err error) {\n\tch = make(chan *bvInfo)\n\tgo func() {\n\t\tjob := in.GetNewJobRequest()\n\t\tvar base *stl.STL\n\t\tfor i, stlFile := range job.GetStlFiles() {\n\t\t\tstlMesh, err := stl.New(stlFile, job.GetDim(), job.GetNX(), job.GetNY(), job.GetNZ())\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"stl.New: %v\", err)\n\t\t\t}\n\t\t\tif i == 0 {\n\t\t\t\tbase = stlMesh\n\t\t\t}\n\t\t\tlog.Printf(\"generateMR: i=%v, %v triangles\", i, len(stlMesh.Mesh.Triangles))\n\n\t\t\t// Now dice it up.\n\t\t\tfor zi := 0; zi < int(job.GetNZ()); zi++ {\n\t\t\t\tz1 := base.MBB.Min.Z + float64(zi*base.DimZ)*base.MMPV\n\t\t\t\tfor yi := 0; yi < int(job.GetNY()); yi++ {\n\t\t\t\t\ty1 := base.MBB.Min.Y + float64(yi*base.DimY)*base.MMPV\n\t\t\t\t\tfor xi := 0; xi < int(job.GetNX()); xi++ {\n\t\t\t\t\t\tx1 := base.MBB.Min.X + float64(xi*base.DimX)*base.MMPV\n\n\t\t\t\t\t\tmapIn := &bvInfo{\n\t\t\t\t\t\t\tdx: xi * base.DimX,\n\t\t\t\t\t\t\tdy: yi * base.DimY,\n\t\t\t\t\t\t\tdz: zi * base.DimZ,\n\t\t\t\t\t\t\tbv: &binvox.BinVOX{\n\t\t\t\t\t\t\t\tNX: base.DimX, NY: base.DimY, NZ: base.DimZ,\n\t\t\t\t\t\t\t\tTX: 
x1, TY: y1, TZ: z1,\n\t\t\t\t\t\t\t\tScale: base.SubregionScale,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tbase: i == 0,\n\t\t\t\t\t\t\tmesh: stlMesh.Mesh,\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tlog.Printf(\"generateMR: sending STL #%v to mapper\", i)\n\t\t\t\t\t\tch <- mapIn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tclose(ch)\n\t}()\n\treturn ch, nil\n}", "func (l *lexer) run() {\n\tfor state := lexStart; state != nil; {\n\t\tstate = state(l)\n\t}\n}", "func (f *filtererProcessor) Start(ctx context.Context) {\n\tctx = f.StartInternal(ctx, filtererProcName)\n\tf.input.Start(ctx)\n}", "func Reduce[ValT, AccumT any](\n\tslc []ValT,\n\tstart AccumT,\n\tfn func(AccumT, ValT) AccumT,\n) AccumT {\n\tret := start\n\tfor _, t := range slc {\n\t\tret = fn(ret, t)\n\t}\n\treturn ret\n}", "func StartMaster(config *Config, reduceFunc ReduceFunc) error {\n\t// Config variables\n\tmaster := config.Master\n\tinput := config.InputData\n\ttable := config.Table\n\toutput := config.Output\n\tm := config.M\n\tr := config.R\n\n\t// Load the input data\n\tdb, err := sql.Open(\"sqlite3\", input)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tfailure(\"sql.Open\")\n\t\treturn err\n\t}\n\tdefer db.Close()\n\n\t// Count the work to be done\n\tquery, err := db.Query(fmt.Sprintf(\"select count(*) from %s;\", table))\n\tif err != nil {\n\t\tfailure(\"sql.Query4\")\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\tdefer query.Close()\n\n\t// Split up the data per m\n\tvar count int\n\tvar chunksize int\n\tquery.Next()\n\tquery.Scan(&count)\n\tchunksize = int(math.Ceil(float64(count)/float64(m)))\n\tvar works []Work\n\tfor i:=0; i<m; i++ {\n\t\tvar work Work\n\t\twork.Type = TYPE_MAP\n\t\twork.Filename = input\n\t\twork.Offset = i * chunksize\n\t\twork.Size = chunksize\n\t\twork.WorkerID = i\n\t\tworks = append(works, work)\n\t}\n\n\t// Set up the RPC server to listen for workers\n\tme := new(Master)\n\tme.Maps = works\n\tme.M = m\n\tme.R = r\n\tme.ReduceCount = 0\n\tme.DoneChan = make(chan int)\n\tme.Table = table\n\tme.Output = output\n\n\trpc.Register(me)\n\trpc.HandleHTTP()\n\n\tgo func() {\n\t\terr := http.ListenAndServe(master, nil)\n\t\tif err != nil {\n\t\t\tfailure(\"http.ListenAndServe\")\n\t\t\tlog.Println(err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\t}()\n\n\t<-me.DoneChan\n\n\terr = Merge(r, reduceFunc, output)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\treturn nil\n}", "func (f Filter) run(node *yaml.RNode) error {\n\tfor key, value := range f.Annotations {\n\t\tif err := node.PipeE(fsslice.Filter{\n\t\t\tFsSlice: f.FsSlice,\n\t\t\tSetValue: fsslice.SetEntry(key, value),\n\t\t\tCreateKind: yaml.MappingNode, // Annotations are MappingNodes.\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (s *SyncTask) Start(input io.Reader, synced, failed io.Writer) error {\n\n\tstart := time.Now()\n\n\tkeysIn := make(chan s3.Key, s.SyncPara*BufferFactor)\n\tkeysOk := make(chan s3.Key, s.SyncPara*BufferFactor)\n\tkeysFail := make(chan s3.Key, s.SyncPara*BufferFactor)\n\n\tdecoders := make(chan []byte, s.DecodePara*BufferFactor)\n\n\t// start JSON decoders\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"key_decoders\": s.DecodePara,\n\t\t\"buffer_size\": cap(decoders),\n\t}).Info(\"starting key decoders\")\n\n\tdecGroup := sync.WaitGroup{}\n\tfor i := 0; i < s.DecodePara; i++ {\n\t\tdecGroup.Add(1)\n\t\tgo s.decode(&decGroup, decoders, keysIn)\n\t}\n\n\t// start S3 sync workers\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"sync_workers\": s.SyncPara,\n\t\t\"buffer_size\": cap(keysIn),\n\t}).Info(\"starting key sync 
workers\")\n\tsyncGroup := sync.WaitGroup{}\n\tfor i := 0; i < s.SyncPara; i++ {\n\t\tsyncGroup.Add(1)\n\t\tgo s.syncKey(&syncGroup, s.src, s.dst, keysIn, keysOk, keysFail)\n\t}\n\n\t// track keys that have been sync'd, and those that we failed to sync.\n\tlogrus.Info(\"starting to write progress\")\n\tencGroup := sync.WaitGroup{}\n\tencGroup.Add(2)\n\tgo s.encode(&encGroup, synced, keysOk)\n\tgo s.encode(&encGroup, failed, keysFail)\n\n\t// feed the pipeline by reading the listing file\n\tlogrus.Info(\"starting to read key listing file\")\n\terr := s.readLines(input, decoders)\n\n\t// when done reading the source file, wait until the decoders\n\t// are done.\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"since_start\": time.Since(start),\n\t\t\"line_count\": metrics.fileLines.String(),\n\t}).Info(\"done reading lines from sync list\")\n\tclose(decoders)\n\tdecGroup.Wait()\n\n\t// when the decoders are all done, wait for the sync workers to finish\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"since_start\": time.Since(start),\n\t\t\"line_count\": metrics.decodedKeys.String(),\n\t}).Info(\"done decoding keys from sync list\")\n\n\tclose(keysIn)\n\tsyncGroup.Wait()\n\n\tclose(keysOk)\n\tclose(keysFail)\n\n\tencGroup.Wait()\n\n\t// the source file is read, all keys were decoded and sync'd. we're done.\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"since_start\": time.Since(start),\n\t\t\"sync_ok\": metrics.syncOk.String(),\n\t\t\"sync_fail\": metrics.syncAbandoned.String(),\n\t}).Info(\"done syncing keys\")\n\n\treturn err\n}" ]
[ "0.6495857", "0.6274932", "0.5855628", "0.5777703", "0.56839734", "0.5476287", "0.542622", "0.54102355", "0.53059393", "0.5219895", "0.5206251", "0.52048934", "0.49814385", "0.49803457", "0.4949015", "0.49386513", "0.49209532", "0.48929322", "0.48915768", "0.48896503", "0.48856694", "0.48699242", "0.48605788", "0.48131874", "0.4791691", "0.47801068", "0.47538543", "0.47475982", "0.47422338", "0.47063738", "0.4694349", "0.4690032", "0.46752122", "0.46688163", "0.46423087", "0.46233392", "0.46163654", "0.45995486", "0.4597929", "0.45794192", "0.4568776", "0.45680812", "0.45664898", "0.45623016", "0.45525452", "0.4549277", "0.45475644", "0.45365253", "0.45341063", "0.45292112", "0.45260385", "0.452064", "0.4515256", "0.45101136", "0.45005214", "0.44979456", "0.44707906", "0.44628778", "0.44582742", "0.44573426", "0.44566116", "0.44564694", "0.44524723", "0.44442496", "0.44423375", "0.44318637", "0.44123265", "0.4403566", "0.43957233", "0.43943813", "0.43931088", "0.43925872", "0.43918785", "0.43909547", "0.43789124", "0.43744013", "0.4363477", "0.43563393", "0.4355176", "0.43522656", "0.43493047", "0.43488994", "0.4346883", "0.4345049", "0.43421823", "0.43411422", "0.43357873", "0.43338895", "0.43318787", "0.43305746", "0.4324342", "0.4322726", "0.4318932", "0.4318859", "0.43185672", "0.43159202", "0.43140674", "0.4312466", "0.43014652", "0.4298258" ]
0.66319555
0
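Several negatives in the record above share the same state-function loop — for state := lexStart; state != nil; { state = state(l) } — in which each state does some work and returns the next state, and the run loop closes the output channel once a state returns nil. A minimal runnable sketch of that pattern follows; the lexer type, its fields, and the lexWord state are hypothetical stand-ins for illustration, not code taken from any entry above.

package main

import "fmt"

// stateFn mirrors the state-function type used by the run loops above:
// each state does some work and returns the next state, or nil to stop.
type stateFn func(*lexer) stateFn

// lexer is a hypothetical token producer (assumed names, not from the data).
type lexer struct {
	input  string
	pos    int
	tokens chan string
}

// lexWord emits one space-separated word per call.
func lexWord(l *lexer) stateFn {
	start := l.pos
	for l.pos < len(l.input) && l.input[l.pos] != ' ' {
		l.pos++
	}
	if l.pos > start {
		l.tokens <- l.input[start:l.pos]
	}
	if l.pos == len(l.input) {
		return nil // no next state: run exits and closes the channel
	}
	l.pos++ // skip the separator
	return lexWord
}

// run drives the machine the same way the negatives do: loop until a
// state returns nil, then close the channel so receivers can range over it.
func (l *lexer) run() {
	for state := lexWord; state != nil; {
		state = state(l)
	}
	close(l.tokens)
}

func main() {
	l := &lexer{input: "map emit reduce", tokens: make(chan string)}
	go l.run()
	for tok := range l.tokens {
		fmt.Println(tok)
	}
}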
stop stops the reducer.
func (r *reducer) stop() {
	for _, m := range r.mappers {
		m.stop()
	}
	syncClose(r.done)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *reaper) stop() {\n\tr.stopCh <- struct{}{}\n}", "func (p *literalProcessor) stop() { syncClose(p.done) }", "func (e *binaryExprEvaluator) stop() {\n\te.lhs.stop()\n\te.rhs.stop()\n\tsyncClose(e.done)\n}", "func (m *mapper) stop() { syncClose(m.done) }", "func (w *worker) stop() {\n\tatomic.StoreInt32(&w.running, 0)\n}", "func (w *Watch) stop() {\n\tw.done <- struct{}{}\n}", "func (m *Module) stop() {\n\tm.mux.Lock()\n\tdefer m.mux.Unlock()\n\tif m.started && !m.isFinished() {\n\t\tclose(m.done)\n\t}\n}", "func (d *D) stop() {\n\tclose(d.stopCh)\n}", "func (c *Controller) stop(name types.NamespacedName) {\n\tproc, ok := c.procs[name]\n\tif !ok {\n\t\treturn\n\t}\n\n\tif proc.cancelFunc == nil {\n\t\treturn\n\t}\n\tproc.cancelFunc()\n\t<-proc.doneCh\n\tproc.probeWorker = nil\n\tproc.cancelFunc = nil\n\tproc.doneCh = nil\n}", "func (sl *ReceiverLoop) stop() {\n\tsl.cancel()\n\t<-sl.stopped\n}", "func (w *worker) stop() {\n\tselect {\n\tcase w.stopCh <- struct{}{}:\n\tdefault: // Non-blocking.\n\t}\n}", "func (s *schedule) stop() {\n\tif !s.running {\n\t\treturn\n\t}\n\ts.running = false\n\ts.stopCh <- struct{}{}\n}", "func (r *RunCommand) stop() {\n\tr.logTail.Stop()\n\tr.pw.Stop()\n}", "func (w *worker) stop() {\n\tw.quitChan <- true\n}", "func (_m *MockCompactionPlanContext) stop() {\n\t_m.Called()\n}", "func (s *Scanner) Stop() {\n\ts.stop <- struct{}{}\n}", "func (f *framework) stop() {\n\tclose(f.epochChan)\n}", "func (bt *Metricbeat) Stop() {\n\tclose(bt.done)\n}", "func (hb *heartbeat) stop() {\n\tselect {\n\tcase hb.stopChan <- struct{}{}:\n\tdefault:\n\t}\n}", "func (it *messageIterator) stop() {\n\tit.cancel()\n\tit.mu.Lock()\n\tit.checkDrained()\n\tit.mu.Unlock()\n\tit.wg.Wait()\n}", "func (t *Tracer) Stop() {}", "func (margelet *Margelet) Stop() {\n\tmargelet.running = false\n}", "func (c *Collector) Stop() {\n\tclose(c.stopChan)\n\t<-c.doneChan\n\n\t// Clean the metrics on exit.\n\tfor _, state := range api.NodeStatus_State_name {\n\t\tnodesMetric.WithValues(strings.ToLower(state)).Set(0)\n\t}\n}", "func (a *Acceptor) Stop() {\n\t//TODO(student): Task 3 - distributed implementation\n\ta.stop <- 0\n\n}", "func (er *BufferedExchangeReporter) Stop() {\n\n}", "func (ns *EsIndexer) Stop() {\n\n}", "func (l *Learner) Stop() {\n\t//TODO(student): Task 3 - distributed implementation\n}", "func (n *Node) stop() (err error) {\n\tif err = n.stateCheck(nodeRunning, nodeHealthChecking); err != nil {\n\t\treturn\n\t}\n\tn.setState(nodeShuttingDown)\n\tn.stopChan <- true\n\tn.expireTicker.Stop()\n\tclose(n.stopChan)\n\tlogDebug(\"[Node]\", \"(%v) shutting down.\", n)\n\tn.shutdown()\n\treturn\n}", "func (bfx *bloomfilterIndexer) Stop(ctx context.Context) error {\n\treturn bfx.flusher.KVStoreWithBuffer().Stop(ctx)\n}", "func (t *channelTransport) stop() {\n\tt.stopChan <- struct{}{}\n}", "func (l *Learner) Stop() {\n\t// TODO(student): distributed implementation\n\tl.stop <- struct{}{}\n}", "func (l *Learner) Stop() {\n\tl.stop <- true\n}", "func (w *Processor) Stop() {\n\tclose(w.stop)\n}", "func op_STOP(pc *uint64, in *interpreter, ctx *callCtx) uint64 {\n\treturn 0\n}", "func (pool *WebSocketPool)stop() {\n\tclose(pool.input)\n}", "func (d *dispatcher) Stop() {\n\tclose(d.inCh)\n}", "func (r *ReceiveFuncState) Stop() {\n\tmu.Lock()\n\tdefer mu.Unlock()\n\tr.running = false\n\tselfCheckLocked()\n}", "func (rf *Raft) stop(timer *time.Timer) {\n\tif !timer.Stop() && len(timer.C) != 0 {\n\t\t<-timer.C\n\t}\n}", "func (r *restoreRunner) Stop() error {\n\t// Currently noop\n\treturn 
nil\n}", "func (r *RecordStream) Stop() {\n\tif r.state == running {\n\t\tr.c.c.Request(&proto.CorkRecordStream{StreamIndex: r.index, Corked: true}, nil)\n\t\tr.state = idle\n\t}\n}", "func (s *ObjectStore) Stop() {\n\tclose(s.stopCh)\n}", "func (bc *BotCommand) stop() {\n\tbc.Lock()\n\tdefer bc.Unlock()\n\tbc.running = false\n}", "func (a *appsec) stop() {\n\ta.unregisterWAF()\n\ta.limiter.Stop()\n}", "func (s *ContinuousScanner) Stop() {\n\ts.stop <- struct{}{}\n}", "func (c *Counter) Stop() {\n\tc.controlChannel <- controlRecord{\"stop\", \"\"}\n}", "func (x *x509Handler) stop() {\n\tclose(x.stopChan)\n}", "func (w *eventCollector) stop(t *testing.T) Events {\n\treturn w.stopWait(t, time.Second)\n}", "func (c *Check) Stop() { close(c.stopCh) }", "func (n *Notary) Stop() {\n\tclose(n.stopCh)\n}", "func stop() {\n\tlog.Info(\"Maison is stopping...\")\n\n\tclosePlugins()\n\n\t// TODO: close stores\n\n\tbus.Stop()\n\n\tlog.Info(\"Maison is stopped\")\n}", "func (c *collector) Stop() {\n\tclose(c.stop)\n}", "func (c *Processor) Stop() (err error) {\n\tc.runState = RunStateStopped\n\treturn\n}", "func (c *Consumer) Stop() {\n\tc.stop <- true\n}", "func (s *streamStrategy) Stop() {\n\tclose(s.inputChan)\n\t<-s.done\n}", "func (p *Pacer) Stop() {\n\tclose(p.gate)\n}", "func (t *Tailer) Stop() {\n\tatomic.StoreInt32(&t.didFileRotate, 0)\n\tt.stop <- struct{}{}\n\tt.source.RemoveInput(t.path)\n\t// wait for the decoder to be flushed\n\t<-t.done\n}", "func (w *Watcher) Stop() { w.streamer.Stop() }", "func (m *Machine) Stop() {\n\tm.stopSign <- struct{}{}\n}", "func (player *musicPlayer) stop() {\n\tplayer.Lock()\n\tdefer player.Unlock()\n\tif player.state != nil {\n\t\tif player.state.chain != nil {\n\t\t\tplayer.state.chain.DeleteAll()\n\t\t}\n\t\tplayer.state.status = paused\n\t\tplayer.state.current = 0\n\t\tplayer.state.queue = make([]string, 0)\n\t}\n}", "func (q *testQueue) stop() {\n\tif atomic.LoadInt32(&q.active) == 0 {\n\t\treturn\n\t}\n\n\tatomic.StoreInt32(&q.active, 0)\n\n\tclose(q.wait)\n\tq.muw.Lock()\n\tq.wg.Wait()\n\tq.muw.Unlock()\n}", "func (p *noop) Stop() {}", "func (f *fakeAppReconcileWatcher) Stop() {\n\tclose(f.ch)\n}", "func (sp *StreamPool) Stop() {\n\t//sw.quitCh <- true\n}", "func (r *Reader) Stop() {\n\tatomic.StoreInt32(&r.stopped, 1)\n}", "func (eis *eventSocket) stop() error {\n\teis.log.Info(\"closing Chain IPC\")\n\terrs := wrappers.Errs{}\n\terrs.Add(eis.unregisterFn(), eis.socket.Close())\n\treturn errs.Err\n}", "func (w *Worker) Stop() {\n\tclose(w.stopCh)\n}", "func (h *ProxyHealth) stop() {\n\tif h.cancel != nil {\n\t\th.cancel <- struct{}{}\n\t\tclose(h.cancel)\n\t\th.cancel = nil\n\t}\n}", "func (e *Executor) Stop() {\n\tselect {\n\tcase <-e.stop:\n\tdefault:\n\t\tclose(e.stop)\n\t}\n}", "func (v *vtStopCrawler) stop() {\n\tfor _, worker := range v.workers {\n\t\tworker.stop()\n\t}\n\tclose(v.done)\n}", "func (g *Gopher) Stop() {\n\tif g.state == running {\n\t\tg.done <- struct{}{}\n\t\tg.state = stopped\n\t\tg.finalize()\n\t}\n}", "func (nr *namedReceiver) Stop(ctx context.Context, d Dest) error {\n\tmetricRecvTotal.WithLabelValues(d.Type.String(), \"STOP\")\n\treturn nr.Receiver.Stop(ctx, d)\n}", "func (manager *BarWriter) Stop() {\n\tmanager.stopChan <- struct{}{}\n}", "func (c *Controller) Stop(ctx hive.HookContext) error {\n\tdoneChan := make(chan struct{})\n\tgo func() {\n\t\tc.workerpool.Close()\n\t\tclose(doneChan)\n\t}()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tcase <-doneChan:\n\t}\n\n\treturn nil\n}", "func (c *Controller) 
Stop(ctx hive.HookContext) error {\n\tdoneChan := make(chan struct{})\n\tgo func() {\n\t\tc.workerpool.Close()\n\t\tclose(doneChan)\n\t}()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tcase <-doneChan:\n\t}\n\n\treturn nil\n}", "func Stop() {\n\tstopRunning <- true\n\n}", "func (e *exec) stop(ctx context.Context) {\n\t// Lock the mutex to prevent race conditions with Start\n\te.execMutex.Lock()\n\tdefer e.execMutex.Unlock()\n\n\t// Do the shutdown sequence once until the startup sequence resets\n\te.stopOnce.Do(func() {\n\t\tdefer func() {\n\t\t\t// reset startOnce so the startup sequence can happen again\n\t\t\te.startOnce = sync.Once{}\n\t\t}()\n\t\te.stopFn(ctx)\n\t})\n}", "func (s *stateMachine) Stop() {\n\tselect {\n\tcase s.cmds <- ReleaseMessage():\n\t\t// Also inform the state machine it should exit since the internal handler\n\t\t// may override the release message causing the task to be unreleaseable.\n\t\ts.stop()\n\tcase <-s.stopped:\n\t\t// Already stopped!\n\t}\n}", "func (a *App) Stop() {\n\ta.stopch <- struct{}{}\n}", "func (f *FakeOutput) Stop() error { return nil }", "func (s *server) stop() {\n\ts.stopMu.Lock()\n\tdefer s.stopMu.Unlock()\n\n\tclose(s.stopCh)\n\ts.stopCh = make(chan struct{})\n}", "func (converger *converger) Stop() {\n\tconverger.stop <- struct{}{}\n}", "func (b *Blinker) Stop() {\n\tclose(b.stop)\n}", "func (cMap *MyStruct) Stop(){\n\tcMap.stop <- true\n}", "func (b *Batch) Stop() {\n\tb.cancelFunc()\n}", "func (s *samplerBackendRateCounter) Stop() {\n\tclose(s.exit)\n\t<-s.stopped\n}", "func (r *RoverDriver) Stop() {\n r.commands <- stop\n}", "func (s *Streamer) Stop() {\n\tclose(s.stopc)\n}", "func (w InvokehWorker) Stop() {\n\tgo func() {\n\t\tw.QuitChan <- true\n\t}()\n}", "func (r *Runner) Stop() {\n\tr.logger.Info(\"Stopping runner...\")\n\tr.watcher.Stop()\n\t<-r.watcher.SndDoneCh // NOTE: Might need a timeout to prevent blocking forever\n\tclose(r.RcvDoneCh)\n}", "func (p *peer) stop() {\n\tp.fsmsMu.Lock()\n\tdefer p.fsmsMu.Unlock()\n\n\tfor _, fsm := range p.fsms {\n\t\tfsm.eventCh <- ManualStop\n\t}\n}", "func (m *Merge) Stop() {\n\tm.cancelFunc()\n}", "func (_e *MockCompactionPlanContext_Expecter) stop() *MockCompactionPlanContext_stop_Call {\n\treturn &MockCompactionPlanContext_stop_Call{Call: _e.mock.On(\"stop\")}\n}", "func (jbobject *ShuffleShuffleBlockResolver) Stop() {\n\t_, err := jbobject.CallMethod(javabind.GetEnv(), \"stop\", javabind.Void)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n}", "func (d *ServerlessDemultiplexer) Stop(flush bool) {\n\tif flush {\n\t\td.ForceFlushToSerializer(time.Now(), true)\n\t}\n\n\td.statsdWorker.stop()\n\n\tif d.forwarder != nil {\n\t\td.forwarder.Stop()\n\t}\n}", "func (m *MultiRaft) Stop() {\n\tm.Transport.Stop(m.nodeID)\n\tm.ops <- &stopOp{}\n\t<-m.stopped\n\tm.multiNode.Stop()\n}", "func Stop(err error) error {\n\treturn terminalError{err}\n}", "func (f *Flame) Stop() {\n\tf.stop <- struct{}{}\n}", "func (c *Controller) Stop() {\n\tglog.Info(\"Stopping the SparkApplication controller\")\n\tc.queue.ShutDown()\n}", "func (h *handler) Stop() {\n\th.lock.Lock()\n\tdefer h.lock.Unlock()\n\n\tdefer func() {\n\t\th.rnn = false\n\t}()\n\n\tif h.rnn {\n\t\tclose(h.ch)\n\t}\n}", "func (a *Acceptor) Stop() {\n\t//TODO(student): Task 3 - distributed implementation\n\ta.stopChan <- 0\n}" ]
[ "0.7372499", "0.69866097", "0.69120806", "0.6910597", "0.67642725", "0.6730798", "0.6723413", "0.67230844", "0.66535413", "0.6557976", "0.6547551", "0.6458109", "0.6292822", "0.6267955", "0.6254633", "0.62406087", "0.6202152", "0.61858034", "0.6176004", "0.61700773", "0.6168878", "0.616183", "0.6149832", "0.613463", "0.61330825", "0.6129294", "0.61270106", "0.61217254", "0.61050266", "0.6101858", "0.609664", "0.60923725", "0.60883564", "0.60830766", "0.606964", "0.6064126", "0.60634655", "0.60580295", "0.6054482", "0.6048641", "0.60271716", "0.6025926", "0.6024733", "0.60070354", "0.59954983", "0.598757", "0.59777796", "0.59539056", "0.59461117", "0.59436387", "0.59409976", "0.5937827", "0.59234256", "0.59224236", "0.5921565", "0.59204096", "0.59199697", "0.59198487", "0.5919828", "0.590746", "0.58989537", "0.5898607", "0.5897671", "0.58974934", "0.58969784", "0.5896285", "0.5890064", "0.5883597", "0.5882215", "0.58794934", "0.5873915", "0.5862149", "0.58592266", "0.58592266", "0.58572865", "0.5854577", "0.5839093", "0.58342814", "0.5831682", "0.58278686", "0.58257914", "0.58223754", "0.5809287", "0.5803452", "0.58013165", "0.57968926", "0.5795666", "0.578881", "0.57869244", "0.5785151", "0.57844067", "0.57799125", "0.57754624", "0.5773326", "0.57629144", "0.5756141", "0.5755573", "0.5754875", "0.5748127", "0.57411546" ]
0.7991982
0
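The record just closed pairs the query "stop stops the reducer." with a stop method that stops every mapper and then calls an unexported syncClose helper the snippet never defines. Below is a self-contained sketch of that shutdown shape; the mapper type and the sync.Once-based idempotent close are assumptions about what such a helper could look like, not the record's actual implementation.

package main

import (
	"fmt"
	"sync"
)

// mapper is a stand-in for the workers stopped by the reducer; its
// fields are assumed for this sketch.
type mapper struct {
	done chan struct{}
	once sync.Once
}

func (m *mapper) stop() { m.once.Do(func() { close(m.done) }) }

type reducer struct {
	mappers []*mapper
	done    chan struct{}
	once    sync.Once
}

// syncClose is one plausible reading of the unexported helper in the
// record: an idempotent close, safe to call from multiple goroutines.
func (r *reducer) syncClose() { r.once.Do(func() { close(r.done) }) }

// stop mirrors the shape of the record's document: stop every mapper,
// then close the reducer's own done channel.
func (r *reducer) stop() {
	for _, m := range r.mappers {
		m.stop()
	}
	r.syncClose()
}

func main() {
	r := &reducer{done: make(chan struct{})}
	for i := 0; i < 3; i++ {
		r.mappers = append(r.mappers, &mapper{done: make(chan struct{})})
	}
	r.stop()
	r.stop() // idempotent: calling twice must not panic
	<-r.done // returns immediately once done is closed
	fmt.Println("reducer stopped")
}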
C returns the streaming data channel.
func (r *reducer) C() <-chan map[string]interface{} { return r.c }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *Subscription) C() <-chan interface{} {\n\treturn s.channel\n}", "func (s *subscription) C() <-chan interface{} {\n\treturn s.c\n}", "func (c *dataReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func (uc *UnboundedChannel) Get() <-chan interface{} {\n\treturn uc.channel\n}", "func (l *Logger) C() chan<- interface{} {\n\treturn l.src\n}", "func (s *Scanner) C() <-chan []Measurement {\n\treturn s.ch\n}", "func (p *HostedProgramInfo) Channel() io.ReadWriteCloser {\n\treturn p.TaoChannel\n}", "func (conn *Connection) Channel() chan []byte {\n\treturn conn.channel\n}", "func (p *literalProcessor) C() <-chan map[string]interface{} { return p.c }", "func (remote *SerialRemote) Channel() chan []byte {\n\treturn remote.channel\n}", "func (ticker *PausableTicker) GetChannel() <-chan time.Time {\n\treturn ticker.channel\n}", "func (o *Output) Read(channel int) *Buffer {\n\treturn o.channels[channel].Copy()\n}", "func (f *FFS) Get(ctx context.Context, c cid.Cid) (io.Reader, error) {\n\tstream, err := f.client.Get(ctx, &rpc.GetRequest{\n\t\tCid: util.CidToString(c),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treader, writer := io.Pipe()\n\tgo func() {\n\t\tfor {\n\t\t\treply, err := stream.Recv()\n\t\t\tif err == io.EOF {\n\t\t\t\t_ = writer.Close()\n\t\t\t\tbreak\n\t\t\t} else if err != nil {\n\t\t\t\t_ = writer.CloseWithError(err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t_, err = writer.Write(reply.GetChunk())\n\t\t\tif err != nil {\n\t\t\t\t_ = writer.CloseWithError(err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn reader, nil\n}", "func (s *p4RuntimeServer) StreamChannel(stream p4.P4Runtime_StreamChannelServer) error {\n\tfmt.Println(\"Starting bi-directional channel\")\n\tfor {\n\t\tinData, err := stream.Recv()\n\t\tif err == io.EOF {\n\t\t\treturn nil\n\t\t}\n\t\tfmt.Printf(\"%v\", inData)\n\t}\n\n\treturn nil\n}", "func (wp *Pool) C() <-chan Processor {\n\treturn wp.resultChan\n}", "func (p *pipeline) Channel() Channel {\n\treturn p.channel\n}", "func (c *webSocketClosedClient) GetStream() rpcc.Stream { return c.Stream }", "func (e *binaryExprEvaluator) C() <-chan map[string]interface{} { return e.c }", "func (c *Computation) Data() <-chan *messages.DataMessage {\n\treturn c.dataCh\n}", "func (p *Player) Channel() *api.Channel {\n\tretCh := make(chan *api.Channel)\n\tp.chGetChannel <- retCh\n\tc := <-retCh\n\treturn c\n}", "func (me *T) Data() <-chan float64 {\n\n\t// Create channel.\n\t//\n\t// We will return this to the caller.\n\t//\n\t// We will also spawn a goroutine and output the data from this datasack has onto it.\n\t//\n\t\tout := make(chan float64)\n\n\t// Spawn a goroutine that will output the data from this datasack onto the channel\n\t// we previously created.\n\t//\n\t// Note that this goroutine will probably block. 
But that's OK, since it is in\n\t// its own goroutine (and shouldn't take anything else down with it).\n\t//\n\t\tgo func() {\n\t\t\tfor _,value := range me.slice {\n\t\t\t\tout <- value\n\t\t\t}\n\n\t\t\tclose(out)\n\t\t}()\n\n\t// Return.\n\t\treturn out\n}", "func ReadData(c <-chan string) {\n\tfmt.Printf(\"Read Data: %s\\n\", <-c) // 只能收\n}", "func (wet *WETReader) Channel() (<-chan struct { Entry *WETEntry; Err error }) {\n channel := make(chan struct { Entry *WETEntry; Err error })\n go func() {\n defer func() {\n wet.Close()\n close(channel)\n }()\n for {\n entry, err := wet.extractEntry()\n channel <- struct { Entry *WETEntry; Err error }{ entry, err }\n if err != nil {\n return\n }\n }\n }()\n return channel\n}", "func (s *GameSocket) ReadChannel() <-chan *packet.Packet {\n\treturn s.readChan\n}", "func GetChannel(protocol, host string, port int, secureConfig *tls.Config) (ReaderWriterCloser, error) {\n\tvar conn net.Conn\n\tvar err error\n\tconn, err = net.Dial(protocol, host+\":\"+strconv.Itoa(port))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif protocol == \"tcp\" {\n\t\tconn.(*net.TCPConn).SetKeepAlive(true)\n\t\tconn.(*net.TCPConn).SetKeepAlivePeriod(30 * time.Second)\n\t}\n\tif secureConfig != nil {\n\t\tconn = tls.Client(conn, secureConfig)\n\t}\n\tvar readerWriter ReaderWriterCloser = &Channel{\n\t\tprotocol: protocol,\n\t\thost: host,\n\t\tport: port,\n\t\tconn: conn,\n\t\tmaxRead: 8 * 1024,\n\t\treadBuffer: make([]byte, 0),\n\t\twriteBuffer: make([]byte, 0),\n\t\twriteChannel: make(chan writeComplete, 100),\n\t\treadTimeout: 60 * time.Second,\n\t\twriteTimeout: 60 * time.Second,\n\t}\n\tgo readerWriter.(*Channel).writeRoutine()\n\treturn readerWriter, nil\n}", "func (f *feedback) Channel() (<-chan *FeedbackMessage, error) {\n\tif f.conn != nil {\n\t\treturn f.chanel, nil\n\t}\n\n\tif err := f.createConnection(); err != nil {\n\t\tlogerr(\"Unable to start feedback connection: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tf.stopWait.Add(1)\n\tgo f.monitorService()\n\n\treturn f.chanel, nil\n}", "func (c *webSocketFrameReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func (res channelBase) Channel() *types.Channel {\n\treturn res.channel\n}", "func (o *KinesisOutput) GetOutputChannel() chan []byte {\n\treturn o.outputChannel\n}", "func (s VectOp) Stream() <-chan float64 {\n\tch := make(chan float64)\n\tgo feed(ch, s)\n\treturn ch\n}", "func (s *f64) Channel(c int) Floating {\n\treturn floatingChannel{\n\t\tbuffer: s,\n\t\tchannel: c,\n\t}\n}", "func (m *Manager) InputChannel() chan []byte {\n\treturn m.byteStream\n}", "func (m *MetricsExtracor) Channel() chan<- interface{} {\n\treturn m.channel\n}", "func (m *Module) Stream() <-chan bar.Output {\n\tch := base.NewChannel()\n\tgo m.worker(ch)\n\treturn ch\n}", "func (c *webSocketFrameSentClient) GetStream() rpcc.Stream { return c.Stream }", "func (meta *MetaAI) GetChannel(c chan string) {\n\tmeta.l.Lock()\n\tdefer meta.l.Unlock()\n\n\tmeta.i = c\n}", "func getData(client pb.DataClient, filter *pb.DataFilter) {\r\n\t// calling the streaming API\r\n\tstream, err := client.GetData(context.Background(), filter)\r\n\tif err != nil {\r\n\t\tlog.Fatalf(\"Error on get data: %v\", err)\r\n\t}\r\n\tfor {\r\n\t\tdata, err := stream.Recv()\r\n\t\tif err == io.EOF {\r\n\t\t\tbreak\r\n\t\t}\r\n\t\tif err != nil {\r\n\t\t\tlog.Fatalf(\"%v.GetData(_) = _, %v\", client, err)\r\n\t\t}\r\n\t\tlog.Printf(\"Data: %v\", data)\r\n\t}\r\n}", "func (m *mapper) C() <-chan map[string]interface{} { return m.c }", "func (c 
*requestServedFromCacheClient) GetStream() rpcc.Stream { return c.Stream }", "func (e *EventNotif) Channel() (res <-chan Event) {\n\treturn e.eventsCh\n}", "func (c *webSocketCreatedClient) GetStream() rpcc.Stream { return c.Stream }", "func (stream *MAMWriteStream) Open() (trinary.Trytes, error) {\n\tchannelID, err := stream.m.ChannelCreate(5)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tstream.currentChannelID = channelID\n\treturn channelID, nil\n}", "func (gi *Invoker) StreamRecv(param *common.Params) error {\n\t//gloryPkg := newGloryRequestPackage(\"\", param.MethodName, uint64(common.StreamSendPkg), param.Seq)\n\t//gloryPkg.Params = append(gloryPkg.Params, param.Value)\n\t//gloryPkg.Header.ChanOffset = param.ChanOffset\n\t//gloryPkg.Header.Seq = param.Seq\n\t//if err := gloryPkg.sendToConn(gi.gloryConnClient, gi.handler); err != nil {\n\t//\tlog.Error(\"StreamRecv: gloryPkg.sendToConn(gi.conn, gi.handler) err =\", err)\n\t//\treturn GloryErrorConnErr\n\t//}\n\treturn nil\n}", "func WrapDataChannel(rtcDataChannel RTCDataChannel) (*DataChannel, error) {\n\trr, rw, err := Pipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdc := &DataChannel{\n\t\tdc: rtcDataChannel,\n\t\trr: rr,\n\t}\n\tdc.dc.OnMessage(func(data []byte) {\n\t\tlog.WithField(\"data\", data).\n\t\t\tDebug(\"datachannel message\")\n\n\t\tif rw != nil {\n\t\t\t_, err := rw.Write(data)\n\t\t\tif err != nil {\n\t\t\t\trw.Close()\n\t\t\t\trw = nil\n\t\t\t}\n\t\t}\n\t})\n\treturn dc, nil\n}", "func (c *responseReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *CounterChannel) Get() uint64 {\n\tc.check()\n\treturn <-c.readCh\n}", "func (c *webTransportClosedClient) GetStream() rpcc.Stream { return c.Stream }", "func (m *MetricsHolder) Channel() chan<- interface{} {\n\treturn m.channel\n}", "func (c *ChanReader) Read(out []byte) (int, error) {\n\tif c.buffer == nil {\n\t\treturn 0, io.EOF\n\t}\n\tn := copy(out, c.buffer)\n\tc.buffer = c.buffer[n:]\n\tif len(out) <= len(c.buffer) {\n\t\treturn n, nil\n\t} else if n > 0 {\n\t\t// We have some data to return, so make the channel read optional\n\t\tselect {\n\t\tcase p := <-c.input:\n\t\t\tif p == nil { // Stream was closed\n\t\t\t\tc.buffer = nil\n\t\t\t\tif n > 0 {\n\t\t\t\t\treturn n, nil\n\t\t\t\t}\n\t\t\t\treturn 0, io.EOF\n\t\t\t}\n\t\t\tn2 := copy(out[n:], p.Data)\n\t\t\tc.buffer = p.Data[n2:]\n\t\t\treturn n + n2, nil\n\t\tdefault:\n\t\t\treturn n, nil\n\t\t}\n\t}\n\tvar p *StreamChunk\n\tselect {\n\tcase p = <-c.input:\n\tcase <-c.interrupt:\n\t\tc.buffer = c.buffer[:0]\n\t\treturn n, ErrInterrupted\n\t}\n\tif p == nil { // Stream was closed\n\t\tc.buffer = nil\n\t\treturn 0, io.EOF\n\t}\n\tn2 := copy(out[n:], p.Data)\n\tc.buffer = p.Data[n2:]\n\treturn n + n2, nil\n}", "func (handle *Handle) GetStream() (Stream, error) {\n\tvar s Stream\n\tvar some *C.cudaStream_t\n\t//x := C.cudnnHandle_t(handle.Pointer())\n\n\ty := C.cudnnGetStream(handle.x, some)\n\ts.stream = *some\n\treturn s, Status(y).error(\"(*Handle).GetStream\")\n}", "func (c *loadingFinishedClient) GetStream() rpcc.Stream { return c.Stream }", "func (std *ReaderService) Read() (<-chan []byte, error) {\n\tmc := make(chan []byte, 0)\n\n\tstd.pub.Subscribe(mc)\n\n\treturn mc, nil\n}", "func (c ConnectionAdapter) Channel() (Channel, error) {\n\treturn c.Connection.Channel()\n}", "func SourceData(data ...int) <-chan int {\n\tfmt.Println(\"num:\", len(data))\n\tch := make(chan int, 80000000)\n\tgo func() {\n\t\tfor _, v := range data {\n\t\t\tch <- v\n\t\t}\n\t\tclose(ch)\n\t}()\n\treturn 
ch\n}", "func (c *webSocketHandshakeResponseReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *baseChannels) GetS3Channel() chan *S3Object {\n\treturn c.s3Channel\n}", "func (r *Readiness) GetChannel() chan ReadinessMessage {\n\treturn r.channel\n}", "func (nc *NetClient) readChannel() chan struct {\n\t*arbor.ProtocolMessage\n\terror\n} {\n\tout := make(chan struct {\n\t\t*arbor.ProtocolMessage\n\t\terror\n\t})\n\t// read messages continuously and send results back on a channel\n\tgo func() {\n\t\tdefer func() {\n\t\t\t// ensure send on closed channel doesn't cause panic\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tif _, ok := err.(runtime.Error); !ok {\n\t\t\t\t\t// silently cancel runtime errors, but allow other errors\n\t\t\t\t\t// to propagate.\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\tfor {\n\t\t\tm := new(arbor.ProtocolMessage)\n\t\t\terr := nc.ReadWriteCloser.Read(m)\n\t\t\tout <- struct {\n\t\t\t\t*arbor.ProtocolMessage\n\t\t\t\terror\n\t\t\t}{m, err}\n\t\t}\n\t}()\n\treturn out\n}", "func (c *eventSourceMessageReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func UnbufferedChannel() {\n\t/*\n\tbufferred channel would be c := make(chan int, 50)\n\tunbufferred channel\n\t */\n\tc := make(chan int)\n\n\tgo func() {\n\t\tfor i := 0; i < 10; i++ {\n\t\t\t// put number onto channel\n\t\t\t// code stops until the value is taken from the channel\n\t\t\t// like a relay race\n\t\t\tc <- i\n\t\t}\n\t}() // self executing anonymous function\n\n\tgo func() {\n\t\tfor i := 0; i < 10; i++ {\n\t\t\t// take the number off the channel\n\t\t\t// receive the value from the channel and print it\n\t\t\tv := <-c\n\t\t\tfmt.Println(v)\n\n\t\t}\n\t}()\n\n\ttime.Sleep(time.Second)\n}", "func stream_copy(src io.Reader, dst io.Writer) <-chan int {\n\tbuf := make([]byte, 1024)\n\tsync_channel := make(chan int)\n\tgo func() {\n\t\tdefer func() {\n\t\t\tif con, ok := dst.(net.Conn); ok {\n\t\t\t\tcon.Close()\n\t\t\t\tlog.Printf(\"Connection from %v is closed\\n\", con.RemoteAddr())\n\t\t\t}\n\t\t\tsync_channel <- 0 // Notify that processing is finished\n\t\t}()\n\t\tfor {\n\t\t\tvar nBytes int\n\t\t\tvar err error\n\t\t\tnBytes, err = src.Read(buf)\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\tlog.Printf(\"Read error: %s\\n\", err)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t_, err = dst.Write(buf[0:nBytes])\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Write error: %s\\n\", err)\n\t\t\t}\n\t\t}\n\t}()\n\treturn sync_channel\n}", "func (c *CryptoStreamConn) GetDataForWriting() []byte {\n\tdefer c.writeBuf.Reset()\n\tdata := make([]byte, c.writeBuf.Len())\n\tcopy(data, c.writeBuf.Bytes())\n\treturn data\n}", "func Stream(out chan<- Value) error {\n for {\n v, err := DoSomething() // HL\n if err != nil {\n return err\n }\n out <- v // HL\n }\n }", "func streamCopy(src io.Reader, dst io.Writer) <-chan int {\n\tbuf := make([]byte, 1024)\n\tsyncChannel := make(chan int)\n\tgo func() {\n\t\tdefer func() {\n\t\t\tif con, ok := dst.(net.Conn); ok {\n\t\t\t\tcon.Close()\n\t\t\t\t//log.Printf(\"Connection from %v is closed\\n\", con.RemoteAddr())\n\t\t\t}\n\t\t\tsyncChannel <- 0 // Notify that processing is finished\n\t\t}()\n\t\tfor {\n\n\t\t\tvar nBytes int\n\t\t\tvar err error\n\t\t\tnBytes, err = src.Read(buf)\n\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\t//log.Printf(\"Read error: %s\\n\", err)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t_, err = dst.Write(buf[0:nBytes])\n\t\t\tif err != nil {\n\t\t\t\t//log.Fatalf(\"Write error: 
%s\\n\", err)\n\t\t\t}\n\t\t}\n\t}()\n\treturn syncChannel\n}", "func (v Vehicle) Stream() (chan *StreamEvent, chan error, error) {\n\turl := StreamURL + \"/stream/\" + strconv.Itoa(v.VehicleID) + \"/?values=\" + StreamParams\n\treq, _ := http.NewRequest(\"GET\", url, nil)\n\treq.SetBasicAuth(ActiveClient.Auth.Email, v.Tokens[0])\n\tresp, err := ActiveClient.HTTP.Do(req)\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\teventChan := make(chan *StreamEvent)\n\terrChan := make(chan error)\n\tgo readStream(resp, eventChan, errChan)\n\n\treturn eventChan, errChan, nil\n}", "func NewChannel() (chan *fluent.FluentRecordSet, chan Stat) {\n\tmessageCh := make(chan *fluent.FluentRecordSet, MessageChannelBufferLen)\n\tmonitorCh := make(chan Stat, MonitorChannelBufferLen)\n\treturn messageCh, monitorCh\n}", "func NewChannel() (chan *fluent.FluentRecordSet, chan Stat) {\n\tmessageCh := make(chan *fluent.FluentRecordSet, MessageChannelBufferLen)\n\tmonitorCh := make(chan Stat, MonitorChannelBufferLen)\n\treturn messageCh, monitorCh\n}", "func (c *requestInterceptedClient) GetStream() rpcc.Stream { return c.Stream }", "func (r *Receiver) Read() interface{} {\n\tutils.Debugln(\"Reading\")\n\tb := <-r.C // wait for a broadast channel\n\tv := b.v // retrieve value from received broadcastchannel\n\tr.C <- b // write same broadcastchannel to broadcastchannel\n\tr.C = b.c // broadcastchannel now becomes bc from broadcast\n\treturn v // return received value\n}", "func Stream(ctx context.Context, wC etcd.WatchChan) <-chan *etcd.Event {\n\teC := make(chan *etcd.Event, 1024)\n\n\tgo func(ctx context.Context, ec chan *etcd.Event) {\n\t\t// this unblocks any callers ranging on ec\n\t\tdefer close(ec)\n\n\t\t// etcd client will close this channel if error occurs\n\t\tfor wResp := range wC {\n\t\t\tif ok, err := chkctx.Check(ctx); ok {\n\t\t\t\tlog.Info().Str(\"component\", \"Stream\").Msgf(\"stream ctx canceled. returning: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif wResp.Canceled {\n\t\t\t\tlog.Info().Str(\"component\", \"Stream\").Msgf(\"watch channel error encountered. 
returning: %v\", wResp.Err())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, event := range wResp.Events {\n\t\t\t\teC <- event\n\t\t\t}\n\t\t}\n\t}(ctx, eC)\n\n\treturn eC\n}", "func (c *ChangeWatcher) outC() chan *RoomChange {\n if len(c.buffer) <= 0 {\n return nil\n }\n return c.out\n}", "func (closer *Closer) CloseChannel() chan struct{} {\n\treturn closer.channel\n}", "func (r *ChannelReader) Read(b []byte) (sz int, err error) {\n\tif len(b) == 0 {\n\t\treturn 0, io.ErrShortBuffer\n\t}\n\n\tfor {\n\t\tif len(r.buf) > 0 {\n\t\t\tif len(r.buf) <= len(b) {\n\t\t\t\tsz = len(r.buf)\n\t\t\t\tcopy(b, r.buf)\n\t\t\t\tr.buf = nil\n\t\t\t} else {\n\t\t\t\tcopy(b, r.buf)\n\t\t\t\tr.buf = r.buf[len(b):]\n\t\t\t\tsz = len(b)\n\t\t\t}\n\t\t\treturn sz, nil\n\t\t}\n\n\t\tvar ok bool\n\t\tif r.deadline.IsZero() {\n\t\t\tr.buf, ok = <-r.c\n\t\t} else {\n\t\t\ttimer := time.NewTimer(r.deadline.Sub(time.Now()))\n\t\t\tdefer timer.Stop()\n\n\t\t\tselect {\n\t\t\tcase r.buf, ok = <-r.c:\n\t\t\tcase <-timer.C:\n\t\t\t\treturn 0, context.DeadlineExceeded\n\t\t\t}\n\t\t}\n\t\tif len(r.buf) == 0 && !ok {\n\t\t\treturn 0, io.EOF\n\t\t}\n\t}\n}", "func (swp *SourceWorkerPool) GetOutputChannel() (chan map[string]interface{}, error) {\n\treturn swp.outputChannel, nil\n}", "func (p *Publisher) GetChannel() *amqp.Channel {\n\tp.publicMethodsLock.Lock()\n\tdefer p.publicMethodsLock.Unlock()\n\treturn p.getChannelWithoutLock()\n}", "func StreamCreateFile(data interface{}, offset int, flags Flags) (Channel, error) {\n\tvar ch C.DWORD\n\tswitch data := data.(type) {\n\tcase CBytes:\n\t\tch = C.BASS_StreamCreateFile(1, data.Data, culong(offset), culong(data.Length), cuint(flags))\n\tcase string:\n\t\tcstring := unsafe.Pointer(C.CString(data))\n\t\tdefer C.free(cstring)\n\t\tch = C.BASS_StreamCreateFile(0, cstring, culong(offset), 0, cuint(flags))\n\tcase []byte:\n\t\tcbytes := C.CBytes(data)\n\t\tch = C.BASS_StreamCreateFile(1, cbytes, culong(offset), culong(len(data)), cuint(flags))\n\t\t// unlike BASS_SampleLoad, BASS won't make a copy of the sample data internally, which means we can't just pass a pointer to the Go bytes. 
Instead we need to set a sync to free the bytes when the stream it's associated with is freed\n\t\tif ch != 0 {\n\t\t\tchannel := Channel(ch)\n\t\t\t_, err := channel.SetSync(SYNC_FREE, SYNC_ONETIME, 0, SyncprocFree, cbytes)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\t}\n\treturn channelToError(ch)\n}", "func (sc *SoundCloud) Stream(track string) (io.ReadCloser, error) {\n\t// Get the HTTP Stream\n\trsp, err := http.Get(sc.streamUrl(track).String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Createa http stream buffer\n\tbuff := buffer.HTTPBuffer(rsp)\n\tgo buff.Buffer() // Start buffering\n\tscs := &SoundCloudStream{\n\t\tbuffer: buff,\n\t\tdecoder: &mpa.Reader{Decoder: &mpa.Decoder{Input: buff}},\n\t}\n\treturn scs, nil\n}", "func (cc *CounterControl) StreamValues() (chan *CounterData, error) {\n\tentity := cc.counter.ReadWildcardRequest()\n\tentityList := []*p4V1.Entity{entity}\n\n\tcounterEntityCh, err := cc.control.Client.ReadEntities(entityList)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcdataChannel := make(chan *CounterData, cc.counter.Size)\n\tgo func() {\n\t\tdefer close(cdataChannel)\n\t\tfor e := range counterEntityCh {\n\t\t\tcounterData := getCounterData(e)\n\t\t\tcdataChannel <- &counterData\n\t\t}\n\t}()\n\n\treturn cdataChannel, nil\n}", "func (p *Pool) Consume() <-chan interface{} {\n\treturn p.c\n}", "func BufferedChannels(){\n\tc := make(chan int, 2)\n\tc <- 1\n\tc <- 2\n\tfmt.Println(<-c)\n\tfmt.Println(<-c)\n}", "func (std *LineReaderService) Read() (<-chan []byte, error) {\n\tmc := make(chan []byte, 0)\n\n\tstd.pub.Subscribe(mc)\n\n\treturn mc, nil\n}", "func (s *Chan) Pipe(rwc io.ReadWriteCloser) {\n\ts.connection = rwc\n\tgo s.readFromReader(rwc)\n\tgo s.writeToWriter(rwc)\n}", "func outputData(outputChannel chan string) {\n\n\tfor {\n\t\tdata := <-outputChannel\n\t\tfmt.Println(data)\n\t}\n}", "func (l *ChannelList) Get(key string) *Channel {\n\t// get a conn bucket\n\tb := l.Bucket(key)\n\tb.Lock()\n\tif c, ok := b.data[key]; ok {\n\t\tb.Unlock()\n\t\tChStat.IncrAccess()\n\t\treturn c\n\t}\n\tb.Unlock()\n\treturn nil\n}", "func (c *requestWillBeSentClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *webTransportConnectionEstablishedClient) GetStream() rpcc.Stream { return c.Stream }", "func (p *pool) get() (*channel, error) {\n\tif p.closed {\n\t\treturn nil, ErrPoolClosed\n\t}\n\n\tactiveChannel, ok := <-p.readyChannel\n\tif !ok {\n\t\treturn nil, ErrPoolClosed\n\t}\n\n\treturn activeChannel, nil\n}", "func (c *webSocketFrameErrorClient) GetStream() rpcc.Stream { return c.Stream }", "func (k *ChannelKeeper) Channel() *amqp.Channel {\n\treturn k.msgCh\n}", "func (this *FtpsClient) OpenFtpDataChannel(_FtpCommand_S string, _ExpectedReplyCode_i int) (rReplyCode_i int, rReplyMessage_S string, rRts error) {\n\trRts = this.sendRequestToFtpServerDataConn(_FtpCommand_S, _ExpectedReplyCode_i)\n\treturn\n}", "func (c *Client) StreamingDirect(ctx context.Context) (chan Event, error) {\n\treturn c.streaming(ctx, \"direct\", nil)\n}", "func (c *webSocketWillSendHandshakeRequestClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *webTransportCreatedClient) GetStream() rpcc.Stream { return c.Stream }", "func (ch *RingChannel) Out() <-chan interface{} {\n\treturn ch.output\n}", "func (c *remoteConn) OpenChannel(name string, data []byte) (ssh.Channel, error) {\n\tchannel, _, err := c.sconn.OpenChannel(name, data)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\treturn channel, nil\n}", "func (r *realTimer) 
C() <-chan time.Time {\n\treturn r.timer.C\n}", "func (c *cdcClient) recv() {\n\tc.debug(\"recv call\")\n\tdefer c.debug(\"recv return\")\n\n\tvar err error\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tc.shutdown(err)\n\t\t}\n\t\tclose(c.events)\n\t}()\n\n\tvar now time.Time\n\tfor {\n\t\t_, bytes, rerr := c.wsConn.ReadMessage()\n\t\tnow = time.Now()\n\t\tif err != nil {\n\t\t\tif websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) {\n\t\t\t\terr = rerr\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\t// CDC events should be the bulk of data we recv, so presume it's that.\n\t\tvar e CDCEvent\n\t\tif err = json.Unmarshal(bytes, &e); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t// If event ID is set (not empty), then it's a CDC event as expected\n\t\tif e.Id != \"\" {\n\t\t\tc.debug(\"cdc event: %#v\", e)\n\t\t\tselect {\n\t\t\tcase c.events <- e: // send CDC event to caller\n\t\t\tdefault:\n\t\t\t\tc.debug(\"caller blocked\")\n\t\t\t\tc.shutdown(ErrCallerBlocked)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\t// It's not a CDC event, so it should be a control message\n\t\t\tvar msg map[string]interface{}\n\t\t\tif err = json.Unmarshal(bytes, &msg); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif _, ok := msg[\"control\"]; !ok {\n\t\t\t\t// This shouldn't happen: data is not a CDC event or a control message\n\t\t\t\tc.shutdown(ErrBadData)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err = c.control(msg, now); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func (c *Channel) Channels() Channels {\n\treturn c.children\n}", "func (r *chanReader) Read(data []byte) (int, error) {\n\tvar ok bool\n\tfor {\n\t\tif len(r.buf) > 0 {\n\t\t\tn := copy(data, r.buf)\n\t\t\tr.buf = r.buf[n:]\n\t\t\tmsg := windowAdjustMsg{\n\t\t\t\tPeersId: r.clientChan.peersId,\n\t\t\t\tAdditionalBytes: uint32(n),\n\t\t\t}\n\t\t\treturn n, r.clientChan.writePacket(marshal(msgChannelWindowAdjust, msg))\n\t\t}\n\t\tr.buf, ok = <-r.data\n\t\tif !ok {\n\t\t\treturn 0, io.EOF\n\t\t}\n\t}\n\tpanic(\"unreachable\")\n}", "func bufferedChannelTest() {\n\tch := make(chan int, 2)\n\tch <- 1\n\tch <- 2\n\t// ch <- 3 \n\tfmt.Println(<-ch)\n\tfmt.Println(<-ch)\n}" ]
[ "0.6759161", "0.6602864", "0.61977434", "0.6079438", "0.6015766", "0.59815305", "0.5894958", "0.5827573", "0.5811892", "0.5789283", "0.5787107", "0.5769193", "0.5764194", "0.57620907", "0.5744001", "0.57320946", "0.5710497", "0.56509525", "0.56507605", "0.5619693", "0.56017905", "0.55412376", "0.5511171", "0.55090964", "0.5497537", "0.5495607", "0.5485725", "0.54844636", "0.5469122", "0.54626715", "0.544634", "0.5438606", "0.5433448", "0.5426227", "0.5426032", "0.541403", "0.5402727", "0.53927493", "0.5391055", "0.5363698", "0.53503805", "0.53499436", "0.5327847", "0.5324007", "0.53198254", "0.5317453", "0.53115785", "0.5304445", "0.5300533", "0.528647", "0.5275732", "0.5242419", "0.5225599", "0.5221367", "0.5218112", "0.52100986", "0.5196145", "0.5187939", "0.51850754", "0.51704204", "0.516902", "0.51667434", "0.516405", "0.5132859", "0.512949", "0.51228505", "0.51228505", "0.51204574", "0.5118935", "0.5117812", "0.510126", "0.5093387", "0.5090184", "0.5083214", "0.50773835", "0.50705665", "0.5065232", "0.5064193", "0.5061628", "0.5051974", "0.50492626", "0.5042226", "0.5035097", "0.5022692", "0.501978", "0.5019312", "0.5008553", "0.49940854", "0.4989732", "0.4983017", "0.49754548", "0.4958028", "0.49492064", "0.49402693", "0.4940178", "0.49344555", "0.49289474", "0.49203157", "0.49100748", "0.4905373" ]
0.5659543
17
name returns the source name.
func (r *reducer) name() string { return r.stmt.Source.(*Measurement).Name }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *Source) Name() string {\n\treturn s.SourceName\n}", "func (s *Source) Name() string {\n\treturn \"spyse\"\n}", "func (r Source) GetName() string {\n\treturn r.Name\n}", "func (s *Source) Name() string {\n\treturn \"crtsh\"\n}", "func (e *Event) SourceName() collection.Name {\n\tif e.Source != nil {\n\t\treturn e.Source.Name()\n\t}\n\treturn \"\"\n}", "func (s Source) Name() string { return \"rdt\" }", "func (d *DataPacket) SourceName() string {\n\ti := 44 //the ending index for the string, because it is 0 terminated\n\tfor i < 108 && d.data[i] != 0 {\n\t\ti++\n\t}\n\treturn string(d.data[44:i])\n}", "func (s *Source) GetName() string {\n\treturn s.Name\n}", "func (s *Source) Name() string {\n\treturn \"github\"\n}", "func (s *YAMLFileSource) Name() (name string) {\n\treturn fmt.Sprintf(\"yaml file(%s)\", s.path)\n}", "func (o ResourceMetricSourceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ResourceMetricSource) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o ResourceMetricSourceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ResourceMetricSource) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o CloudHealthcareSourceOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v CloudHealthcareSource) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o *ActionDTO) GetSourceName() string {\n\tif o == nil || o.SourceName == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.SourceName\n}", "func (o GetEventSourcesSourceOutput) EventSourceName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetEventSourcesSource) string { return v.EventSourceName }).(pulumi.StringOutput)\n}", "func (o ContainerResourceMetricSourceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ContainerResourceMetricSource) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o ContainerResourceMetricSourceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ContainerResourceMetricSource) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o DataSourceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *DataSource) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (o *BulletinDTO) GetSourceName() string {\n\tif o == nil || o.SourceName == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.SourceName\n}", "func (o *TransactionSplit) GetSourceName() string {\n\tif o == nil || o.SourceName.Get() == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.SourceName.Get()\n}", "func (s *scraper) Source() string {\n\treturn s.name\n}", "func (g componentSourceGenerator) GetName() string {\n\treturn g.Name\n}", "func (o CloudHealthcareSourcePtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *CloudHealthcareSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (s *Structured) GetName() string {\n\treturn s.cloudEvent.Source\n}", "func (s *CommandLineSource) Name() (name string) {\n\treturn \"command-line\"\n}", "func (o ResourceMetricSourcePtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ResourceMetricSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ResourceMetricSourcePtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ResourceMetricSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Name\n\t}).(pulumi.StringPtrOutput)\n}", 
"func (o IopingSpecVolumePersistentVolumeClaimSpecDataSourceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumePersistentVolumeClaimSpecDataSource) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o ResourceMetricSourcePatchOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ResourceMetricSourcePatch) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o ResourceMetricSourcePatchOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ResourceMetricSourcePatch) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (j *Jsonnet) Name(wantsNameSpaced bool) string {\n\tbase := filepath.Base(j.source)\n\tname := strings.TrimSuffix(base, filepath.Ext(base))\n\tif !wantsNameSpaced {\n\t\treturn name\n\t}\n\n\tif j.module == \"/\" {\n\t\treturn name\n\t}\n\n\treturn path.Join(j.module, name)\n}", "func (o ArgoCDExportSpecStoragePvcDataSourceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ArgoCDExportSpecStoragePvcDataSource) string { return v.Name }).(pulumi.StringOutput)\n}", "func (src *Tracer) Name() string {\n\treturn src.name\n}", "func (o IopingSpecVolumeVolumeSourceProjectedSourcesSecretOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceProjectedSourcesSecret) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (s *Data) Source() string {\n\treturn fmt.Sprintf(\"data:%v\", path.Clean(s.Location))\n}", "func (o FioSpecVolumeVolumeSourceProjectedSourcesSecretOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceProjectedSourcesSecret) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (s *MapSource) Name() (name string) {\n\treturn \"map\"\n}", "func (*Plugin) SourceFileName() string {\n\treturn sourceFileName\n}", "func (*Plugin) SourceFileName() string {\n\treturn sourceFileName\n}", "func (o ApplicationStatusOperationStateSyncResultSourceHelmFileParametersOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusOperationStateSyncResultSourceHelmFileParameters) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o ContainerResourceMetricSourcePatchOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ContainerResourceMetricSourcePatch) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o ContainerResourceMetricSourcePatchOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ContainerResourceMetricSourcePatch) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o IopingSpecVolumeVolumeSourceProjectedSourcesSecretPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceProjectedSourcesSecret) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ContainerResourceMetricSourcePtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ContainerResourceMetricSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ContainerResourceMetricSourcePtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ContainerResourceMetricSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o FioSpecVolumePersistentVolumeClaimSpecDataSourceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumePersistentVolumeClaimSpecDataSource) string { return 
v.Name }).(pulumi.StringOutput)\n}", "func (mySource *Source) Source() (param string) {\n\treturn mySource.Sourcevar\n}", "func (o RegistryTaskSourceTriggerOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v RegistryTaskSourceTrigger) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o BuildSpecSourceCredentialsOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BuildSpecSourceCredentials) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o BuildRunStatusBuildSpecSourceCredentialsOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BuildRunStatusBuildSpecSourceCredentials) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o ApplicationSpecSourcePluginEnvOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApplicationSpecSourcePluginEnv) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o ApplicationSpecSourceHelmFileParametersOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationSpecSourceHelmFileParameters) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o FioSpecVolumeVolumeSourceProjectedSourcesSecretPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceProjectedSourcesSecret) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (s *EnvironmentSource) Name() (name string) {\n\treturn \"environment\"\n}", "func (o SourceOutput) DisplayName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Source) pulumi.StringOutput { return v.DisplayName }).(pulumi.StringOutput)\n}", "func (c *auditLog) getName() string {\n\treturn c.name\n}", "func (o CloudHealthcareSourceResponseOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v CloudHealthcareSourceResponse) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o IopingSpecVolumePersistentVolumeClaimSpecDataSourcePtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumePersistentVolumeClaimSpecDataSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (s *SecretsSource) Name() (name string) {\n\treturn \"secrets\"\n}", "func (a Asset) source() string {\n\tsource := fileNameWithoutExt(a.PublicID)\n\n\tif !isURL(source) {\n\t\tvar err error\n\t\tsource, err = url.QueryUnescape(strings.Replace(source, \"%20\", \"+\", -1))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tsource = smartEscape(source)\n\n\tif a.Suffix != \"\" {\n\t\tsource += fmt.Sprintf(\"/%s\", a.Suffix)\n\t}\n\n\tif filepath.Ext(a.PublicID) != \"\" {\n\t\tsource += filepath.Ext(a.PublicID)\n\t}\n\n\treturn source\n}", "func (s *SourceImportAuthor) GetName() string {\n\tif s == nil || s.Name == nil {\n\t\treturn \"\"\n\t}\n\treturn *s.Name\n}", "func (s *Stream) Name() string { return s.file.Name() }", "func (p ProjectInit) Name() string {\n\treturn string(p)\n}", "func (o ApplicationStatusOperationStateSyncResultSourceHelmParametersOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusOperationStateSyncResultSourceHelmParameters) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (ci converterInfo) Source() string {\n\treturn ci.source\n}", "func (o ResourceMetricSourcePatchPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ResourceMetricSourcePatch) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o 
ResourceMetricSourcePatchPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ResourceMetricSourcePatch) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ApplicationStatusHistorySourceHelmFileParametersOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusHistorySourceHelmFileParameters) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o ApplicationStatusSyncComparedToSourcePluginEnvOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApplicationStatusSyncComparedToSourcePluginEnv) string { return v.Name }).(pulumi.StringOutput)\n}", "func (fi *fileInfo) Name() string { return fi.name }", "func (o BuildRunStatusBuildSpecSourceCredentialsPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *BuildRunStatusBuildSpecSourceCredentials) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o TargetProjectOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *TargetProject) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (this Intro) name() estring {\n\treturn this.n\n}", "func (dt *Targeter) name() string {\n\tvar id string\n\tif dt.IDs != nil {\n\t\tid = \"{id}\"\n\t}\n\treturn fmt.Sprintf(\"%s %s/%s/%s\", dt.Method, dt.BaseURL, dt.Endpoint, id)\n}", "func (fe *fileEntry) Name() string { return fe.name }", "func (o ApplicationSpecSourceHelmParametersOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationSpecSourceHelmParameters) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (e *Event) Source() string {\n\treturn e.conn\n}", "func (o ApplicationOperationSyncSourceHelmFileParametersOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationOperationSyncSourceHelmFileParameters) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o FioSpecVolumeVolumeSourceProjectedSourcesConfigMapOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceProjectedSourcesConfigMap) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o LookupServiceIntegrationResultOutput) SourceServiceName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupServiceIntegrationResult) string { return v.SourceServiceName }).(pulumi.StringOutput)\n}", "func (o ApplicationStatusOperationStateSyncResultSourcePluginEnvOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApplicationStatusOperationStateSyncResultSourcePluginEnv) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o IopingSpecVolumeVolumeSourceProjectedSourcesConfigMapOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceProjectedSourcesConfigMap) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (t Type) Source() string {\n\treturn t.source\n}", "func (o *ActionDTO) SetSourceName(v string) {\n\to.SourceName = &v\n}", "func (g GitHub) Name() string {\n\tif g.local != \"\" {\n\t\treturn g.local\n\t}\n\treturn g.binary\n}", "func (p Packet) Name() (name string) {\n\t// todo: think of ways to make this not a compiled in hack\n\t// todo: collectd 4 uses different patterns for some plugins\n\t// https://collectd.org/wiki/index.php/V4_to_v5_migration_guide\n\tswitch p.Plugin {\n\tcase \"df\":\n\t\tname = fmt.Sprintf(\"df_%s_%s\", p.PluginInstance, p.TypeInstance)\n\tcase \"interface\":\n\t\tname = fmt.Sprintf(\"%s_%s\", p.Type, 
p.PluginInstance)\n\tcase \"load\":\n\t\tname = \"load\"\n\tcase \"memory\":\n\t\tname = fmt.Sprintf(\"memory_%s\", p.TypeInstance)\n\tdefault:\n\t\tname = fmt.Sprintf(\"%s_%s_%s_%s\", p.Plugin, p.PluginInstance, p.Type, p.TypeInstance)\n\t}\n\treturn name\n}", "func (o ApplicationStatusHistorySourcePluginEnvOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApplicationStatusHistorySourcePluginEnv) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o SiaFileInfo) Name() string {\n\treturn o.FileName\n}", "func (o ApplicationStatusSyncComparedToSourceHelmFileParametersOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusSyncComparedToSourceHelmFileParameters) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func source() string {\n\treturn \"I am an evil gopher\"\n}", "func (c *withNameAndCode) Name() string {\n\treturn c.name\n}", "func (o BuildSpecSourceCredentialsPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *BuildSpecSourceCredentials) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ContainerResourceMetricSourcePatchPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ContainerResourceMetricSourcePatch) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ContainerResourceMetricSourcePatchPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ContainerResourceMetricSourcePatch) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o *BulletinDTO) SetSourceName(v string) {\n\to.SourceName = &v\n}", "func GenVolumeSourceName(source string, index int64) string {\n\treturn source + common.NameSeparator + strconv.FormatInt(index, 10)\n}", "func (e *EDNS) Name() string { return name }", "func fileSource(filename string, i int) string {\n\treturn fmt.Sprintf(\"%s:%d\", filename, i)\n}", "func (ds *Datasource) GetName() string {\n\treturn ds.name\n}", "func (e *Entry) Name() string {\n\tif len(e.path) == 0 {\n\t\treturn \"\"\n\t}\n\treturn e.path[len(e.path)-1]\n}" ]
[ "0.78794163", "0.75167346", "0.7478469", "0.7449441", "0.7437304", "0.7387519", "0.73252875", "0.72747105", "0.7236276", "0.7090298", "0.7044449", "0.7044449", "0.6945361", "0.6810892", "0.68048626", "0.6776871", "0.6776871", "0.6724642", "0.6661173", "0.6651805", "0.6625454", "0.6564692", "0.65482056", "0.6521428", "0.6444199", "0.6427358", "0.6427358", "0.64273053", "0.63985956", "0.63985956", "0.63970643", "0.63568366", "0.63545203", "0.6345645", "0.6311637", "0.6308424", "0.6303546", "0.6260226", "0.6260226", "0.6247084", "0.6245244", "0.6245244", "0.62372345", "0.62371534", "0.62371534", "0.6222376", "0.6209819", "0.62011534", "0.6200014", "0.61755013", "0.6159335", "0.61291414", "0.6119233", "0.6113662", "0.61117154", "0.61012226", "0.6096734", "0.60674095", "0.6056915", "0.60292953", "0.6028448", "0.60220057", "0.6015895", "0.6010574", "0.5983533", "0.5958791", "0.5958791", "0.5958361", "0.59547806", "0.59520304", "0.59423393", "0.5939665", "0.59377843", "0.59375775", "0.59307474", "0.5929412", "0.5926923", "0.5923438", "0.59232825", "0.59212494", "0.5920124", "0.59168303", "0.59166396", "0.5913426", "0.5906614", "0.5902419", "0.58879215", "0.5887569", "0.58861053", "0.5886017", "0.5874572", "0.58686024", "0.58668345", "0.58668345", "0.5858391", "0.5853577", "0.5842946", "0.5840868", "0.58404166", "0.58368796" ]
0.64019334
28
run runs the reducer loop to read mapper output and reduce it.
func (r *reducer) run() { loop: for { // Combine all data from the mappers. data := make(map[string][]interface{}) for _, m := range r.mappers { kv, ok := <-m.C() if !ok { break loop } for k, v := range kv { data[k] = append(data[k], v) } } // Reduce each key. for k, v := range data { r.fn(k, v, r) } } // Mark the channel as complete. close(r.c) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *mapper) run() {\n\tfor m.itr.NextIterval() {\n\t\tm.fn(m.itr, m)\n\t}\n\tclose(m.c)\n}", "func (s *JsonEntryCounter) Mapper(r io.Reader, w io.Writer) error {\n\tlog.Printf(\"map_input_file %s\", os.Getenv(\"map_input_file\"))\n\twg, out := mrproto.JsonInternalOutputProtocol(w)\n\n\t// for efficient counting, use an in-memory counter that flushes the least recently used item\n\t// less Mapper output makes for faster sorting and reducing.\n\tcounter := lru.NewLRUCounter(func(k interface{}, v int64) {\n\t\tout <- mrproto.KeyValue{k, v}\n\t}, 100)\n\n\tfor line := range mrproto.RawInputProtocol(r) {\n\t\tvar record map[string]json.RawMessage\n\t\tif err := json.Unmarshal(line, &record); err != nil {\n\t\t\tgomrjob.Counter(\"example_mr\", \"Unmarshal Error\", 1)\n\t\t\tlog.Printf(\"%s\", err)\n\t\t\tcontinue\n\t\t}\n\t\tgomrjob.Counter(\"example_mr\", \"Map Lines Read\", 1)\n\t\tcounter.Incr(\"lines_read\", 1)\n\t\tfor k, _ := range record {\n\t\t\tcounter.Incr(k, 1)\n\t\t}\n\t}\n\tcounter.Flush()\n\tclose(out)\n\twg.Wait()\n\treturn nil\n}", "func (r *reducer) start() {\n\tfor _, m := range r.mappers {\n\t\tm.start()\n\t}\n\tgo r.run()\n}", "func (l *lex) run() {\n\tfor state := lexMapKey; state != nil; {\n\t\tstate = state(l)\n\t}\n\tclose(l.tokens)\n}", "func doMap(\n\tjobName string, // The name of the MapReduce job\n\tmapTaskNumber int, // Which map task this is\n\tinFile string, // File name of the input file.\n\tnReduce int, // The number of reduce task that will be run (\"R\" in the paper)\n\tmapF func(file string, contents string) []KeyValue,\n) {\n\tfileContent, err := ioutil.ReadFile(inFile)\n\tif err != nil {\n\t\tfmt.Printf(\n\t\t\t\"Failed to read file=%s, err=%s\\n\",\n\t\t\tinFile,\n\t\t\terr.Error())\n\t\tpanic(err)\n\t}\n\n\t// Map file content to key-value pairs.\n\tkvs := mapF(inFile, string(fileContent))\n\n\t// Creates per-reducer JSON serializer.\n\tencoders := make([]*json.Encoder, nReduce)\n\tfor i := 0; i < nReduce; i++ {\n\t\toutputFileName := reduceName(jobName, mapTaskNumber, i)\n\n\t\treducerFile, err := os.OpenFile(\n\t\t\toutputFileName, os.O_CREATE|os.O_WRONLY, 0777)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\n\t\t\t\t\"Faild to open file=%s in write mode, err=%s\\n\",\n\t\t\t\toutputFileName, err.Error())\n\t\t\tpanic(err)\n\t\t}\n\n\t\tdefer reducerFile.Close()\n\n\t\tencoders[i] = json.NewEncoder(reducerFile)\n\t}\n\n\t// Put each key-value pair to its corresponding reducer file\n\t// sharded by key.\n\tfor _, kv := range kvs {\n\t\treducerTaskNumber := ihash(kv.Key) % uint32(nReduce)\n\t\terr := encoders[reducerTaskNumber].Encode(&kv)\n\n\t\tif err != nil {\n\t\t\tfmt.Printf(\n\t\t\t\t\"Failed to encode (k=%s, v=%s), err=%s\\n\",\n\t\t\t\tkv.Key,\n\t\t\t\tkv.Value,\n\t\t\t\terr.Error())\n\t\t\tpanic(err)\n\t\t}\n\t}\n}", "func doReduce(\n\tjobName string, // the name of the whole MapReduce job\n\treduceTaskNumber int, // which reduce task this is\n\toutFile string, // write the output here\n\tnMap int, // the number of map tasks that were run (\"M\" in the paper)\n\treduceF func(key string, values []string) string,\n) {\n\t//\n\t// You will need to write this function.\n\t//\n\t// You'll need to read one intermediate file from each map task;\n\t// reduceName(jobName, m, reduceTaskNumber) yields the file\n\t// name from map task m.\n\t//\n\t// Your doMap() encoded the key/value pairs in the intermediate\n\t// files, so you will need to decode them. 
If you used JSON, you can\n\t// read and decode by creating a decoder and repeatedly calling\n\t// .Decode(&kv) on it until it returns an error.\n\t//\n\t// You may find the first example in the golang sort package\n\t// documentation useful.\n\t//\n\t// reduceF() is the application's reduce function. You should\n\t// call it once per distinct key, with a slice of all the values\n\t// for that key. reduceF() returns the reduced value for that key.\n\t//\n\t// You should write the reduce output as JSON encoded KeyValue\n\t// objects to the file named outFile. We require you to use JSON\n\t// because that is what the merger than combines the output\n\t// from all the reduce tasks expects. There is nothing special about\n\t// JSON -- it is just the marshalling format we chose to use. Your\n\t// output code will look something like this:\n\t//\n\t// enc := json.NewEncoder(file)\n\t// for key := ... {\n\t// \tenc.Encode(KeyValue{key, reduceF(...)})\n\t// }\n\t// file.Close()\n\t//\n\n\t// CUTSOM\n\t// get all kv pairs from tmp data\n\t// Dataflow\n\t// 1. read each tmp file which map task generated\n\t// 2. decode file json data to kv\n\t// 3. sorted kv list by k\n\t// 4. group the kv pairs to k-(v list) which k is same\n\t// 5. get reduce(k, v-list) result and write the k-result to output file\n\n\tfmt.Println(\"doReduce1\")\n\tkvList := make([]KeyValue, 0)\n\tfor i := 0; i < nMap; i++ {\n\n\t\treadFileName := reduceName(jobName, i, reduceTaskNumber)\n\t\treadFile, readFileErr := os.Open(readFileName)\n\t\tif readFileErr != nil {\n\t\t\treturn\n\t\t}\n\t\tdec := json.NewDecoder(readFile)\n\t\tfor dec.More() {\n\t\t\tvar kv KeyValue\n\t\t\tdecErr := dec.Decode(&kv)\n\t\t\tif decErr != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tkvList = append(kvList, kv)\n\t\t}\n\t}\n\n\t//close(kvChan)\n\t//fmt.Println(\"doReduce chan finish\")\n\n\t// sorted or not\n\t// we can skip sort procedure daze!\n\tkvsMap := make(map[string][]string)\n\tfor _, kv := range kvList {\n\t\tif _, ok := kvsMap[kv.Key]; ok {\n\t\t\t// found key in the kvList\n\t\t\tkvsMap[kv.Key] = append(kvsMap[kv.Key], kv.Value)\n\t\t} else {\n\t\t\tkvsMap[kv.Key] = make([]string, 1)\n\t\t\tkvsMap[kv.Key] = append(kvsMap[kv.Key], kv.Value)\n\t\t}\n\t}\n\n\twriteFile, writeFileErr := os.Create(outFile)\n\tif writeFileErr != nil {\n\t\treturn\n\t}\n\tdefer writeFile.Close()\n\n\toutEnc := json.NewEncoder(writeFile)\n\tfor key, vlist := range kvsMap {\n\t\toutEnc.Encode(KeyValue{key, reduceF(key, vlist)})\n\t}\n\n}", "func (p *AsmParser) run() {\n\tdefer close(p.Output)\n\n\tvar errs errorList\n\n\tif p.Error != nil {\n\t\treturn\n\t}\n\n\tvar i asm // instruction, reset to 0 after every write\n\tvar err error\n\tvar d, c, j asm // dest, comp, jump, OR together for final instruction\n\n\twriteResult := func() {\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\n\t\tif errs == nil {\n\t\t\tp.Output <- fmt.Sprintf(\"%.16b\", i)\n\t\t}\n\n\t\ti = 0\n\t}\n\n\tfor index, lex := range p.lexemes {\n\n\t\tswitch lex.instruction {\n\n\t\t// possible edge case, hitting EOF before an EOL\n\t\tcase asmEOF:\n\t\t\tfallthrough\n\n\t\tcase asmEOL:\n\t\t\tprev := p.previousInstruction(index)\n\n\t\t\tif prev.instruction != asmLABEL {\n\t\t\t\twriteResult()\n\t\t\t}\n\n\t\tcase asmAINSTRUCT:\n\t\t\tprev := p.previousInstruction(index)\n\n\t\t\tif prev.instruction == asmAINSTRUCT {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"WARNING - redundant loading of A-Register on line %d\\n\", prev.lineNum)\n\t\t\t}\n\n\t\t\ti, err = p.mapToA(lex)\n\n\t\tcase 
asmLABEL:\n\t\t\tindex += 2 // skip label and EOL\n\t\t\tcontinue\n\n\t\tcase asmJUMP:\n\t\t\tj, err = mapJmp(lex.value)\n\t\t\ti = i | j\n\n\t\tcase asmCOMP:\n\t\t\tc, err = mapCmp(lex.value)\n\t\t\ti = i | c\n\n\t\tcase asmDEST:\n\t\t\td, err = mapDest(lex.value)\n\t\t\ti = i | d\n\t\t}\n\n\t\tindex++\n\t}\n\n\tp.Error = errs.asError()\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\n\tcallMs := true\n\n\ttfl := make([]string, 0)\n\tfor callMs {\n\t\tcallMs, _ = callMaster(mapf, &tfl)\n\t\t//time.Sleep(5 * time.Second)\n\t}\n\n\t//\tsort.Sort(ByKey(intermediate))\n\trand.Seed(time.Now().UnixNano())\n\tred := rand.Intn(1000)\n\tfmt.Printf(\"Reducer filename %d \\n\", red)\n\toname := fmt.Sprintf(\"mr-out-%d.txt\", red)\n\n\tofile, _ := os.Create(oname)\n\tintermediate1 := []KeyValue{}\n\tvar fm sync.Mutex\n\tfm.Lock()\n\tfor _, tf := range tfl {\n\t\tfile, err := os.Open(tf)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"cannot open %v\", tf)\n\t\t}\n\t\tdec := json.NewDecoder(file)\n\t\tfor {\n\t\t\tvar kv KeyValue\n\t\t\tif err := dec.Decode(&kv); err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tintermediate1 = append(intermediate1, kv)\n\t\t}\n\t}\n\tsort.Sort(ByKey(intermediate1))\n\n\tfm.Unlock()\n\ti := 0\n\tfor i < len(intermediate1) {\n\t\tj := i + 1\n\t\tfor j < len(intermediate1) && intermediate1[j].Key == intermediate1[i].Key {\n\t\t\tj++\n\t\t}\n\t\tvalues := []string{}\n\t\tfor k := i; k < j; k++ {\n\t\t\tvalues = append(values, intermediate1[k].Value)\n\t\t}\n\t\toutput := reducef(intermediate1[i].Key, values)\n\n\t\t// this is the correct format for each line of Reduce output.\n\t\tfmt.Fprintf(ofile, \"%v %v\\n\", intermediate1[i].Key, output)\n\n\t\ti = j\n\t}\n\tfor _, f := range tfl {\n\t\tos.Remove(f)\n\t}\n\tofile.Close()\n\tCallNotify(\"wc\", 0)\n\n}", "func doReduce(\n\tjobName string, // the name of the whole MapReduce job\n\treduceTaskNumber int, // which reduce task this is\n\toutFile string, // write the output here\n\tnMap int, // the number of map tasks that were run (\"M\" in the paper)\n\treduceF func(key string, values []string) string,\n) {\n\tif(!Exists(outFile)) {\n\t\tos.Create(outFile)\n\t}\n\tout, _ := os.OpenFile(outFile, os.O_RDWR|os.O_APPEND, 0660)\n\n\tfor mapId := 0; mapId < nMap; mapId ++ {\n\t\tintermediateFileName := reduceName(jobName, mapId, reduceTaskNumber)\n\t\t//debug(\"In the reduce func file name = %s\\n\", intermediateFileName)\n\t\treadFile, _ := os.Open(intermediateFileName)\n\n\t\tdec := json.NewDecoder(readFile)\n\t\tenc := json.NewEncoder(out)\n\t\tkvMap := make(map[string] []string)\n\n\t\tfor {\n\t\t\tvar myJson KeyValue\n\t\t\tif err := dec.Decode(&myJson); err == io.EOF {\n\t\t\t\tbreak\n\t\t\t} else if err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\t_, ok := kvMap[myJson.Key]\n\t\t\tif !ok {\n\t\t\t\tkvMap[myJson.Key] = make([]string, 3)\n\t\t\t}\n\t\t\tkvMap[myJson.Key] = append(kvMap[myJson.Key], myJson.Value)\n\n\t\t}\n\t\tfor k, v := range kvMap {\n\t\t\tgenValue := reduceF(k, v)\n\t\t\tenc.Encode(KeyValue{k, genValue})\n\t\t}\n\n\t\treadFile.Close()\n\t}\n\tout.Close()\n\n\t//\n\t// You will need to write this function.\n\t//\n\t// You'll need to read one intermediate file from each map task;\n\t// reduceName(jobName, m, reduceTaskNumber) yields the file\n\t// name from map task m.\n\t//\n\t// Your doMap() encoded the key/value pairs in the 
intermediate\n\t// files, so you will need to decode them. If you used JSON, you can\n\t// read and decode by creating a decoder and repeatedly calling\n\t// .Decode(&kv) on it until it returns an error.\n\t//\n\t// You may find the first example in the golang sort package\n\t// documentation useful.\n\t//\n\t// reduceF() is the application's reduce function. You should\n\t// call it once per distinct key, with a slice of all the values\n\t// for that key. reduceF() returns the reduced value for that key.\n\t//\n\t// You should write the reduce output as JSON encoded KeyValue\n\t// objects to the file named outFile. We require you to use JSON\n\t// because that is what the merger than combines the output\n\t// from all the reduce tasks expects. There is nothing special about\n\t// JSON -- it is just the marshalling format we chose to use. Your\n\t// output code will look something like this:\n\t//\n\t// enc := json.NewEncoder(file)\n\t// for key := ... {\n\t// \tenc.Encode(KeyValue{key, reduceF(...)})\n\t// }\n\t// file.Close()\n\t//\n}", "func (job *MapOnlyJob) Run() error {\n\tif job.NewMapperF == nil {\n\t\treturn errors.New(\"MapOnlyJob: NewMapperF undefined!\")\n\t}\n\tif job.Source == nil {\n\t\treturn errors.New(\"MapOnlyJob: Source undefined!\")\n\t}\n\ttotalPart := 0\n\tendss := make([][]chan error, 0, len(job.Source))\n\tfor i := range job.Source {\n\t\tpartCount, err := job.Source[i].PartCount()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tends := make([]chan error, 0, partCount)\n\t\tfor part := 0; part < partCount; part++ {\n\t\t\tend := make(chan error, 1)\n\t\t\tends = append(ends, end)\n\t\t\tgo func(i, part, totalPart int, end chan error) {\n\t\t\t\tend <- func() error {\n\t\t\t\t\tmapper := job.NewMapperF(i, part)\n\t\t\t\t\tkey, val := mapper.NewKey(), mapper.NewVal()\n\t\t\t\t\tcs := make([]sophie.Collector, 0, len(job.Dest))\n\t\t\t\t\tfor _, dst := range job.Dest {\n\t\t\t\t\t\tc, err := dst.Collector(totalPart)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn errorsp.WithStacksAndMessage(err, \"open collector for source %d part %d failed\", i, part)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdefer c.Close()\n\t\t\t\t\t\tcs = append(cs, c)\n\t\t\t\t\t}\n\t\t\t\t\titer, err := job.Source[i].Iterator(part)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn errorsp.WithStacksAndMessage(err, \" open source %d part %d failed\", i, part)\n\t\t\t\t\t}\n\t\t\t\t\tdefer iter.Close()\n\n\t\t\t\t\tfor {\n\t\t\t\t\t\tif err := iter.Next(key, val); err != nil {\n\t\t\t\t\t\t\tif errorsp.Cause(err) != io.EOF {\n\t\t\t\t\t\t\t\treturn errorsp.WithStacksAndMessage(err, \"next failed\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif err := mapper.Map(key, val, cs); err != nil {\n\t\t\t\t\t\t\tif errorsp.Cause(err) == EOM {\n\t\t\t\t\t\t\t\tlog.Print(\"EOM returned, exit early\")\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\treturn errorsp.WithStacksAndMessage(err, \"mapping %v %v failed\", key, val)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn errorsp.WithStacksAndMessage(mapper.MapEnd(cs), \"map end failed\")\n\t\t\t\t}()\n\t\t\t}(i, part, totalPart, end)\n\t\t\ttotalPart++\n\t\t}\n\t\tendss = append(endss, ends)\n\t}\n\tvar errReturned error\n\tfor _, ends := range endss {\n\t\tfor part, end := range ends {\n\t\t\tlog.Printf(\"Waiting for mapper %d...\", part)\n\t\t\tif err := <-end; err != nil {\n\t\t\t\tlog.Printf(\"Error returned for part %d: %v\", part, err)\n\t\t\t\terrReturned = err\n\t\t\t}\n\t\t\tlog.Printf(\"No error for mapper %d...\", 
part)\n\t\t}\n\t}\n\treturn errReturned\n}", "func (m *FlowMapper) Run() {\n\tm.flowMap = ReadFlowMap(m.mapfile)\n\tzips := GetZipNames(m.workdir)\n\tfor _, name := range zips {\n\t\tsourcePath := filepath.Join(m.workdir, name)\n\t\ttargetPath := filepath.Join(m.workdir, \"peflocus_\"+name)\n\t\tDeleteExisting(targetPath)\n\t\tlog.Println(\"INFO: map flows in\", sourcePath, \"to\", targetPath)\n\t\tm.doIt(sourcePath, targetPath)\n\t}\n}", "func (s *scanner) run() {\n\tfor state := scanMain; state != nil; {\n\t\tstate = state(s)\n\t}\n\tclose(s.items)\n}", "func doReduce(\n\tjobName string, // the name of the whole MapReduce job\n\treduceTaskNumber int, // which reduce task this is\n\tnMap int, // the number of map tasks that were run (\"M\" in the paper)\n\treduceF func(key string, values []string) string,\n) {\n\t// TODO:\n\t// You will need to write this function.\n\t// You need to use reduceName(jobName, m, reduceTaskNumber) to locate the intermediate file for this reduce task.\n\t// Remember that you need to decode the contents of the intermediate files.\n\n\t\n\t// Read the files.\n\tfmt.Printf(\"mergeFile: %d %d\\n\",nMap,reduceTaskNumber);\n\n\t// Iterate over the files by nMap; the map tasks each produce a file for this reduce task.\n\t// After reading, parse the contents into key/value pairs and store them in a map: key string, values []string.\n\t// Finally, call reduceF for each key to get the result string, i.e. a key-value pair,\n\t// and write the final key-value pairs into the mergeName file.\n\tvar maps=make(map[string] []string) // a map holding the key-to-values relation for reduceF\n\t//var list=make([]KeyValue,10)\n\tfor i :=0;i<nMap;i++ {\n\t\tmergeFile :=reduceName(jobName, i, reduceTaskNumber);\n\t\t// read this file's contents, then decode them\n\t\t// fmt.Printf(\"mergeFile: %s\\n\",mergeFile);\n\t\tinfile, _ := os.Open(mergeFile)\t\n\t\tdec := json.NewDecoder(infile)\n\n\t\tvar v KeyValue\n\t\t\n\t\terr :=dec.Decode(&v); \n\t\t// fmt.Printf(\"Decode: %s %s\\n\",v.Key,err==nil);\n\t\tfor err==nil{\n\t\t\t//fmt.Printf(\"Decode: %s %s\\n\",v.Key,err);\n\n\t\t\t_,ok :=maps[v.Key]\n\t\t\tif !ok{// not found\n\t\t\t\tmaps[v.Key]=make([]string,0,1000)\n\t\t\t}\n\t\t\tmaps[v.Key]=append(maps[v.Key],v.Value)\n\t\t\t// keep decoding\n\t\t\terr =dec.Decode(&v); \n\t\t}\n\n\t\t// parsing done\n\t\tdefer infile.Close()\n\t}\n\n\n\tredcueFN :=mergeName(jobName,reduceTaskNumber);\n\tredcueOF,_ :=os.Create(redcueFN)\t\n\tenc := json.NewEncoder(redcueOF)\n\n\t// iterate over maps\n\tfor nkey, nvalue := range maps {\n\t\t// fmt.Printf(\"maps: %s %s\\n\",nkey,nvalue);\n\t\trs :=reduceF(nkey,nvalue)\n\t\t// list=append(list,KeyValue{nkey,rs})\n\t\tenc.Encode(KeyValue{nkey,rs})\n\t}\n \tredcueOF.Close()\n\n\t// You can find the intermediate file for this reduce task from map task number\n\t// m using reduceName(jobName, m, reduceTaskNumber).\n\t// Remember that you've encoded the values in the intermediate files, so you\n\t// will need to decode them. If you chose to use JSON, you can read out\n\t// multiple decoded values by creating a decoder, and then repeatedly calling\n\t// .Decode() on it until Decode() returns an error.\n\t\n\t// You need to encode the reduce output KeyValue pairs as JSON and write them to the file named by mergeName().\n\t//\n\t// You should write the reduced output as JSON encoded KeyValue\n\t// objects to a file named mergeName(jobName, reduceTaskNumber). We require\n\t// you to use JSON here because that is what the merger that combines the\n\t// output from all the reduce tasks expects. There is nothing \"special\" about\n\t// JSON -- it is just the marshalling format we chose to use. It will look\n\t// something like this:\n\t//\n\t// mergeFile :=mergeName(jobName,reduceTaskNumber);\n\t// enc := json.NewEncoder(mergeFile)\n\t// for key in ... 
{\n\t// \tenc.Encode(KeyValue{key, reduceF(...)})\n\t// }\n\t// file.Close()\n}", "func doMap(\n\tjobName string, // the name of the MapReduce job\n\tmapTaskNumber int, // which map task this is\n\tinFile string,\n\tnReduce int, // the number of reduce task that will be run (\"R\" in the paper)\n\tmapF func(file string, contents string) []KeyValue,\n) {\n\tstream, err := ioutil.ReadFile(inFile)\n\tcheck_error(err)\n\n\tkeyVals := mapF(inFile, string(stream))\n\t\n\tresults := make(map[int][]KeyValue)\n\tfor _, kv := range keyVals {\n\t\t// Calculate R\n\t\tr := ihash(kv.Key) % nReduce\n\n\t\t// Map the results internally\n\t\tresults[r] = append(results[r], kv)\n\t}\n\n\tfor r, keyVals := range results {\n\t\toutputFileName := reduceName(jobName, mapTaskNumber, r)\n\t\tfile, err := os.Create(outputFileName)\n\t\tcheck_error(err)\n\t\tenc := json.NewEncoder(file)\n\n\t\tfor _, kv := range keyVals {\n\t\t\terr := enc.Encode(&kv)\n\t\t\tcheck_error(err)\n\t\t}\n\n\t\tfile.Close()\n\t}\n}", "func doReduce(\n\tjobName string, // the name of the whole MapReduce job\n\treduceTaskNumber int, // which reduce task this is\n\tnMap int, // the number of map tasks that were run (\"M\" in the paper)\n\treduceF func(key string, values []string) string,\n) {\n\treduceOutputName := mergeName(jobName, reduceTaskNumber)\n\troutputfile, err := os.Create(reduceOutputName)\n\t//var m map[string][]string = make(map[string][]string)\n\tcheckError(err)\n\tdefer routputfile.Close()\n\tkeyValMap := make(map[string][]string)\n\tfor i := 0; i < nMap; i++ {\n\t\tfileName := reduceName(jobName, i, reduceTaskNumber)\n\t\tfmt.Println(\"The reduced task fileName is \", fileName)\n\t\tdat, err := ioutil.ReadFile(fileName)\n\t\tcheckError(err)\n\t\tdec := json.NewDecoder(strings.NewReader(string(dat)))\n\t\tfor {\n\t\t\tvar m KeyValue\n\t\t\terr = dec.Decode(&m)\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tcheckError(err)\n\t\t\t}\n\t\t\tkeyValMap[m.Key] = append(keyValMap[m.Key], m.Value)\n\t\t}\n\t}\n\n\tenc := json.NewEncoder(routputfile)\n\tfor k, v := range keyValMap {\n\t\terr := enc.Encode(KeyValue{k, reduceF(k, v)})\n\t\tcheckError(err)\n\t}\n\n}", "func doReduce(\n\tjobName string, // the name of the whole MapReduce job\n\treduceTaskNumber int, // which reduce task this is\n\tnMap int, // the number of map tasks that were run (\"M\" in the paper)\n\treduceF func(key string, values []string) string,\n) {\n\n\t//Read the keyValues of reducer on data structure\n\tjsonMap := make(map[string][]KeyValue)\n\tfor m := 0; m < nMap; m++ {\n\n\t\tfilename := reduceName(jobName, m, reduceTaskNumber)\n\n\t\tfile, err := os.Open(filename)\n\t\tcheckError(err)\n\n\t\tvar kv []KeyValue\n\t\tencoder := json.NewDecoder(file)\n\t\terr = encoder.Decode(&kv)\n\t\tcheckError(err)\n\n\t\tjsonMap[filename] = kv\n\t}\n\n\t//Create the encode data\n\treduceMap := make(map[string][]string)\n\tfor _, kvs := range jsonMap {\n\t\tfor _, kv := range kvs {\n\t\t\treduceMap[kv.Key] = append(reduceMap[kv.Key], kv.Value)\n\t\t}\n\t}\n\n\t//Create the output file\n\toutputFile := mergeName(jobName, reduceTaskNumber)\n\tfile, err := os.OpenFile(outputFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)\n\tcheckError(err)\n\tencoder := json.NewEncoder(file)\n\n\t//Call the reducer with the actual data\n\tfor key, values := range reduceMap {\n\t\tres := reduceF(key, values)\n\t\tencoder.Encode(KeyValue{key, res})\n\t}\n\n\tfile.Close()\n}", "func doMap(\n\tjobName string, // the name of the MapReduce job\n\tmapTaskNumber int, // which map 
task this is\n\tinFile string,\n\tnReduce int, // the number of reduce task that will be run (\"R\" in the paper)\n\tmapF func(file string, contents string) []KeyValue,\n) {\n\tcontent, err := ioutil.ReadFile(inFile)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tkeyValues := mapF(inFile, string(content))\n\treduceFiles := make(map[string]*os.File)\n\n\tfor _, kv := range keyValues {\n\t\treduceTaskNumber := ihash(kv.Key) % nReduce\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\treduceFileName := reduceName(jobName, mapTaskNumber, reduceTaskNumber)\n\n\t\tif reduceFiles[reduceFileName] == nil {\n\t\t\tf, err := os.OpenFile(reduceFileName, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\treduceFiles[reduceFileName] = f\n\t\t}\n\n\t\tf := reduceFiles[reduceFileName]\n\t\tenc := json.NewEncoder(f)\n\t\tenc.Encode(&kv)\n\t}\n\n\tfor _, f := range reduceFiles {\n\t\tf.Close()\n\t}\n}", "func doReduce(\n\tjobName string, // the name of the whole MapReduce job\n\treduceTaskNumber int, // which reduce task this is\n\toutFile string, // write the output here\n\tnMap int, // the number of map tasks that were run (\"M\" in the paper)\n\treduceF func(key string, values []string) string,\n) {\n\tmidContentBuf := bytes.NewBuffer(nil)\n\tfor maps := 0; maps < nMap; maps++ {\n\t\tf, err := os.Open(reduceName(jobName, maps, reduceTaskNumber))\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\treturn\n\t\t}\n\t\tio.Copy(midContentBuf, f)\n\t\tf.Close()\n\t}\n\tdecoder := json.NewDecoder(bytes.NewReader(midContentBuf.Bytes()))\n\tvar kv KeyValue\n\tkeyValueMap := make(map[string][]string)\n\tfor {\n\t\terr := decoder.Decode(&kv)\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tkeyValueMap[kv.Key] = append(keyValueMap[kv.Key], kv.Value)\n\t}\n\tkeys := []string{}\n\tfor keyValueSingle := range keyValueMap {\n\t\tkeys = append(keys, keyValueSingle)\n\t}\n\tsort.Strings(keys)\n\tanswerFileName := mergeName(jobName, reduceTaskNumber)\n\tanswerFile, err := os.OpenFile(answerFileName, os.O_CREATE|os.O_WRONLY, 0644)\n\tdefer answerFile.Close()\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\treturn\n\t}\n\tencoder := json.NewEncoder(answerFile)\n\tfor _, key := range keys {\n\t\tencoder.Encode(KeyValue{key, reduceF(key, keyValueMap[key])})\n\t}\n}", "func (r *ride) run(ctx context.Context, outc chan<- pipeline.Event) error {\n\tpositions, errc := pipeline.Generate(ctx, r.positions)\n\tsegments, errc1 := pipeline.Reduce(ctx, positions, r.segments)\n\ttotal, err := r.fare(ctx, segments)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terrm := pipeline.MergeErrors(ctx, errc, errc1)\n\tfor err := range errm {\n\t\tswitch {\n\t\tcase err == ErrLinesEmpty:\n\t\tcase err != nil:\n\t\t\treturn err\n\t\t}\n\t}\n\n\tselect {\n\tcase <-ctx.Done():\n\tcase outc <- total:\n\t}\n\n\treturn nil\n}", "func (a *aggregator) Run() {\n\tgo a.submitter()\n\n\tfor m := range a.in {\n\t\tfor _, out_m := range a.process(m) {\n\t\t\ta.out <- out_m\n\t\t}\n\t}\n}", "func (l *lexer) run() {\nmainLoop:\n\tfor {\n\t\tif !processWhitespace(l) {\n\t\t\tbreak\n\t\t}\n\t\t//fmt.Println(\"testing\", string(l.peek()))\n\t\tfound := false\n\tprocessLoop:\n\t\tfor _, processFunc := range processFunctions {\n\t\t\t//fmt.Println(\"func =\", processFunc)\n\t\t\tresult := processFunc(l)\n\t\t\t//fmt.Println(\"peek = \", string(l.peek()))\n\t\t\tswitch result {\n\t\t\tcase resultMatch:\n\t\t\t\tfound = true\n\t\t\t\tbreak processLoop\n\t\t\tcase 
resultMatchError:\n\t\t\t\tbreak mainLoop\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tl.errorf(\"Invalid token: '%s'\", string(l.peek()))\n\t\t\tbreak\n\t\t}\n\t}\n\tl.emit(itemEOF)\n\tclose(l.items)\n}", "func (l *Lexer) run() {\n\tfor state := lexAction; state != nil; {\n\t\tstate = state(l)\n\t}\n\tclose(l.tokens)\n}", "func (gm *gmap) run() {\n\t// Destruct gmap before exit.\n\tdefer func() {\n\t\tgm.raft.Stop()\n\t\tclose(gm.done)\n\t}()\n\t// Start gmap raft node.\n\tgo gm.raft.run()\n\t// Apply entries and snapshots received from raft.\n\tvar gmp gmapProgress\n\tfor {\n\t\tselect {\n\t\t// New apply.\n\t\tcase ap := <-gm.raft.applyc:\n\t\t\tgm.applyAll(&gmp, &ap)\n\t\t// gmap is closed.\n\t\tcase <-gm.ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}", "func doReduce(\n\tjobName string, // the name of the whole MapReduce job\n\treduceTaskNumber int, // which reduce task this is\n\tnMap int, // the number of map tasks that were run (\"M\" in the paper)\n\treduceF func(key string, values []string) string,\n) {\n\n\t//a map from each key to the values produced for it by the map function\n\treduceBuffer := make(map[string][]string)\n\n\t//for every map task\n\tfor i := 0; i < nMap; i++ {\n\n\t\t//find the intermediate file for this reduce task from map task num i using reduceName\n\t\tfile := reduceName(jobName, i, reduceTaskNumber)\n\t\tfileMapping, err := os.Open(file)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\t//need to decode the encoded intermediate file:\n\t\t//\"repeatedly calling .Decode() on it until Decode() returns an error.\"\n\t\tdec := json.NewDecoder(fileMapping)\n\n\t\tfor {\n\t\t\tvar kv KeyValue\n\t\t\t//the opposite of what occurred in common_map.go (\"err := enc.Encode(&kvPair)\")\n\t\t\terr = dec.Decode(&kv)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t//a decoded KeyValue is appended under its key; append handles a\n\t\t\t//not-yet-seen key (nil slice), so no existence check is needed\n\t\t\t//adapted from: http://stackoverflow.com/questions/2050391/how-to-check-if-a-map-contains-a-key-in-go\n\t\t\treduceBuffer[kv.Key] = append(reduceBuffer[kv.Key], kv.Value)\n\t\t}\n\n\t\tfileMapping.Close()\n\t}\n\n\t//here adding all of the keys from the mapped intermediate files\n\tvar totalKeys []string\n\tfor k := range reduceBuffer {\n\t\ttotalKeys = append(totalKeys, k)\n\t}\n\n\t//since the buffer has all KeyValue pairs from map, now merge aka reduce\n\tmergeFile, err := os.Create(mergeName(jobName, reduceTaskNumber))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t//write the reduced output as JSON encoded KeyValue objects to a file named mergeName\n\tenc := json.NewEncoder(mergeFile)\n\n\t//last step: loop over every key and for each call reduceF\n\tfor _, key := range totalKeys {\n\t\tr := reduceF(key, reduceBuffer[key])\n\t\tenc.Encode(KeyValue{key, r})\n\t}\n\n\tmergeFile.Close()\n\n\t// TODO:\n\t// You will need to write this function.\n\t// You can find the intermediate file for this reduce task from map task number\n\t// m using reduceName(jobName, m, reduceTaskNumber).\n\t// Remember that you've encoded the values in the intermediate files, so you\n\t// will need to decode them. 
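\t//\n\t// (An illustrative decode loop in that style; a sketch only, reusing the\n\t// KeyValue type and the reduceBuffer map already defined in this snippet:)\n\t//\n\t// dec := json.NewDecoder(fileMapping)\n\t// for {\n\t// \tvar kv KeyValue\n\t// \tif err := dec.Decode(&kv); err != nil {\n\t// \t\tbreak\n\t// \t}\n\t// \treduceBuffer[kv.Key] = append(reduceBuffer[kv.Key], kv.Value)\n\t// }\n\t//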
If you chose to use JSON, you can read out\n\t// multiple decoded values by creating a decoder, and then repeatedly calling\n\t// .Decode() on it until Decode() returns an error.\n\t//\n\t// You should write the reduced output as JSON encoded KeyValue\n\t// objects to a file named mergeName(jobName, reduceTaskNumber). We require\n\t// you to use JSON here because that is what the merger that combines the\n\t// output from all the reduce tasks expects. There is nothing \"special\" about\n\t// JSON -- it is just the marshalling format we chose to use. It will look\n\t// something like this:\n\t//\n\t// enc := json.NewEncoder(mergeFile)\n\t// for key in ... {\n\t// \tenc.Encode(KeyValue{key, reduceF(...)})\n\t// }\n\t// mergeFile.Close()\n}
append(values, intermediate[k].Value)\n\t\t\t\t}\n\t\t\t\toutput := reducef(intermediate[i].Key, values)\n\n\t\t\t\t// this is the correct format for each line of Reduce output.\n\t\t\t\tfmt.Fprintf(ofile, \"%v %v\\n\", intermediate[i].Key, output)\n\n\t\t\t\ti = j\n\t\t\t}\n\t\t\tCompleteReduceTask(id)\n\t\t} else if reply.Type == \"exit\" {\n\t\t\tbreak\n\t\t}\n\n\t\ttime.Sleep(1000 * time.Millisecond)\n\t\treply = GetTask()\n\t\tid = reply.Id\n\t\tfilename = reply.Filename\n\t\tintermediate = []KeyValue{}\n\t}\n\n}", "func (l *Lexer) run() {\n\tdefer close(l.items)\n\teor := len(l.rec.States) - 1\n\tfor {\n\t\tfor i, state := range l.rec.States {\n\t\t\tif !state.StateFn(l, state.ItemType, state.Emit) {\n\t\t\t\tl.rec.ErrorFn(l)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif i == eor || l.eof {\n\t\t\t\tl.Emit(ItemEOR)\n\t\t\t}\n\t\t}\n\t\tif l.Peek() == EOF {\n\t\t\tl.Emit(ItemEOF)\n\t\t\tbreak\n\t\t}\n\t}\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\tfor {\n\t\thargs := HandlerArgs{}\n\t\threply := HandlerReply{}\n\n\t\tcall(\"Coordinator.Handler\", &hargs, &hreply)\n\t\t//log.Println(\"hreply\", hreply)\n\t\tif hreply.JobType == \"map\" {\n\n\t\t\tfile, err := os.Open(hreply.MapFile)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"cannot open %v\", hreply.MapFile)\n\t\t\t}\n\t\t\tcontent, err := ioutil.ReadAll(file)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"cannot read %v\", hreply.MapFile)\n\t\t\t}\n\t\t\tfile.Close()\n\t\t\tkva := mapf(hreply.MapFile, string(content))\n\n\t\t\ttotal := []*json.Encoder{}\n\n\t\t\tfor i := 0; i < hreply.ReduceNum; i++ {\n\t\t\t\ttmp, err := os.Create(fmt.Sprintf(\"mr-%v-%v.json\", hreply.MapIndex, i))\n\t\t\t\tdefer tmp.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tenc := json.NewEncoder(tmp)\n\t\t\t\ttotal = append(total, enc)\n\t\t\t}\n\n\t\t\tfor _, onekva := range kva {\n\t\t\t\tcurr := total[ihash(onekva.Key)%10]\n\t\t\t\tcurr.Encode(&onekva)\n\t\t\t}\n\t\t\tlog.Printf(\"map job mr-%v finished\", hreply.MapIndex)\n\n\t\t\tnargs := NotifyArgs{}\n\t\t\tnreply := NotifyReply{}\n\t\t\tnargs.NotifyType = \"map\"\n\t\t\tnargs.NotifyIndex = hreply.MapIndex\n\n\t\t\tcall(\"Coordinator.Notify\", &nargs, &nreply)\n\n\t\t} else if hreply.JobType == \"reduce\" {\n\n\t\t\tkva := []KeyValue{}\n\t\t\tfor i := 0; i < hreply.MapNum; i++ {\n\t\t\t\ttmp, err := os.Open(fmt.Sprintf(\"mr-%v-%v.json\", i, hreply.ReduceIndex))\n\t\t\t\tdefer tmp.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\tdec := json.NewDecoder(tmp)\n\n\t\t\t\tfor {\n\t\t\t\t\tvar kv KeyValue\n\t\t\t\t\tif err := dec.Decode(&kv); err != nil {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tkva = append(kva, kv)\n\t\t\t\t}\n\t\t\t}\n\t\t\tsort.Sort(ByKey(kva))\n\t\t\toname := fmt.Sprintf(\"mr-out-%v\", hreply.ReduceIndex)\n\t\t\tofile, _ := os.Create(oname)\n\n\t\t\ti := 0\n\t\t\tfor i < len(kva) {\n\t\t\t\tj := i + 1\n\t\t\t\tfor j < len(kva) && kva[j].Key == kva[i].Key {\n\t\t\t\t\tj++\n\t\t\t\t}\n\t\t\t\tvalues := []string{}\n\t\t\t\tfor k := i; k < j; k++ {\n\t\t\t\t\tvalues = append(values, kva[k].Value)\n\t\t\t\t}\n\t\t\t\toutput := reducef(kva[i].Key, values)\n\n\t\t\t\t// this is the correct format for each line of Reduce output.\n\t\t\t\tfmt.Fprintf(ofile, \"%v %v\\n\", kva[i].Key, output)\n\n\t\t\t\ti = j\n\t\t\t}\n\t\t\tlog.Printf(\"reduce job mr-%v finished\", hreply.ReduceIndex)\n\n\t\t\tnargs := NotifyArgs{}\n\t\t\tnreply := 
NotifyReply{}\n\t\t\tnargs.NotifyType = \"reduce\"\n\t\t\tnargs.NotifyIndex = hreply.ReduceIndex\n\n\t\t\tcall(\"Coordinator.Notify\", &nargs, &nreply)\n\n\t\t} else if hreply.JobType == \"retry\" {\n\t\t\t//log.Println(\"retry--------------\")\n\t\t} else if hreply.JobType == \"alldone\" {\n\t\t\tos.Exit(0)\n\t\t} else {\n\t\t\t//brief pause before polling the coordinator again\n\t\t\ttime.Sleep(100 * time.Microsecond)\n\t\t}\n\t}\n\t// uncomment to send the Example RPC to the coordinator.\n\t// CallExample()\n\n}", "func doMap(\n\tjobName string, // the name of the MapReduce job\n\tmapTaskNumber int, // which map task this is\n\tinFile string,\n\tnReduce int, // the number of reduce task that will be run (\"R\" in the paper)\n\tmapF func(file string, contents string) []KeyValue,\n) {\n\t// TODO:\n\t// You will need to write this function.\n\t// You can find the filename for this map task's input to reduce task number\n\t// r using reduceName(jobName, mapTaskNumber, r). The ihash function (given\n\t// below doMap) should be used to decide which file a given key belongs into.\n\t//\n\t// The intermediate output of a map task is stored in the file\n\t// system as multiple files whose name indicates which map task produced\n\t// them, as well as which reduce task they are for. Coming up with a\n\t// scheme for how to store the key/value pairs on disk can be tricky,\n\t// especially when taking into account that both keys and values could\n\t// contain newlines, quotes, and any other character you can think of.\n\t//\n\t// One format often used for serializing data to a byte stream that the\n\t// other end can correctly reconstruct is JSON. You are not required to\n\t// use JSON, but as the output of the reduce tasks *must* be JSON,\n\t// familiarizing yourself with it here may prove useful. You can write\n\t// out a data structure as a JSON string to a file using the commented\n\t// code below. The corresponding decoding functions can be found in\n\t// common_reduce.go.\n\t//\n\t// enc := json.NewEncoder(file)\n\t// for _, kv := ... {\n\t// \terr := enc.Encode(&kv)\n\t// }\n\t//\n\t// Remember to close the file after you have written all the values!\n\n\t//step 1: read the input file\n\tcontents, err := ioutil.ReadFile(inFile)\n\tif err != nil {\n\t\tlog.Fatal(\"do map error for inFile \", err)\n\t}\n\t//step 2: call the user's map function to get the key/value pairs\n\tkvResult := mapF(inFile, string(contents))\n\n\t/**\n\t * step 3: use the key of each kv to partition into nReduce files\n\t * a. create tmpFiles\n\t * b. create encoder for tmpFile to write contents\n\t * c. 
partition by key, then write tmpFile\n\t */\n\n\tvar tmpFiles [] *os.File = make([] *os.File, nReduce)\n\tvar encoders [] *json.Encoder = make([] *json.Encoder, nReduce)\n\n\tfor i := 0; i < nReduce; i++ {\n\t\ttmpFileName := reduceName(jobName,mapTaskNumber,i)\n\t\ttmpFiles[i],err = os.Create(tmpFileName)\n\t\tif err!=nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tdefer tmpFiles[i].Close()\n\t\tencoders[i] = json.NewEncoder(tmpFiles[i])\n\t\tif err!=nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tfor _ , kv := range kvResult {\n\t\thashKey := int(ihash(kv.Key)) % nReduce\n\t\terr := encoders[hashKey].Encode(&kv)\n\t\tif err!=nil {\n\t\t\tlog.Fatal(\"do map encoders \",err)\n\t\t}\n\t}\n\n}", "func (p *parser) run() {\n\tfor parserState := parseStart; parserState != nil; {\n\t\tparserState = parserState(p)\n\t}\n\tclose(p.records)\n}", "func (lx *lexer) run() {\n\tfor state := lxBase; state != nil; {\n\t\tstate = state(lx)\n\t}\n\tclose(lx.tokStream)\n}", "func doReduce(\n\tjobName string, // the name of the whole MapReduce job\n\treduceTaskNumber int, // which reduce task this is\n\tnMap int, // the number of map tasks that were run (\"M\" in the paper)\n\treduceF func(key string, values []string) string,\n) {\n\n\tkeyValues := make(map[string][]string)\n\ti := 0\n\tfor i < nMap {\n\t\tfileName := reduceName(jobName, i, reduceTaskNumber)\n\t\tfile, ferr := os.Open(fileName)\n\t\tif ferr != nil {\n\t\t\tlog.Fatal(ferr)\n\t\t}\n\t\tdecoder := json.NewDecoder(file)\n\n\t\tfor {\n\t\t\tvar kv KeyValue\n\t\t\tderr := decoder.Decode(&kv)\n\t\t\tif derr != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t_, ok := keyValues[kv.Key]\n\t\t\tif !ok {\n\t\t\t\tkeyValues[kv.Key] = make([]string, 0)\n\t\t\t}\n\t\t\tkeyValues[kv.Key] = append(keyValues[kv.Key], kv.Value)\n\t\t}\n\t\ti++\n\t}\n\n\tvar keys []string\n\tfor k := range keyValues {\n\t\tkeys = append(keys, k)\n\t}\n\n\tsort.Strings(keys)\n\n\tmfile, merr := os.Create(mergeName(jobName, reduceTaskNumber))\n\tif merr != nil {\n\t\tlog.Fatal(merr)\n\t}\n\tenc := json.NewEncoder(mfile)\n\tfor _, k := range keys {\n\t\tenc.Encode(KeyValue{k, reduceF(k, keyValues[k])})\n\t}\n\tmfile.Close()\n}", "func (e *binaryExprEvaluator) run() {\n\tfor {\n\t\t// Read LHS value.\n\t\tlhs, ok := <-e.lhs.C()\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\n\t\t// Read RHS value.\n\t\trhs, ok := <-e.rhs.C()\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\n\t\t// Merge maps.\n\t\tm := make(map[string]interface{})\n\t\tfor k, v := range lhs {\n\t\t\tm[k] = e.eval(v, rhs[k])\n\t\t}\n\t\tfor k, v := range rhs {\n\t\t\t// Skip value if already processed in lhs loop.\n\t\t\tif _, ok := m[k]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tm[k] = e.eval(float64(0), v)\n\t\t}\n\n\t\t// Return value.\n\t\te.c <- m\n\t}\n\n\t// Mark the channel as complete.\n\tclose(e.c)\n}", "func (l *lexer) run() {\n\tfor state := lexBlock; state != nil; {\n\t\tstate = state(l)\n\t}\n\tclose(l.items) // No more tokens will be delivered.\n}", "func (theBoss *theBoss) mapReads() error {\n\ttheBoss.alignments = make(chan *sam.Record, BUFFERSIZE)\n\n\t// set up the BAM if exact alignment is requested\n\tif !theBoss.info.Sketch.NoExactAlign {\n\t\tif err := theBoss.setupBAM(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// setup the waitgroups for the sketching and graphing minions\n\tvar wg1 sync.WaitGroup\n\tvar wg2 sync.WaitGroup\n\n\t// launch the graph minions (one minion per graph in the index)\n\ttheBoss.graphMinionRegister = make([]*graphMinion, len(theBoss.info.Store))\n\tfor _, graph := range theBoss.info.Store {\n\n\t\t// create, 
start and register the graph minion\n\t\tminion := newGraphMinion(theBoss, graph)\n\t\twg2.Add(1)\n\t\tminion.start(&wg2)\n\t\ttheBoss.graphMinionRegister[graph.GraphID] = minion\n\t}\n\n\t// launch the sketching minions (one per CPU)\n\tfor i := 0; i < theBoss.info.NumProc; i++ {\n\t\twg1.Add(1)\n\t\tgo func(workerNum int) {\n\t\t\tdefer wg1.Done()\n\n\t\t\t// keep a track of what this minion does\n\t\t\treceivedReads := 0\n\t\t\tmappedCount := 0\n\t\t\tmultimappedCount := 0\n\n\t\t\t// start the main processing loop\n\t\t\tfor {\n\n\t\t\t\t// pull reads from queue until done\n\t\t\t\tread, ok := <-theBoss.reads\n\t\t\t\tif !ok {\n\n\t\t\t\t\t// update the counts\n\t\t\t\t\ttheBoss.Lock()\n\t\t\t\t\ttheBoss.receivedReadCount += receivedReads\n\t\t\t\t\ttheBoss.mappedCount += mappedCount\n\t\t\t\t\ttheBoss.multimappedCount += multimappedCount\n\t\t\t\t\ttheBoss.Unlock()\n\n\t\t\t\t\t// end the sketching minion\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// get sketch for read\n\t\t\t\treadSketch, err := read.RunMinHash(theBoss.info.KmerSize, theBoss.info.SketchSize, false, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\t// get the number of k-mers in the sequence\n\t\t\t\tkmerCount := (len(read.Seq) - theBoss.info.KmerSize) + 1\n\n\t\t\t\t// query the LSH ensemble\n\t\t\t\tresults, err := theBoss.info.db.Query(readSketch, kmerCount, theBoss.info.ContainmentThreshold)\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\t// if multiple graphs are returned, we need to deep copy the read\n\t\t\t\tdeepCopy := false\n\t\t\t\tif len(results) > 1 {\n\t\t\t\t\tdeepCopy = true\n\t\t\t\t}\n\n\t\t\t\t// augment graphs and optionally perform exact alignment\n\t\t\t\tfor graphID, hits := range results {\n\t\t\t\t\tif deepCopy {\n\t\t\t\t\t\treadCopy := *read.DeepCopy()\n\t\t\t\t\t\ttheBoss.graphMinionRegister[graphID].inputChannel <- &graphMinionPair{hits, readCopy}\n\t\t\t\t\t} else {\n\t\t\t\t\t\ttheBoss.graphMinionRegister[graphID].inputChannel <- &graphMinionPair{hits, *read}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// update counts\n\t\t\t\treceivedReads++\n\t\t\t\tif len(results) > 0 {\n\t\t\t\t\tmappedCount++\n\t\t\t\t}\n\t\t\t\tif len(results) > 1 {\n\t\t\t\t\tmultimappedCount++\n\t\t\t\t}\n\t\t\t}\n\t\t}(i)\n\t}\n\n\t// control the channels\n\tgo func() {\n\n\t\t// wait for the sketching minions to finish\n\t\twg1.Wait()\n\n\t\t// shut down the graph minions input channels\n\t\tfor _, minion := range theBoss.graphMinionRegister {\n\t\t\tclose(minion.inputChannel)\n\t\t}\n\n\t\t// wait for the graph minions to finish\n\t\twg2.Wait()\n\n\t\t// end the alignment writer\n\t\tclose(theBoss.alignments)\n\n\t}()\n\n\t// collect the alignments and write them\n\tfor record := range theBoss.alignments {\n\t\t// check the record is valid\n\t\t//if sam.IsValidRecord(record) == false {\n\t\t//\tos.Exit(1)\n\t\t//}\n\t\ttheBoss.alignmentCount++\n\t\tif err := theBoss.bamwriter.Write(record); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// close the bam writer and return to the completed boss to the pipeline\n\tvar err error\n\tif !theBoss.info.Sketch.NoExactAlign {\n\t\terr = theBoss.bamwriter.Close()\n\t}\n\treturn err\n}", "func (w *WemoMap) Run() {\n\tnodeMap := map[string]string{}\n\tlocations := map[string]string{}\n\tfor m := range w.Info {\n\t\tf := strings.Split(m.(string), \",\")\n\t\tnodeMap[f[0]] = f[1]\n fmt.Println(nodeMap)\n\t\tif len(f) > 2 {\n\t\t\tlocations[f[0]] = f[2]\n fmt.Println(locations)\n\t\t}\n\t}\n\n\tvar group int\n\tfor m := range w.In 
{\n\t\tw.Out.Send(m)\n fmt.Println(\"M: \", m)\n\t\tif data, ok := m.(map[string]int); ok {\n fmt.Println(\"Data: \", data)\n\t\t\tswitch {\n\t\t\tcase data[\"<RF12demo>\"] > 0:\n\t\t\t\tgroup = data[\"group\"]\n\t\t\tcase data[\"<node>\"] > 0:\n\t\t\t\tkey := fmt.Sprintf(\"RFg%di%d\", group, data[\"<node>\"])\n\t\t\t\tif loc, ok := locations[key]; ok {\n\t\t\t\t\tw.Out.Send(flow.Tag{\"<location>\", loc})\n\t\t\t\t}\n\t\t\t\tif tag, ok := nodeMap[key]; ok {\n\t\t\t\t\tw.Out.Send(flow.Tag{\"<dispatch>\", tag})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func Worker(mapf func(string, string) []Pair, reducef func(string, []string) string) {\n\tclient := MakeRpcClient()\n\tdefer client.Close()\n\tfor {\n\t\t// 对端的 server 如果退出了,下面这个会有什么反应\n\t\ttask := Task{TaskKind: ReduceTaskFlag, TaskId: \"10\"}\n\n\t\t// fmt.Println(\"request task\")\n\t\tstatus := client.Call(\"Coordinator.RequestTask\", struct{}{}, &task)\n\t\t// fmt.Println(\"Get response\", task)\n\t\tif status == false {\n\t\t\tbreak\n\t\t}\n\n\t\tswitch task.TaskKind {\n\t\tcase MapTaskFlag:\n\t\t\t// fmt.Println(\"get map task \", task.TaskId)\n\t\t\tintermediate := mapf(task.File, readFileToString(task.File))\n\t\t\t// fmt.Println(\"map task done\")\n\t\t\tsort.Sort(ByKey(intermediate))\n\t\t\tr := MapResult{TaskId: task.TaskId, Items: divideIntoItems(intermediate)}\n\t\t\tclient.Call(\"Coordinator.UploadMapResult\", r, nil)\n\t\t\t// fmt.Println(\"map result upload\")\n\n\t\tcase ReduceTaskFlag:\n\t\t\tLog(\"get reduce task \", task.TaskId)\n\t\t\tfilename := fmt.Sprint(\"mr-out-\", task.TaskId)\n\t\t\tf, _ := os.Create(filename)\n\t\t\tdefer f.Close()\n\t\t\targFile, _ := os.Open(task.File)\n\t\t\treader := bufio.NewReader(argFile)\n\n\t\t\tfor {\n\t\t\t\tend, k, vs := readFrom(reader)\n\t\t\t\tif end {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tLog(\"reduce func call\", k)\n\t\t\t\t// fmt.Println(\"key: \", k, \"values: \", vs)\n\n\t\t\t\tv := reducef(k, vs)\n\t\t\t\tfmt.Fprintf(f, \"%v %v\\n\", k, v)\n\t\t\t}\n\t\t\tLog(\"reduce task \", task.TaskId, \"done\")\n\n\t\t\tresult := ReduceResult{TaskId: task.TaskId, Filename: filename}\n\t\t\tclient.Call(\"Coordinator.UploadReduceResult\", result, nil)\n\t\t\tLog(\"reduce task\", task.TaskId, \"result upload\")\n\n\t\tcase ShutdownFlag:\n\t\t\tfallthrough\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (l *lexer) run() {\n\tfor state := lexText; state != nil; {\n\t\tstate = state(l)\n\t}\n\tclose(l.items)\n}", "func main() {\n\n //bring up the services\n\tmasterSrvAddr := master.StartMasterSrv(9090) //9090\n\tworkerSrvAddr1 := worker.StartWorkerSrv(9091); //9091 ,9092, 9093\n\tworkerSrvAddr2 := worker.StartWorkerSrv(9092);\n\tworker.StartWorkerCli(masterSrvAddr, []string{workerSrvAddr1,workerSrvAddr2});\n\tmaster.StartMasterCli();\n\n\t//distributed map-reduce flow\n\tmapOutput,err := master.DoOperation([]string{\"/Users/k0c00nc/go/src/MapReduce/res/input.txt\", \"/Users/k0c00nc/go/src/distributedDb\" +\n\t\t\"/res/input1.txt\"},\"Map\")\n\tif err !=nil{\n\t\tfmt.Printf(\"map phase failed with err %s \", err.Error())\n\t}\n\n\tlocalAggregation,err :=master.DoOperation(mapOutput,\"LocalAggregation\")\n\tif err !=nil{\n\t\tfmt.Printf(\"localAggregation phase failed with err %s \", err.Error())\n\t}\n\n\tshuffing,err :=master.DoOperation(localAggregation,\"Shuffing\")\n\tif err !=nil{\n\t\tfmt.Printf(\"shuffing phase failed with err %s \", err.Error())\n\t}\n\n\treduce,err :=master.DoOperation(shuffing,\"Reduce\")\n\tif err !=nil{\n\t\tfmt.Printf(\"reduce phase failed with err %s \", 
err.Error())\n\t}\n\n fmt.Println(\"MR output are in file\", reduce[0])\n\n}", "func (p *MapToKeys) Run() {\n\tdefer p.CloseAllOutPorts()\n\tfor ip := range p.In().Chan {\n\t\tnewKeys := p.mapFunc(ip)\n\t\tip.AddKeys(newKeys)\n\t\tip.WriteAuditLogToFile()\n\t\tp.Out().Send(ip)\n\t}\n}", "func (l *lexer) run() {\n\tfor state := lexStart; state != nil; {\n\t\tstate = state(l)\n\t}\n}", "func (r *reducer) stop() {\n\tfor _, m := range r.mappers {\n\t\tm.stop()\n\t}\n\tsyncClose(r.done)\n}", "func (l *lexer) run() {\n\tfor state := lexText; state != nil; {\n\t\tstate = state(l)\n\t}\n\tclose(l.tokens) // No more tokens will be delivered\n}", "func (l *lexer) run() {\n\tfor state := lexSchema; state != nil; {\n\t\tstate = state(l)\n\t}\n\tclose(l.items) // No more tokens will be delivered.\n}", "func (l *lexer) run() {\r\n\tfor l.state = lexAny(l); l.state != nil; {\r\n\t\tl.state = l.state(l)\r\n\t}\r\n\tclose(l.tokens)\r\n}", "func (sm safeMap) run() {\n\tstore := make(map[string]interface{})\n\tfor command := range sm {\n\t\tswitch command.action {\n\t\tcase INSERT:\n\t\t\tstore[command.key] = command.value\n\t\tcase REMOVE:\n\t\t\tdelete(store, command.key)\n\t\tcase FLUSH:\n\t\t\tflush(store, command.keys)\n\t\tcase FIND:\n\t\t\tvalue, found := store[command.key]\n\t\t\tcommand.result <- findResult{value, found}\n\t\tcase COUNT:\n\t\t\tcommand.result <- len(store)\n\t\tcase TRUNCATE:\n\t\t\tclearMap(store)\n\t\tcase END:\n\t\t\tclose(sm)\n\t\t\tcommand.data <- store\n\t\t}\n\t}\n}", "func (l *Clogger) run() {\n\tvar m string\n\tfor m = range l.in {\n\t\tfmt.Fprint(l.w, m)\n\t}\n\treturn\n}", "func (p *SingleLineParser) run() {\n\tfor input := range p.inputChan {\n\t\tp.process(input)\n\t}\n\tp.lineHandler.Stop()\n}", "func Reduce(reducef func(string, []string) string, reduceTask Task, NMap int) {\n\tkva := make([]KeyValue, 0)\n\tfor i := 0; i < NMap; i++ {\n\t\toname := fmt.Sprintf(\"mr-%v-%v.json\", i, reduceTask.Index)\n\t\tf, err := os.Open(oname)\n\t\tdefer f.Close()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"cannot open %v\", oname)\n\t\t}\n\t\tdec := json.NewDecoder(f)\n\t\tfor {\n\t\t\tvar kv KeyValue\n\t\t\tif err := dec.Decode(&kv); err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tkva = append(kva, kv)\n\n\t\t}\n\t}\n\tsort.Sort(ByKey(kva))\n\toname := fmt.Sprintf(\"mr-out-%v\", reduceTask.Index)\n\tofile, _ := os.Create(oname)\n\n\ti := 0\n\tfor i < len(kva) {\n\t\tj := i + 1\n\t\tfor j < len(kva) && kva[j].Key == kva[i].Key {\n\t\t\tj++\n\t\t}\n\t\tvalues := []string{}\n\t\tfor k := i; k < j; k++ {\n\t\t\tvalues = append(values, kva[k].Value)\n\t\t}\n\t\toutput := reducef(kva[i].Key, values)\n\n\t\t// this is the correct format for each line of Reduce output.\n\t\tfmt.Fprintf(ofile, \"%v %v\\n\", kva[i].Key, output)\n\n\t\ti = j\n\t}\n\tofile.Close()\n}", "func (m *Mapper) Run() error {\n\tm.wg.Add(1)\n\tdefer m.wg.Done()\n\n\t// We start trying to map at the root height.\n\theight, err := m.chain.Root()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not get root height: %w\", err)\n\t}\n\n\t// We always initialize an empty state trie to refer to the first step\n\t// before the checkpoint. If there is no checkpoint, then the step after the\n\t// checkpoint will also just be the empty trie. 
Otherwise, the second trie\n\t// will load the checkpoint trie.\n\tempty := trie.NewEmptyMTrie()\n\tvar tree *trie.MTrie\n\tif m.checkpoint == \"\" {\n\t\ttree = empty\n\t} else {\n\t\tm.log.Info().Msg(\"checkpoint rebuild started\")\n\t\tfile, err := os.Open(m.checkpoint)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not open checkpoint file: %w\", err)\n\t\t}\n\t\tcheckpoint, err := wal.ReadCheckpoint(file)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not read checkpoint: %w\", err)\n\t\t}\n\t\ttrees, err := flattener.RebuildTries(checkpoint)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not rebuild tries: %w\", err)\n\t\t}\n\t\tif len(trees) != 1 {\n\t\t\treturn fmt.Errorf(\"should only have one trie in root checkpoint (tries: %d)\", len(trees))\n\t\t}\n\t\ttree = trees[0]\n\t\tm.log.Info().Msg(\"checkpoint rebuild finished\")\n\t}\n\n\tm.log.Info().Msg(\"path collection started\")\n\n\t// We have to index all of the paths from the checkpoint; otherwise, we will\n\t// miss every single one of the bootstrapped registers.\n\tpaths := make([]ledger.Path, 0, len(tree.AllPayloads()))\n\tqueue := deque.New()\n\troot := tree.RootNode()\n\tif root != nil {\n\t\tqueue.PushBack(root)\n\t}\n\tfor queue.Len() > 0 {\n\t\tnode := queue.PopBack().(*node.Node)\n\t\tif node.IsLeaf() {\n\t\t\tpath := node.Path()\n\t\t\tpaths = append(paths, *path)\n\t\t\tcontinue\n\t\t}\n\t\tif node.LeftChild() != nil {\n\t\t\tqueue.PushBack(node.LeftChild())\n\t\t}\n\t\tif node.RightChild() != nil {\n\t\t\tqueue.PushBack(node.RightChild())\n\t\t}\n\t}\n\n\tm.log.Info().Int(\"paths\", len(paths)).Msg(\"path collection finished\")\n\n\tm.log.Info().Msg(\"path sorting started\")\n\n\tsort.Slice(paths, func(i int, j int) bool {\n\t\treturn bytes.Compare(paths[i][:], paths[j][:]) < 0\n\t})\n\n\tm.log.Info().Msg(\"path sorting finished\")\n\n\t// When trying to go from one finalized block to the next, we keep a list\n\t// of intermediary tries until the full set of transitions have been\n\t// identified. We keep track of these transitions as steps in this map.\n\tsteps := make(map[flow.StateCommitment]*Step)\n\n\t// We start at an \"imaginary\" step that refers to an empty trie, has no\n\t// paths and no previous commit. We consider this step already done, so it\n\t// will never be indexed; it's merely used as the sentinel value for\n\t// stopping when we index the first block. It also makes sure that we don't\n\t// return a `nil` trie if we abort indexing before the first block is done.\n\temptyCommit := flow.DummyStateCommitment\n\tsteps[emptyCommit] = &Step{\n\t\tCommit: flow.StateCommitment{},\n\t\tPaths: nil,\n\t\tTree: empty,\n\t}\n\n\t// We then add a second step that refers to the first step that is already\n\t// done, which uses the commit of the initial state trie after the\n\t// checkpoint has been loaded, and contains all of the paths found in the\n\t// initial checkpoint state trie. This will make sure that we index all the\n\t// data from the checkpoint as part of the first block.\n\trootCommit := flow.StateCommitment(tree.RootHash())\n\tsteps[rootCommit] = &Step{\n\t\tCommit: emptyCommit,\n\t\tPaths: paths,\n\t\tTree: tree,\n\t}\n\n\t// This is how we let the indexing loop know that the first \"imaginary\" step\n\t// was already indexed. 
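\t// (Illustration of the bootstrap state assumed at this point:\n\t//   steps[emptyCommit] holds the sentinel step over the empty trie,\n\t//   steps[rootCommit] holds the checkpoint trie with Commit == emptyCommit,\n\t//   and commitPrev == emptyCommit, so the walk-back below stops there.)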
The `commitPrev` value is used as a sentinel value\n\t// for when to stop going backwards through the steps when indexing a block.\n\t// This means the value is always set to the last already indexed step.\n\tcommitPrev := emptyCommit\n\n\tm.log.Info().Msg(\"state indexing started\")\n\n\t// Next, we launch into the loop that is responsible for mapping all\n\t// incoming trie updates to a block. The loop itself has no concept of what\n\t// the next state commitment is that we should look at. It will simply try\n\t// to find a previous step for _any_ trie update that comes in. This means\n\t// that the first trie update needs to either apply to the empty trie or to\n\t// the trie after the checkpoint in order to be processed.\n\tonce := &sync.Once{}\nOuter:\n\tfor {\n\t\t// We want to check in this tight loop if we want to quit, just in case\n\t\t// we get stuck on a timed out network connection.\n\t\tselect {\n\t\tcase <-m.stop:\n\t\t\tbreak Outer\n\t\tdefault:\n\t\t\t// keep going\n\t\t}\n\n\t\tlog := m.log.With().\n\t\t\tUint64(\"height\", height).\n\t\t\tHex(\"commit_prev\", commitPrev[:]).Logger()\n\n\t\t// As a first step, we retrieve the state commitment of the finalized\n\t\t// block at the current height; we start at the root height and then\n\t\t// increase it each time we are done indexing a block. Once an applied\n\t\t// trie update gives us a state trie with the same root hash as\n\t\t// `commitNext`, we have reached the end state of the next finalized\n\t\t// block and can index all steps in-between for that block height.\n\t\tcommitNext, err := m.chain.Commit(height)\n\n\t\t// If the retrieval times out, it's possible that we are on a live chain\n\t\t// and the next block has not been finalized yet. We should thus simply\n\t\t// retry until we have a new block.\n\t\tif errors.Is(err, dps.ErrTimeout) {\n\t\t\tlog.Warn().Msg(\"commit retrieval timed out, retrying\")\n\t\t\tcontinue Outer\n\t\t}\n\n\t\t// If we have reached the end of the finalized blocks, we are probably\n\t\t// on a historical chain and there are no more finalized blocks for the\n\t\t// related spork. We can exit without error.\n\t\tif errors.Is(err, dps.ErrFinished) {\n\t\t\tlog.Debug().Msg(\"reached end of finalized chain\")\n\t\t\tbreak Outer\n\t\t}\n\n\t\t// Any other error should not happen and should crash explicitly.\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not retrieve next commit (height: %d): %w\", height, err)\n\t\t}\n\n\t\tlog = log.With().Hex(\"commit_next\", commitNext[:]).Logger()\n\n\tInner:\n\t\tfor {\n\t\t\t// We want to check in this tight loop if we want to quit, just in case\n\t\t\t// we get stuck on a timed out network connection.\n\t\t\tselect {\n\t\t\tcase <-m.stop:\n\t\t\t\tbreak Outer\n\t\t\tdefault:\n\t\t\t\t// keep going\n\t\t\t}\n\n\t\t\t// When we have the state commitment of the next finalized block, we\n\t\t\t// check to see if we find a trie for it in our steps. If we do, it\n\t\t\t// means that we have steps from the last finalized block to the\n\t\t\t// finalized block at the current height. This condition will\n\t\t\t// trigger immediately for every empty block.\n\t\t\t_, ok := steps[commitNext]\n\t\t\tif ok {\n\t\t\t\tbreak Inner\n\t\t\t}\n\n\t\t\t// If we don't find a trie for the current state commitment, we need\n\t\t\t// to keep applying trie updates to state tries until one of them\n\t\t\t// does have the correct commit. 
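\t\t\t// (Illustration: each applied update stores a step keyed by its\n\t\t\t// resulting root hash, whose Commit field points at its parent, so\n\t\t\t// the steps map grows a chain leading from commitNext back to\n\t\t\t// commitPrev; the Inner loop exits once a stored root hash equals\n\t\t\t// commitNext.)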
We simply feed the next trie update\n\t\t\t// here.\n\t\t\tupdate, err := m.feed.Update()\n\n\t\t\t// Once more, we might be on a live spork and the next delta might not\n\t\t\t// be available yet. In that case, keep trying.\n\t\t\tif errors.Is(err, dps.ErrTimeout) {\n\t\t\t\tlog.Warn().Msg(\"delta retrieval timed out, retrying\")\n\t\t\t\tcontinue Inner\n\t\t\t}\n\n\t\t\t// Similarly, if no more deltas are available, we reached the end of\n\t\t\t// the WAL and we are done reconstructing the execution state.\n\t\t\tif errors.Is(err, dps.ErrFinished) {\n\t\t\t\tlog.Debug().Msg(\"reached end of delta log\")\n\t\t\t\tbreak Outer\n\t\t\t}\n\n\t\t\t// Other errors should fail execution as they should not happen.\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"could not retrieve next delta: %w\", err)\n\t\t\t}\n\n\t\t\t// NOTE: We used to require a copy of the `RootHash` here, when it\n\t\t\t// was still a byte slice, as the underlying slice was being reused.\n\t\t\t// It was changed to a value type that is always copied now.\n\t\t\tcommitBefore := flow.StateCommitment(update.RootHash)\n\n\t\t\tlog := log.With().Hex(\"commit_before\", commitBefore[:]).Logger()\n\n\t\t\t// Once we have our new update and know which trie it should be\n\t\t\t// applied to, we check to see if we have such a trie in our current\n\t\t\t// steps. If not, we can simply skip it; this can happen, for\n\t\t\t// example, when there is an execution fork and the trie update\n\t\t\t// applies to an obsolete part of the blockchain history.\n\t\t\tstep, ok := steps[commitBefore]\n\t\t\tif !ok {\n\t\t\t\tlog.Debug().Msg(\"skipping trie update without matching trie\")\n\t\t\t\tcontinue Inner\n\t\t\t}\n\n\t\t\t// We de-duplicate the paths and payloads here. This replicates some\n\t\t\t// code that is part of the execution node and has moved between\n\t\t\t// different layers of the architecture. We keep it to be safe for\n\t\t\t// all versions of the Flow dependencies.\n\t\t\t// NOTE: Past versions of this code required paths to be copied,\n\t\t\t// because the underlying slice was being re-used. In contrary,\n\t\t\t// deep-copying payloads was a bad idea, because they were already\n\t\t\t// being copied by the trie insertion code, and it would have led to\n\t\t\t// twice the memory usage.\n\t\t\tpaths = make([]ledger.Path, 0, len(update.Paths))\n\t\t\tlookup := make(map[ledger.Path]*ledger.Payload)\n\t\t\tfor i, path := range update.Paths {\n\t\t\t\t_, ok := lookup[path]\n\t\t\t\tif !ok {\n\t\t\t\t\tpaths = append(paths, path)\n\t\t\t\t}\n\t\t\t\tlookup[path] = update.Payloads[i]\n\t\t\t}\n\t\t\tsort.Slice(paths, func(i, j int) bool {\n\t\t\t\treturn bytes.Compare(paths[i][:], paths[j][:]) < 0\n\t\t\t})\n\t\t\tpayloads := make([]ledger.Payload, 0, len(paths))\n\t\t\tfor _, path := range paths {\n\t\t\t\tpayloads = append(payloads, *lookup[path])\n\t\t\t}\n\n\t\t\t// We can now apply the trie update to the state trie as it was at\n\t\t\t// the previous step. 
This is where the trie code will deep-copy the\n\t\t\t// payloads.\n\t\t\t// NOTE: It's important that we don't shadow the variable here,\n\t\t\t// otherwise the root trie will never go out of scope and we will\n\t\t\t// never garbage collect any of the root trie payloads that have\n\t\t\t// been replaced by subsequent trie updates.\n\t\t\ttree, err = trie.NewTrieWithUpdatedRegisters(step.Tree, paths, payloads)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"could not update trie: %w\", err)\n\t\t\t}\n\n\t\t\t// We then store the new trie along with the state commitment of its\n\t\t\t// parent and the paths that were changed. This will make it\n\t\t\t// available for subsequent trie updates to be applied to it, and it\n\t\t\t// will also allow us to reconstruct the payloads changed in this\n\t\t\t// step by retrieving them directly from the trie with the given\n\t\t\t// paths.\n\t\t\tcommitAfter := flow.StateCommitment(tree.RootHash())\n\t\t\tstep = &Step{\n\t\t\t\tCommit: commitBefore,\n\t\t\t\tPaths: paths,\n\t\t\t\tTree: tree,\n\t\t\t}\n\t\t\tsteps[commitAfter] = step\n\n\t\t\tlog.Debug().Hex(\"commit_after\", commitAfter[:]).Msg(\"trie update applied\")\n\t\t}\n\n\t\t// At this point we have identified a step that has lead to the state\n\t\t// commitment of the finalized block at the current height. We can\n\t\t// retrieve some additional indexing data, such as the block header and\n\t\t// the events that resulted from transactions in the block.\n\t\theader, err := m.chain.Header(height)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not retrieve header: %w (height: %d)\", err, height)\n\t\t}\n\t\tevents, err := m.chain.Events(height)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not retrieve events: %w (height: %d)\", err, height)\n\t\t}\n\t\tblockID := header.ID()\n\n\t\t// We then index the data for the finalized block at the current height.\n\t\terr = m.index.Header(height, header)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not index header: %w\", err)\n\t\t}\n\t\terr = m.index.Commit(height, commitNext)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not index commit: %w\", err)\n\t\t}\n\t\terr = m.index.Events(height, events)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not index events: %w\", err)\n\t\t}\n\t\terr = m.index.Height(blockID, height)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not index block heights: %w\", err)\n\t\t}\n\n\t\t// TODO: In order to provide more complete responses for the Rosetta API\n\t\t// and to be able to implement all Access API endpoints, we need\n\t\t// transaction data in our index:\n\t\t// https://github.com/optakt/flow-dps/issues/156\n\n\t\t// TODO: In order to be able to implement Access API endpoints that use\n\t\t// block IDs, we need to start indexing block IDs at each height:\n\t\t// https://github.com/optakt/flow-dps/issues/157\n\n\t\t// In order to index the payloads, we step back from the state\n\t\t// commitment of the finalized block at the current height to the state\n\t\t// commitment of the last finalized block that was indexed. For each\n\t\t// step, we collect all the payloads by using the paths for the step and\n\t\t// index them as we go.\n\t\t// NOTE: We keep track of the paths for which we already indexed\n\t\t// payloads, so we can skip them in earlier steps. 
One inherent benefit\n\t\t// of stepping from the last step to the first step is that this will\n\t\t// automatically use only the latest update of a register, which is\n\t\t// exactly what we want.\n\t\tcommit := commitNext\n\t\tupdated := make(map[ledger.Path]struct{})\n\t\tfor commit != commitPrev {\n\n\t\t\t// In the first part, we get the step we are currently at and filter\n\t\t\t// out any paths that have already been updated.\n\t\t\tstep := steps[commit]\n\t\t\tpaths := make([]ledger.Path, 0, len(step.Paths))\n\t\t\tfor _, path := range step.Paths {\n\t\t\t\t_, ok := updated[path]\n\t\t\t\tif ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tpaths = append(paths, path)\n\t\t\t\tupdated[path] = struct{}{}\n\t\t\t}\n\n\t\t\t// We then divide the remaining paths into chunks of 1000. For each\n\t\t\t// batch, we retrieve the payloads from the state trie as it was at\n\t\t\t// the end of this block and index them.\n\t\t\tcount := 0\n\t\t\tn := 1000\n\t\t\ttotal := ((len(paths) + n - 1) / n)\n\t\t\tlog.Debug().Int(\"num_paths\", len(paths)).Int(\"num_batches\", total).Msg(\"path batching executed\")\n\t\t\tfor start := 0; start < len(paths); start += n {\n\t\t\t\t// This loop may take a while, especially for the root checkpoint\n\t\t\t\t// updates, so check if we should quit.\n\t\t\t\tselect {\n\t\t\t\tcase <-m.stop:\n\t\t\t\t\tbreak Outer\n\t\t\t\tdefault:\n\t\t\t\t\t// keep going\n\t\t\t\t}\n\n\t\t\t\tend := start + n\n\t\t\t\tif end > len(paths) {\n\t\t\t\t\tend = len(paths)\n\t\t\t\t}\n\t\t\t\tbatch := paths[start:end]\n\t\t\t\tpayloads := step.Tree.UnsafeRead(batch)\n\t\t\t\terr = m.index.Payloads(height, batch, payloads)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"could not index payloads: %w\", err)\n\t\t\t\t}\n\n\t\t\t\tcount++\n\n\t\t\t\tlog.Debug().Int(\"batch\", count).Int(\"start\", start).Int(\"end\", end).Msg(\"path batch indexed\")\n\t\t\t}\n\n\t\t\t// Finally, we forward the commit to the previous trie update and\n\t\t\t// repeat until we have stepped all the way back to the last indexed\n\t\t\t// commit.\n\t\t\tcommit = step.Commit\n\t\t}\n\n\t\t// At this point, we can delete any trie that does not correspond to\n\t\t// the state that we have just reached. This will allow the garbage\n\t\t// collector to free up any payload that has been changed and which is\n\t\t// no longer part of the state trie at the newly indexed finalized\n\t\t// block.\n\t\tfor key := range steps {\n\t\t\tif key != commitNext {\n\t\t\t\tdelete(steps, key)\n\t\t\t}\n\t\t}\n\n\t\t// Last but not least, we take care of properly indexing the height of\n\t\t// the first indexed block and the height of the last indexed block.\n\t\tonce.Do(func() { err = m.index.First(height) })\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not index first height: %w\", err)\n\t\t}\n\t\terr = m.index.Last(height)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not index last height: %w\", err)\n\t\t}\n\n\t\t// We have now successfully indexed all state trie changes and other\n\t\t// data at the current height. 
We set the last indexed step to the last\n\t\t// step from our current height, and then increase the height to start\n\t\t// the indexing of the next block.\n\t\tcommitPrev = commitNext\n\t\theight++\n\n\t\tlog.Info().\n\t\t\tHex(\"block\", blockID[:]).\n\t\t\tInt(\"num_changes\", len(updated)).\n\t\t\tInt(\"num_events\", len(events)).\n\t\t\tMsg(\"block data indexed\")\n\t}\n\n\tm.log.Info().Msg(\"state indexing finished\")\n\n\tstep := steps[commitPrev]\n\tm.post(step.Tree)\n\n\treturn nil\n}", "func MapReduce(works chan MapReducable, workers int) {\n\tserials := make(chan chan Reducable, workers)\n\tgo func() {\n\t\tfor work := range works {\n\t\t\twork := work\n\t\t\tserial := make(chan Reducable)\n\t\t\tserials <- serial\n\t\t\tgo func() {\n\t\t\t\tserial <- work.Map()\n\t\t\t}()\n\t\t}\n\t\tclose(serials)\n\t}()\n\tfor serial := range serials {\n\t\tr := <-serial\n\t\tr.Reduce()\n\t}\n}", "func (r *Runner) run() {\n\tfor {\n\t\ttask := r.rq.Pop()\n\t\tr.process(task)\n\t}\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\t// uncomment to send the Example RPC to the master.\n\tisMapFinished := false\n\tfor isMapFinished != true {\n\t\tresp := CallAssignMapTask()\n\t\tmaptask := resp.Task\n\t\tnReduce := resp.NReduce\n\n\t\tif maptask.TaskNum != -1 {\n\t\t\tfile, err := os.Open(maptask.Filename)\n\t\t\tdefer file.Close()\n\n\t\t\tlog.Printf(\"[Worker %v] Starting on map task: %+v\\n\", os.Getpid(), maptask.Filename)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"cannot open map file %v\\n\", err)\n\t\t\t}\n\t\t\tcontent, err := ioutil.ReadAll(file)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"cannot read %v\\n\", maptask.Filename)\n\t\t\t}\n\t\t\tmaptask.Result = mapf(maptask.Filename, string(content))\n\n\t\t\tintermediate := make(map[int][]KeyValue)\n\t\t\tfor _, kv := range maptask.Result {\n\t\t\t\treduceTaskNum := ihash(kv.Key) % nReduce\n\t\t\t\tintermediate[reduceTaskNum] = append(intermediate[reduceTaskNum], kv)\n\t\t\t}\n\n\t\t\tfor i := 0; i < nReduce; i++ {\n\t\t\t\ttmpFileName := \"tmp-\" + strconv.Itoa(maptask.TaskNum) + \"-\" + strconv.Itoa(i) + \".txt\"\n\t\t\t\tifile, err := ioutil.TempFile(\"\", tmpFileName)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalf(\"Cannot create ifile: %v\\n\", err)\n\t\t\t\t}\n\n\t\t\t\tenc := json.NewEncoder(ifile)\n\t\t\t\tfor _, kv := range intermediate[i] {\n\t\t\t\t\tif err := enc.Encode(&kv); err != nil {\n\t\t\t\t\t\tlog.Fatalf(\"Cannot write to file: %v\\n\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tos.Rename(ifile.Name(), tmpFileName)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Printf(\"[Worker %v] Waiting for other workers to finish...\\n\", os.Getpid())\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t}\n\n\t\tisMapFinished = CallCompleteMapTask(maptask)\n\t}\n\n\tisReduceFinished := false\n\tfor isReduceFinished != true {\n\t\treducetask := CallAssignReduceTask()\n\n\t\tif reducetask.TaskNum != -1 {\n\t\t\tlog.Printf(\"[Worker %v] Starting on reduce task: %+v\\n\", os.Getpid(), reducetask)\n\t\t\tpattern := fmt.Sprintf(\"./tmp-*-%v.txt\", reducetask.TaskNum)\n\t\t\tfilenames, _ := filepath.Glob(pattern)\n\t\t\tvar intermediate []KeyValue\n\t\t\tfor _, p := range filenames {\n\t\t\t\tfile, err := os.Open(p)\n\t\t\t\tdefer file.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalf(\"cannot open reduce %v\\n\", p)\n\t\t\t\t}\n\t\t\t\tdec := json.NewDecoder(file)\n\t\t\t\tfor {\n\t\t\t\t\tvar kv KeyValue\n\t\t\t\t\tif err := dec.Decode(&kv); err != nil 
{\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tintermediate = append(intermediate, kv)\n\t\t\t\t}\n\t\t\t}\n\t\t\tsort.Sort(ByKey(intermediate))\n\t\t\toname := \"./mr-out-\" + strconv.Itoa(reducetask.TaskNum)\n\t\t\tofile, _ := os.Create(oname)\n\t\t\tdefer ofile.Close()\n\t\t\ti := 0\n\t\t\tfor i < len(intermediate) {\n\t\t\t\tj := i + 1\n\t\t\t\tfor j < len(intermediate) && intermediate[i].Key == intermediate[j].Key {\n\t\t\t\t\tj++\n\t\t\t\t}\n\t\t\t\tvalues := []string{}\n\t\t\t\tfor k := i; k < j; k++ {\n\t\t\t\t\tvalues = append(values, intermediate[k].Value)\n\t\t\t\t}\n\n\t\t\t\toutput := reducef(intermediate[i].Key, values)\n\t\t\t\tfmt.Fprintf(ofile, \"%v %v\\n\", intermediate[i].Key, output)\n\n\t\t\t\ti = j\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Printf(\"[Worker %v] Waiting for other workers to finish...\\n\", os.Getpid())\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t}\n\n\t\tisReduceFinished = CallCompleteReduceTask(reducetask)\n\t}\n\n}", "func (l *lexer) run() {\n\tfor l.state = lexText; l.state != nil; {\n\t\tl.state = l.state(l)\n\t}\n}", "func (l *lexer) run() {\n\tfor l.state = lexText; l.state != nil; {\n\t\tl.state = l.state(l)\n\t}\n}", "func (f Filter) run(node *yaml.RNode) error {\n\tfor key, value := range f.Annotations {\n\t\tif err := node.PipeE(fsslice.Filter{\n\t\t\tFsSlice: f.FsSlice,\n\t\t\tSetValue: fsslice.SetEntry(key, value),\n\t\t\tCreateKind: yaml.MappingNode, // Annotations are MappingNodes.\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func main() {\n \tfmt.Println(\"Welcome to my MapReduce!\");\n\n\tif len(os.Args) != 4 {\n\t\tfmt.Printf(\"%s: see usage comments in file\\n\", os.Args[0])\n\t} else if os.Args[1] == \"master\" {\n\t\tif os.Args[3] == \"sequential\" {\n\t\t\tmapreduce.RunSingle(5, 3, os.Args[2], Map, Reduce)\n\t\t} else {\n\t\t\tmr := mapreduce.MakeMapReduce(5, 3, os.Args[2], os.Args[3])\n\t\t\t// Wait until MR is done\n\t\t\t<-mr.DoneChannel\n\t\t}\n\t} else {\n\t\tmapreduce.RunWorker(os.Args[2], os.Args[3], Map, Reduce, 100)\n\t}\n}", "func MapReduce(input int, reduce func(results []int) int, tasks ...Task) (int, error) {\n\t// 1. tasks => taskCh (chan Task)\n\t// 2. Limited number of gophers work on tasks\n\t// - output => output channel\n\t// 3. goroutine pull data from output channel\n\t// - get a slice of output\n\t// 4. 
feed reduce func with the slice\n\t// * if any error occurred, abort & clean goroutines\n\t// - taskCh = drain\n\t// - outCh = close & drain\n\ttype taskResult struct {\n\t\to int\n\t\te error\n\t}\n\t// for abort\n\tabort := make(chan struct{})\n\n\tabortSwitcher := func() bool {\n\t\tselect {\n\t\tcase <-abort:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// step 1\n\ttaskCh := make(chan Task)\n\tgo func() {\n\t\tfor _, t := range tasks {\n\t\t\ttaskCh <- t\n\t\t}\n\t\tclose(taskCh)\n\t}()\n\t// step 2\n\t// in order to close channel(outCh), use WaitGroup\n\tvar wg sync.WaitGroup\n\tworkerNum := 4\n\toutCh := make(chan taskResult)\n\terrCh := make(chan taskResult, 10) // blocked without a buffer\n\tfor i := 0; i < workerNum; i++ {\n\t\tgo func() {\n\t\t\tfor t := range taskCh {\n\t\t\t\tif abortSwitcher() {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\twg.Add(1)\n\t\t\t\to, e := t.Execute(input)\n\t\t\t\tif e != nil && !abortSwitcher() {\n\t\t\t\t\terrCh <- taskResult{o, e}\n\t\t\t\t\tclose(abort)\n\t\t\t\t} else {\n\t\t\t\t\toutCh <- taskResult{o, e}\n\t\t\t\t}\n\t\t\t\twg.Done()\n\t\t\t}\n\t\t}()\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(outCh)\n\t\tclose(errCh)\n\t}()\n\n\t// step 3\n\tres := []int{}\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tfor o := range outCh {\n\t\t\tif abortSwitcher() {\n\t\t\t\tfor range outCh {\n\t\t\t\t} // drain outCh\n\t\t\t\tfor range taskCh {\n\t\t\t\t} // drain taskCh\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tres = append(res, o.o)\n\t\t}\n\t\tclose(done)\n\t}()\n\t// step 4\n\t<-done\n\n\tif abortSwitcher() {\n\t\to := <-errCh\n\t\tfor range errCh {\n\t\t} // drain errCh\n\t\treturn o.o, o.e\n\t} else {\n\t\treturn reduce(res), nil\n\t}\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\tfmt.Println(\"make worker\")\n\n\targs := MRArgs{}\n\targs.Phase = registerPhase\n\n\treply := MRReply{}\n\tcall(\"Master.Schedule\", &args, &reply)\n\t//register with the master\n\n\t//fmt.Printf(\"get map task %v\\n\", reply.NTask)\n\n\t// Your worker implementation here.\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\n\t//fmt.Printf(\"get map task %v\\n\", reply.NTask)\n\n\tfor reply.TaskNum != -2 {\n\n\t\t//a reply TaskNum of -1 means some tasks are still unfinished but within their lease\n\t\t//a reply TaskNum of -2 means all tasks have completed\n\t\t//fmt.Println(\"get map task\")\n\t\tfmt.Printf(\"get map task %v %v\\n\", reply.TaskNum, reply.FileName)\n\n\t\tif reply.TaskNum == -1 {\n\t\t\t//sleep 3s, then ask the master again\n\t\t\ttime.Sleep(time.Duration(3) * time.Second)\n\t\t\tfmt.Printf(\"worker wake up\\n\")\n\t\t\targs = MRArgs{}\n\t\t\targs.Phase = mapPhase\n\t\t\targs.TaskNum = -1\n\n\t\t\treply = MRReply{}\n\t\t\tcall(\"Master.Schedule\", &args, &reply)\n\t\t\tcontinue\n\t\t}\n\n\t\t//similar to mrsequential.go: run the map task and write the intermediate files\n\t\tintermediate := []KeyValue{}\n\t\tfilename := reply.FileName\n\t\tfile, err := os.Open(filename)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"cannot open %v\", filename)\n\t\t}\n\t\tcontent, err := ioutil.ReadAll(file)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"cannot read %v\", filename)\n\t\t}\n\t\tfile.Close()\n\t\tkva := mapf(filename, string(content))\n\t\tintermediate = append(intermediate, kva...)\n\t\tsort.Sort(ByKey(intermediate))\n\n\t\tfilesenc := make([]*json.Encoder, reply.NTask)\n\t\tfiles := make([]*os.File, reply.NTask)\n\n\t\tfor i := 0; i < reply.NTask; i++ {\n\t\t\tfileName := \"mr-\" + strconv.Itoa(reply.TaskNum) + \"-\" + strconv.Itoa(i)\n\t\t\tfout, err := os.Create(fileName)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(fileName, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfilesenc[i] = json.NewEncoder(fout)\n\t\t\tfiles[i] = fout\n\t\t}\n\n\t\ti := 0\n\t\tfor i < len(intermediate) {\n\t\t\tj := i\n\n\t\t\tfor ; j < len(intermediate) && intermediate[j].Key == intermediate[i].Key; j++ {\n\t\t\t\toutput := KeyValue{intermediate[j].Key, intermediate[j].Value}\n\t\t\t\terr := filesenc[ihash(intermediate[j].Key)%reply.NTask].Encode(&output)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"%s Encode Failed %v\\n\", intermediate[j].Key, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ti = j\n\t\t}\n\n\t\tfor _, f := range files {\n\t\t\tf.Close()\n\t\t}\n\n\t\targs = MRArgs{}\n\t\targs.Phase = mapPhase\n\t\targs.TaskNum = reply.TaskNum\n\n\t\treply = MRReply{}\n\t\tcall(\"Master.Schedule\", &args, &reply)\n\t}\n\n\targs = MRArgs{}\n\targs.Phase = waitReducePhase\n\n\treply = MRReply{}\n\n\tcall(\"Master.Schedule\", &args, &reply)\n\n\tfor reply.TaskNum != -2 {\n\n\t\t//a reply TaskNum of -1 means some tasks are still unfinished but within their lease\n\t\t//a reply TaskNum of -2 means all tasks have completed\n\n\t\tif reply.TaskNum == -1 {\n\t\t\ttime.Sleep(time.Duration(1) * time.Second)\n\t\t\targs = MRArgs{}\n\t\t\targs.Phase = reducePhase\n\t\t\targs.TaskNum = -1\n\n\t\t\treply = MRReply{}\n\t\t\tcall(\"Master.Schedule\", &args, &reply)\n\t\t\tcontinue\n\t\t}\n\n\t\tfmt.Printf(\"get reduce task %v\\n\", reply.TaskNum)\n\n\t\tkva := []KeyValue{}\n\t\tfor j := 0; j < reply.NTask; j++ {\n\t\t\tfilename := \"mr-\" + strconv.Itoa(j) + \"-\" + strconv.Itoa(reply.TaskNum)\n\t\t\tfile, err := os.Open(filename)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tdec := json.NewDecoder(file)\n\t\t\tfor {\n\t\t\t\tvar kv KeyValue\n\t\t\t\tif err := dec.Decode(&kv); err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tkva = append(kva, kv)\n\t\t\t}\n\t\t\tfile.Close()\n\t\t}\n\n\t\tsort.Sort(ByKey(kva))\n\n\t\toname := \"mr-out-\" + strconv.Itoa(reply.TaskNum)\n\t\tofile, _ := os.Create(oname)\n\n\t\ti := 0\n\n\t\tfmt.Printf(\"reduce task %v length %v\\n\", reply.TaskNum, len(kva))\n\t\tfor i < len(kva) {\n\t\t\tj := i + 1\n\t\t\tfor j < len(kva) && kva[j].Key == kva[i].Key {\n\t\t\t\tj++\n\t\t\t}\n\t\t\tvalues := []string{}\n\t\t\tfor k := i; k < j; k++ {\n\t\t\t\tvalues = append(values, kva[k].Value)\n\t\t\t}\n\t\t\toutput := reducef(kva[i].Key, values)\n\n\t\t\t// this is the correct format for each line of Reduce output.\n\t\t\tfmt.Fprintf(ofile, \"%v %v\\n\", kva[i].Key, output)\n\n\t\t\ti = j\n\t\t}\n\n\t\targs = MRArgs{}\n\t\targs.Phase = reducePhase\n\t\targs.TaskNum = reply.TaskNum\n\n\t\treply = MRReply{}\n\t\tcall(\"Master.Schedule\", &args, &reply)\n\t}\n\n}", "func Map(mapf func(string, string) []KeyValue, mapTask Task, NReducer int) {\n\tencs := make([]*json.Encoder, NReducer)\n\tfs := make([]*os.File, NReducer)\n\n\tfor i := 0; i < NReducer; i++ {\n\t\toname := fmt.Sprintf(\"mr-%v-%v.json\", mapTask.Index, i)\n\t\tf, err := os.Create(oname)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Cannot open file %v\", oname)\n\t\t}\n\t\tenc := json.NewEncoder(f)\n\t\tfs[i] = f\n\t\tencs[i] = enc\n\t}\n\n\tfile, err := os.Open(mapTask.FilePath)\n\tdefer file.Close()\n\tif err != nil {\n\t\tlog.Fatalf(\"cannot open %v\", mapTask.FilePath)\n\t}\n\tcontent, err := ioutil.ReadAll(file)\n\tif err != nil {\n\t\tlog.Fatalf(\"cannot read %v\", mapTask.FilePath)\n\t}\n\n\tkva := mapf(mapTask.FilePath, string(content))\n\n\tfor _, kv := range kva {\n\t\tid := ihash(kv.Key) % NReducer\n\t\tenc := encs[id]\n\t\terr := 
enc.Encode(&kv)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Cannot encode Key-Value pair %v\", kv)\n\t\t}\n\t}\n\tfor i := 0; i < NReducer; i++ {\n\t\tfs[i].Close()\n\t}\n\n}", "func main() {\n\tfmt.Println(\"Generating code\")\n\treduce.GenerateDispatcher(outputPath)\n\t//reduce.GenerateDispatcher(outputPath)\n}", "func (route *GrafanaNet) run(in chan []byte) {\n\tvar metrics []*schema.MetricData\n\tbuffer := new(bytes.Buffer)\n\n\ttimer := time.NewTimer(route.Cfg.FlushMaxWait)\n\tfor {\n\t\tselect {\n\t\tcase buf := <-in:\n\t\t\troute.numBuffered.Dec(1)\n\t\t\tmd, err := parseMetric(buf, route.schemas, route.Cfg.OrgID)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"RouteGrafanaNet: parseMetric failed: %s. skipping metric\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmd.SetId()\n\t\t\tmetrics = append(metrics, md)\n\n\t\t\tif len(metrics) == route.Cfg.FlushMaxNum {\n\t\t\t\tmetrics = route.retryFlush(metrics, buffer)\n\t\t\t\t// reset our timer\n\t\t\t\tif !timer.Stop() {\n\t\t\t\t\t<-timer.C\n\t\t\t\t}\n\t\t\t\ttimer.Reset(route.Cfg.FlushMaxWait)\n\t\t\t}\n\t\tcase <-timer.C:\n\t\t\ttimer.Reset(route.Cfg.FlushMaxWait)\n\t\t\tmetrics = route.retryFlush(metrics, buffer)\n\t\tcase <-route.shutdown:\n\t\t\tmetrics = route.retryFlush(metrics, buffer)\n\t\t\treturn\n\t\t}\n\t}\n\troute.wg.Done()\n}", "func (inst *Instance) Run(input map[string]interface{}) (output map[string]interface{}, err error) {\n\n\t// Get the Scope of the CML pipeline.\n\t// Scope is the collection of the data in the CML\n\tscope, err := NewPipelineScope(input, inst.def.labels)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Log the time\n\tstart := time.Now()\n\n\t//Check the type of the input of the pipeline.\n\tfor key, _ := range inst.def.input {\n\n\t\ttemp, ok := inst.def.input[key].(PipelineInput)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\terr = types.ValidateType(temp.Type, input[key])\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t}\n\n\t//Run the tasks.\n\tfor key, task := range inst.def.tasks {\n\t\ttask.Position()\n\t\tscope, err = task.Eval(scope, inst.logger)\n\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error %s in task \\\"%s-%v\\\" \", err.Error(), task.Name(), key)\n\t\t}\n\n\t}\n\n\t// Set the output.\n\n\tif inst.def.output.Data != nil {\n\t\tmf := GetMapperFactory()\n\t\tmappings := make(map[string]interface{})\n\n\t\t// Type Switch\n\t\tswitch t := inst.def.output.Data.(type) {\n\t\tcase map[string]interface{}:\n\t\t\tfor key, val := range t {\n\t\t\t\tmappings[key] = val\n\t\t\t}\n\t\tdefault:\n\t\t\tmappings[\"data\"] = inst.def.output.Data\n\t\t}\n\n\t\t// Get the data from output expression\n\t\toutMapper, err := mf.NewMapper(mappings)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\toutput, err = outMapper.Apply(scope)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvar definedType data.Type\n\n\t\t// Check if the output is defined as dataframe or map.\n\t\tif inst.def.output.Type == \"dataframe\" || inst.def.output.Type == \"map\" {\n\t\t\tdefinedType, err = data.ToTypeEnum(\"object\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tgivenType, err := data.GetType(output)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif definedType != givenType {\n\t\t\t\treturn nil, fmt.Errorf(\"Type mismatch in output. 
Defined type [%s] passed type [%s]\", definedType, givenType)\n\t\t\t}\n\n\t\t\tinst.logger.Infof(\"The output took %v to calculate\", time.Since(start))\n\n\t\t\treturn output, nil\n\t\t}\n\n\t\tdefinedType, _ = data.ToTypeEnum(inst.def.output.Type)\n\n\t\tfor key, _ := range output {\n\n\t\t\tgivenType, err := data.GetType(output[key])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif definedType != givenType {\n\t\t\t\treturn nil, fmt.Errorf(\"Type mismatch in output. Defined type [%s] passed type [%s]\", definedType, givenType)\n\t\t\t}\n\t\t}\n\n\t}\n\tinst.logger.Infof(\"The output took %v to calculate\", time.Since(start))\n\n\treturn output, nil\n\n}", "func Worker(mapf func(string, string) []KeyValue, reducef func(string, []string) string) {\n\t// Running on a single machine, so use the PID directly as the Worker ID for easy debugging\n\tid := strconv.Itoa(os.Getpid())\n\tlog.Printf(\"Worker %s started\\n\", id)\n\n\t// Loop, requesting Tasks from the Coordinator\n\tvar lastTaskType string\n\tvar lastTaskIndex int\n\tfor {\n\t\targs := ApplyForTaskArgs{\n\t\t\tWorkerID: id,\n\t\t\tLastTaskType: lastTaskType,\n\t\t\tLastTaskIndex: lastTaskIndex,\n\t\t}\n\t\treply := ApplyForTaskReply{}\n\t\tcall(\"Coordinator.ApplyForTask\", &args, &reply)\n\n\t\tif reply.TaskType == \"\" {\n\t\t\t// The MR job is complete; exit\n\t\t\tlog.Printf(\"Received job finish signal from coordinator\")\n\t\t\tbreak\n\t\t}\n\n\t\tlog.Printf(\"Received %s task %d from coordinator\", reply.TaskType, reply.TaskIndex)\n\t\tif reply.TaskType == MAP {\n\t\t\t// Read the input data\n\t\t\tfile, err := os.Open(reply.MapInputFile)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Failed to open map input file %s: %v\", reply.MapInputFile, err)\n\t\t\t}\n\t\t\tcontent, err := ioutil.ReadAll(file)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Failed to read map input file %s: %v\", reply.MapInputFile, err)\n\t\t\t}\n\t\t\t// Pass the input data to the MAP function to produce intermediate results\n\t\t\tkva := mapf(reply.MapInputFile, string(content))\n\t\t\t// Bucket the intermediate results by the hash of each Key\n\t\t\thashedKva := make(map[int][]KeyValue)\n\t\t\tfor _, kv := range kva {\n\t\t\t\thashed := ihash(kv.Key) % reply.ReduceNum\n\t\t\t\thashedKva[hashed] = append(hashedKva[hashed], kv)\n\t\t\t}\n\t\t\t// Write out the intermediate result files\n\t\t\tfor i := 0; i < reply.ReduceNum; i++ {\n\t\t\t\tofile, _ := os.Create(tmpMapOutFile(id, reply.TaskIndex, i))\n\t\t\t\tfor _, kv := range hashedKva[i] {\n\t\t\t\t\tfmt.Fprintf(ofile, \"%v\\t%v\\n\", kv.Key, kv.Value)\n\t\t\t\t}\n\t\t\t\tofile.Close()\n\t\t\t}\n\t\t} else if reply.TaskType == REDUCE {\n\t\t\t// Read the input data\n\t\t\tvar lines []string\n\t\t\tfor mi := 0; mi < reply.MapNum; mi++ {\n\t\t\t\tinputFile := finalMapOutFile(mi, reply.TaskIndex)\n\t\t\t\tfile, err := os.Open(inputFile)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalf(\"Failed to open map output file %s: %v\", inputFile, err)\n\t\t\t\t}\n\t\t\t\tcontent, err := ioutil.ReadAll(file)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalf(\"Failed to read map output file %s: %v\", inputFile, err)\n\t\t\t\t}\n\t\t\t\tlines = append(lines, strings.Split(string(content), \"\\n\")...)\n\t\t\t}\n\t\t\tvar kva []KeyValue\n\t\t\tfor _, line := range lines {\n\t\t\t\tif strings.TrimSpace(line) == \"\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tparts := strings.Split(line, \"\\t\")\n\t\t\t\tkva = append(kva, KeyValue{\n\t\t\t\t\tKey: parts[0],\n\t\t\t\t\tValue: parts[1],\n\t\t\t\t})\n\t\t\t}\n\t\t\tsort.Sort(ByKey(kva))\n\n\t\t\tofile, _ := os.Create(tmpReduceOutFile(id, reply.TaskIndex))\n\t\t\ti := 0\n\t\t\tfor i < len(kva) {\n\t\t\t\tj := i + 1\n\t\t\t\tfor j < len(kva) && kva[j].Key == kva[i].Key 
{\n\t\t\t\t\tj++\n\t\t\t\t}\n\t\t\t\tvar values []string\n\t\t\t\tfor k := i; k < j; k++ {\n\t\t\t\t\tvalues = append(values, kva[k].Value)\n\t\t\t\t}\n\t\t\t\toutput := reducef(kva[i].Key, values)\n\n\t\t\t\tfmt.Fprintf(ofile, \"%v %v\\n\", kva[i].Key, output)\n\n\t\t\t\ti = j\n\t\t\t}\n\t\t\tofile.Close()\n\t\t}\n\t\tlastTaskType = reply.TaskType\n\t\tlastTaskIndex = reply.TaskIndex\n\t\tlog.Printf(\"Finished %s task %d\", reply.TaskType, reply.TaskIndex)\n\t}\n\n\tlog.Printf(\"Worker %s exit\\n\", id)\n}", "func (p *spanParser) run() {\n\tfor p.state = parseSpan; p.state != nil; {\n\t\tp.state = p.state(p)\n\t}\n\tclose(p.spanChan)\n}", "func (mpd *MPD) Run() error {\n\tmpd.Info(\"maportd run\")\n\tfor key, mapper := range mpd.portMaps {\n\t\tmpd.Info(\"starting %s\", key)\n\t\tif err := mapper.Start(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (m *Master) MapReduceHandler(args *MapReduceArgs, reply *MapReduceReply) error {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tif args.MessageType == \"request\" {\n\t\tif !m.MapFinish {\n\t\t\tfor index, task := range m.MapTasks {\n\t\t\t\tif task.TaskStatus == \"Unassigned\" {\n\t\t\t\t\tm.MapTasks[index].TaskStatus = \"Assigned\"\n\t\t\t\t\treply.Task = m.MapTasks[index]\n\t\t\t\t\tgo m.checkTimeout(\"Map\", index, 10)\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\t\t\treply.Task.TaskType = \"Wait\"\n\t\t\treturn nil\n\t\t} else if !m.ReduceFinish {\n\t\t\tfor index, task := range m.ReduceTasks {\n\t\t\t\tif task.TaskStatus == \"Unassigned\" {\n\t\t\t\t\tm.ReduceTasks[index].TaskStatus = \"Assigned\"\n\t\t\t\t\treply.Task = m.ReduceTasks[index]\n\t\t\t\t\tgo m.checkTimeout(\"Reduce\", index, 10)\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\t\t\treply.Task.TaskType = \"Wait\"\n\t\t\treturn nil\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t} else if args.MessageType == \"finish\" {\n\t\tif args.Task.TaskType == \"Map\" {\n\t\t\tm.MapTasks[args.Task.TaskNum].TaskStatus = \"Finished\"\n\t\t\tm.NumMapFinished = m.NumMapFinished + 1\n\t\t\tif m.NumMapFinished == m.NumMap {\n\t\t\t\tm.MapFinish = true\n\t\t\t}\n\t\t} else {\n\t\t\tm.ReduceTasks[args.Task.TaskNum].TaskStatus = \"Finished\"\n\t\t\tm.NumReduceFinished = m.NumReduceFinished + 1\n\t\t\tif m.NumReduceFinished == m.NumReduce {\n\t\t\t\tm.ReduceFinish = true\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\treturn nil\n}", "func (r *Reader) Run(ctx context.Context, outChan chan cortex_chunk.Chunk) {\n\terrChan := make(chan error)\n\tdefer close(outChan)\n\n\treadCtx, cancel := context.WithCancel(ctx)\n\n\t// starting workers\n\tfor i := 0; i < r.cfg.NumWorkers; i++ {\n\t\tr.workerGroup.Add(1)\n\t\tgo r.readLoop(readCtx, outChan, errChan)\n\t}\n\n\tgo func() {\n\t\t// cancel context when an error occurs or errChan is closed\n\t\tdefer cancel()\n\n\t\terr := <-errChan\n\t\tif err != nil {\n\t\t\tr.err = err\n\t\t\tlogrus.WithError(err).Errorln(\"error scanning chunks, stopping read operation\")\n\t\t\tclose(r.quit)\n\t\t}\n\t}()\n\n\tscanRequests := r.planner.Plan()\n\tlogrus.Infof(\"built %d plans for reading\", len(scanRequests))\n\n\tdefer func() {\n\t\t// let's wait for all workers to finish before we return.\n\t\t// An error in errChan would cause all workers to stop because we cancel the context.\n\t\t// Otherwise closure of scanRequestsChan (which is done after sending all the scanRequests) should make all workers stop.\n\t\tr.workerGroup.Wait()\n\t\tclose(errChan)\n\t}()\n\n\t// feeding scan requests to workers\n\tfor _, req := range scanRequests {\n\t\tselect {\n\t\tcase 
r.scanRequestsChan <- req:\n\t\t\tcontinue\n\t\tcase <-r.quit:\n\t\t\treturn\n\t\t}\n\t}\n\n\t// all scan requests are fed, close the channel\n\tclose(r.scanRequestsChan)\n}", "func (m *mapper) start() {\n\tm.itr = m.executor.db.CreateIterator(m.seriesID, m.fieldID, m.typ,\n\t\tm.executor.min, m.executor.max, m.executor.interval)\n\tgo m.run()\n}", "func (w *SimpleMapReduce) Start() *SimpleMapReduce {\n if (w.hasStarted) {\n return w\n }\n\n w.hasStarted = true\n\n for i := 0; i < w.mappers; i++ {\n mapFn := w.mapFn\n mapperFinished := make(chan bool)\n w.mappersFinished[i] = mapperFinished\n\n // Parallel function which performs the map and adds the result to the reduction queue\n go func() {\n for item := range w.workQueue {\n res := mapFn(item)\n w.reduceQueue <- res\n }\n close(mapperFinished)\n }()\n }\n\n // If a reduction function is specified, start it. Otherwise, simply close the reducedFinish\n // channel.\n if (w.reduceFn != nil) {\n go func() {\n w.reduceFn(w.reduceQueue)\n close(w.reducedFinished)\n }()\n } else {\n close(w.reducedFinished)\n }\n\n return w\n}", "func (n *NodeDrainer) run(ctx context.Context) {\n\tfor {\n\t\tselect {\n\t\tcase <-n.ctx.Done():\n\t\t\treturn\n\t\tcase nodes := <-n.deadlineNotifier.NextBatch():\n\t\t\tn.handleDeadlinedNodes(nodes)\n\t\tcase req := <-n.jobWatcher.Drain():\n\t\t\tn.handleJobAllocDrain(req)\n\t\tcase allocs := <-n.jobWatcher.Migrated():\n\t\t\tn.handleMigratedAllocs(allocs)\n\t\t}\n\t}\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t//Your worker implementation here.\n\tmJobChan := make(chan MRJob)\n\trJobChan := make(chan MRJob)\n\tctx, cancel := context.WithCancel(context.Background()) // used to manage the MR Job\n\targs := MRArgs{\n\t\tStatus: \"INITIAL\",\n\t}\n\n\tgo requestJob(cancel, args, mJobChan, rJobChan)\n\n\tfor {\n\t\tselect {\n\t\tcase mJob := <-mJobChan:\n\t\t\terr := doMap(mapf, mJob)\n\t\t\tif err != nil {\n\t\t\t\targs.Status = \"FAILED\"\n\t\t\t} else {\n\t\t\t\targs.Status = \"FINISHED\"\n\t\t\t}\n\t\t\targs.MId = mJob.JobNum\n\t\t\targs.RId = -1\n\t\t\targs.JobType = \"MAP\"\n\t\t\tlog.Printf(\"MAP: %v, %v request Job\", args.Status, args.MId)\n\t\t\tgo requestJob(cancel, args, mJobChan, rJobChan)\n\t\tcase rJob := <-rJobChan:\n\t\t\terr := doReduce(reducef, rJob)\n\t\t\tif err != nil {\n\t\t\t\targs.Status = \"FAILED\"\n\t\t\t} else {\n\t\t\t\targs.Status = \"FINISHED\"\n\t\t\t}\n\t\t\targs.MId = -1\n\t\t\targs.RId = rJob.JobNum\n\t\t\targs.JobType = \"REDUCE\"\n\t\t\tlog.Printf(\"REDUCE: %v %v, request Job\", args.Status, args.RId)\n\t\t\tgo requestJob(cancel, args, mJobChan, rJobChan)\n\t\tcase <-ctx.Done():\n\t\t\tlog.Println(\"Worker is stopped\")\n\t\t\treturn\n\t\t}\n\t}\n\n\t// uncomment to send the Example RPC to the master.\n\t//CallExample()\n}", "func run(arg0 string, args ...string) error {\n\tcmd := exec.Command(arg0, args...)\n\tpipe, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcmd.Stderr = cmd.Stdout\n\n\tfmt.Println(\"Running command:\", arg0, strings.Join(args, \" \"))\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Stream the output from r10k as it is generated\n\tscanner := bufio.NewScanner(pipe)\n\tscanner.Split(bufio.ScanLines)\n\tfor scanner.Scan() {\n\t\tm := scanner.Text()\n\t\tfmt.Println(m)\n\t}\n\n\terr = cmd.Wait()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func Run() ([]*collectors.MetricResult, error) {\n\tvar (\n\t\tdata []byte\n\t\terr error\n\t)\n\tif data, 
err = loader(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn preformatter(data)\n}", "func (mgr *manager) run() {\n\tlog(mgr.reportingTo.Name(), \"working\", nil, false)\n\tdefer log(mgr.reportingTo.Name(), \"all done\", nil, false)\n\tstepFn := mgr.step_Accepting\n\tfor {\n\t\tif stepFn == nil {\n\t\t\tbreak\n\t\t}\n\t\tstepFn = stepFn()\n\t}\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\n\t// 1. notify master of worker creation\n\tworkerID, nReduce := WorkerCreation()\n\n\tfor true{\n\t\ttask, files, taskID := RequestWork()\n\n\t\tif task == \"done\"{\n\t\t\tfmt.Printf(\"Worker %v received done signal\", workerID)\n\n\t\t\t// Notify master of shut down completion\n\t\t\tWorkerShutDown(workerID)\n\t\t\treturn\n\t\t}\n\n\t\tif task == \"map\"{\n\t\t\tfmt.Printf(\"Worker %v received map task\\n\", workerID)\n\n\t\t\tfileName := files[0]\n\t\t\t// read file contents\n\t\t\tfile, _:= os.Open(fileName)\n\t\t\tcontents, _ := ioutil.ReadAll(file)\n\t\t\tfile.Close()\n\n\t\t\tkva := mapf(fileName, string(contents))\n\n\t\t\t// Generate 10 intermediate files\n\t\t\toffset := len(kva) / nReduce\n\t\t\tstart := 0\n\t\t\tend := start + offset\n\n\t\t\tintermediateFiles := make([]string, 0)\n\n\t\t\tfor i:=0; i<nReduce; i++{\n\t\t\t\tend = min(end, len(kva))\n\n\t\t\t\tsegment := kva[start:end]\n\t\t\t\tstart += offset\n\t\t\t\tend += offset\n\n\t\t\t\t// Write to intermediate file\n\t\t\t\tfileName := \"mrIntermediate-\" + strconv.Itoa(taskID) + \"-\" + strconv.Itoa(i)\n\t\t\t\tintermediateFiles = append(intermediateFiles, fileName)\n\n\t\t\t\tofile, _ := os.Create(fileName)\n\t\t\t\tfor j:=0; j<len(segment); j++{\n\t\t\t\t\tpair := segment[j]\n\n\t\t\t\t\tfmt.Fprintf(ofile, \"%v %v\\n\", pair.Key, pair.Value)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tMapDone(intermediateFiles)\n\n\t\t} else if task == \"reduce\"{\n\t\t\t// Create <word, list(pair(word, 1))> hash map\n\t\t\tkv_map := make(map[string]([]string))\n\n\t\t\tfmt.Printf(\"Worker %v reduce task received\\n\", workerID)\n\n\t\t\t// Hash all rows in each intermediate file\n\t\t\tfor i:=0; i<len(files); i++{\n\t\t\t\tfile := files[i]\n\n\t\t\t\t// read file contents\n\t\t\t\tf, _ := os.Open(file)\n\n\t\t\t\tscanner := bufio.NewScanner(f)\n\t\t\t\tfor scanner.Scan() {\n\t\t\t\t\tline := scanner.Text()\n\n\t\t\t\t\twords := strings.Fields(line)\n\t\t\t\t\tkey := words[0]\n\n\t\t\t\t\tkv_map[key] = append(kv_map[key], line)\n\t\t\t\t}\n\n\t\t\t\tf.Close()\n\t\t\t}\n\n\t\t\t// Sort keys in ascending order\n\t\t\tsortedKeys := make([]string, 0)\n\n\t\t\tfor k, _ := range kv_map{\n\t\t\t\tsortedKeys = append(sortedKeys, k)\n\t\t\t}\n\n\t\t\t// Create output file\n\t\t\tfileName := \"mr-out-\" + strconv.Itoa(taskID)\n\t\t\tofile, _ := os.Create(fileName)\n\n\t\t\t// Perform reduce on each sorted key\n\t\t\tfor i:=0; i<len(sortedKeys); i++{\n\t\t\t\tcount := reducef(sortedKeys[i], kv_map[sortedKeys[i]])\n\n\t\t\t\tfmt.Fprintf(ofile, \"%v %v\\n\", sortedKeys[i], count)\n\t\t\t}\n\t\t}\n\t}\n\n}", "func (m *ExpressionMachine) Run(w io.Writer) {\n\tfor m.Exp.Reducible() {\n\t\tfmt.Fprintln(w, m.Exp)\n\t\tm.Step()\n\t}\n\tfmt.Fprintln(w, m.Exp)\n}", "func (c *ConsensusState) run() {\n\tfor {\n\t\tselect {\n\t\tcase op := <-c.accessOp:\n\t\t\tlogger.Debugf(\"cycle %v execute op\", c.cycleId)\n\t\t\top.Execute()\n\t\t}\n\t}\n}", "func (p *blockParser) run() {\n\tfor p.state = parseBegin; p.state != nil; {\n\t\tp.state = 
p.state(p)\n\t}\n\tclose(p.blockChan)\n}", "func (r *sinkRunner) run(pipeID, componentID string, cancel chan struct{}, in <-chan message, meter *meter) <-chan error {\n\terrc := make(chan error, 1)\n\tgo func() {\n\t\tdefer close(errc)\n\t\tcall(r.reset, pipeID, errc) // reset hook\n\t\tvar m message\n\t\tvar ok bool\n\t\tfor {\n\t\t\t// receive new message\n\t\t\tselect {\n\t\t\tcase m, ok = <-in:\n\t\t\t\tif !ok {\n\t\t\t\t\tcall(r.flush, pipeID, errc) // flush hook\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase <-cancel:\n\t\t\t\tcall(r.interrupt, pipeID, errc) // interrupt hook\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tm.params.applyTo(componentID) // apply params\n\t\t\terr := r.fn(m.Buffer) // sink a buffer\n\t\t\tif err != nil {\n\t\t\t\terrc <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tmeter = meter.sample(int64(m.Buffer.Size())).message()\n\n\t\t\tm.feedback.applyTo(componentID) // apply feedback\n\t\t}\n\t}()\n\n\treturn errc\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tdelay := 0\n\tgo runMapTasks(mapf, delay, &wg)\n\twg.Add(1)\n\tgo runReduceTasks(reducef, delay, &wg)\n\n\twg.Wait()\n\t// fmt.Println(\"job completed\")\n}", "func (c *causality) run() {\n\tfor j := range c.inCh {\n\t\tmetrics.QueueSizeGauge.WithLabelValues(c.task, \"causality_input\", c.source).Set(float64(len(c.inCh)))\n\n\t\tstartTime := time.Now()\n\t\tif j.tp == flush {\n\t\t\tc.reset()\n\t\t} else {\n\t\t\tkeys := j.dml.identifyKeys()\n\t\t\t// detectConflict before add\n\t\t\tif c.detectConflict(keys) {\n\t\t\t\tc.logger.Debug(\"meet causality key, will generate a conflict job to flush all sqls\", zap.Strings(\"keys\", keys))\n\t\t\t\tc.outCh <- newConflictJob()\n\t\t\t\tc.reset()\n\t\t\t}\n\t\t\tj.dml.key = c.add(keys)\n\t\t\tc.logger.Debug(\"key for keys\", zap.String(\"key\", j.dml.key), zap.Strings(\"keys\", keys))\n\t\t}\n\t\tmetrics.ConflictDetectDurationHistogram.WithLabelValues(c.task, c.source).Observe(time.Since(startTime).Seconds())\n\n\t\tc.outCh <- j\n\t}\n}", "func (m *Migrator) Run() {\n\t// filename->key->val map\n\ti18nMultiMap := make(map[string]map[string]string)\n\n\t// run the workers to search for query\n\tfor _, src := range m.src {\n\t\tfilepath.Walk(src, func(path string, file os.FileInfo, err error) error {\n\t\t\tif !file.IsDir() {\n\t\t\t\tm.wg.Add(1)\n\t\t\t\tgo m.searchInFile(path)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\n\t// wait for workers and then close the result chan\n\tgo func() {\n\t\tm.wg.Wait()\n\t\tclose(m.result)\n\t}()\n\n\t// consume the result chan\n\tfor item := range m.result {\n\t\t_, ok := i18nMultiMap[item.filename]\n\t\tif !ok {\n\t\t\ti18nMultiMap[item.filename] = make(map[string]string)\n\t\t}\n\t\ti18nMultiMap[item.filename][item.key] = item.value\n\t}\n\n\t// write files\n\tfor filename, i18nMap := range i18nMultiMap {\n\t\tif len(i18nMap) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tf, err := os.Create(fmt.Sprintf(\"%s/%s\", m.dst, filename))\n\t\tdefer f.Close()\n\t\tcheck(err)\n\n\t\tfor _, key := range m.keys {\n\t\t\tif len(key) == 0 || strings.HasPrefix(key, \"#\") {\n\t\t\t\tf.WriteString(fmt.Sprintln(key))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tval, ok := i18nMap[key]\n\t\t\tif ok {\n\t\t\t\tf.WriteString(fmt.Sprintln(val))\n\t\t\t}\n\t\t}\n\t}\n}", "func (conn *db) runMap(stmt Stmt, mapper MapMapper) (rowsReturned int, err error) {\n\tif err = conn.Connect(); err != nil {\n\t\treturn\n\t}\n\n\tvar (\n\t\tstmtx *sqlx.Stmt\n\t\trows *sqlx.Rows\n\t\tt time.Time\n\t)\n\n\tif 
conn.hasProfiling() {\n\t\tt = time.Now()\n\t}\n\n\tstmtx, err = preparex(conn, stmt)\n\tif err == nil {\n\t\tdefer stmtx.Close()\n\t\trows, err = stmtx.Queryx(stmt.Args()...)\n\t\tif err == nil {\n\t\t\tdefer rows.Close()\n\n\t\t\trow := map[string]any{}\n\t\t\tfor rows.Next() {\n\t\t\t\terr = rows.MapScan(row)\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tmapper(row)\n\t\t\t\trowsReturned++\n\t\t\t}\n\t\t} else if errors.Is(err, sql.ErrNoRows) {\n\t\t\tif !conn.env.ErrorNoRows {\n\t\t\t\terr = nil\n\t\t\t}\n\t\t}\n\t}\n\n\tif err != nil && conn.hasVerbose() {\n\t\tconn.logErr.Println(err.Error())\n\t}\n\n\tconn.profilingStmt(stmt, err, t)\n\treturn\n}", "func (bl *LogBuffer) run() {\n\tfor {\n\t\tmsg, err := bl.ringBuffer.Pop()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif err := bl.logger.WriteLogMessage(msg); err != nil {\n\t\t\tlogrus.Debugf(\"failed to write log %v with log driver %s\", msg, bl.logger.Name())\n\t\t}\n\t}\n}", "func (j job) run() bool {\n\n // ignore data file processed by this job in case of a panic\n defer func() bool {\n if r := recover(); r != nil {\n log.Printf(\"Warning: Failed to parse file %s. Ignoring file. \" +\n \"Did you pick the correct column??\", j.fileName)\n }\n return false\n }()\n\n // main processing\n file, err := os.Open(j.fileName)\n if err != nil {\n log.Printf(\"Warning: Failed to open file %s. Ignoring file.\\n\",\n j.fileName)\n return false\n }\n defer file.Close()\n\n scanner := bufio.NewScanner(file)\n output := make([]float64,0)\n for scanner.Scan() {\n col, err := strconv.ParseFloat(strings.Fields(scanner.Text())[j.colID], 64)\n if err != nil {\n log.Printf(\"Warning: Failed to parse file %s. Ignoring file.\\n\",\n j.fileName)\n return false\n }\n\n output = append(output, col)\n }\n\n j.results <- output\n return true\n}", "func (m *Multiplexer) run(carrier Carrier) {\n\t// Start the reader Goroutine and monitor for its termination.\n\theartbeats := make(chan struct{}, 1)\n\treadErrors := make(chan error, 1)\n\tgo func() {\n\t\treadErrors <- m.read(carrier, heartbeats)\n\t}()\n\n\t// Start the writer Goroutine and monitor for its termination.\n\twriteErrors := make(chan error, 1)\n\tgo func() {\n\t\twriteErrors <- m.write(carrier)\n\t}()\n\n\t// Start the state accumulation/transmission Goroutine. It will only\n\t// terminate when the multiplexer is closed.\n\tgo m.enqueue()\n\n\t// Create a timer to enforce heartbeat reception and defer its shutdown. 
If\n\t// inbound heartbeats are not required, then just leave the timer stopped.\n\theartbeatTimeout := time.NewTimer(m.configuration.MaximumHeartbeatReceiveInterval)\n\tif m.configuration.MaximumHeartbeatReceiveInterval > 0 {\n\t\tdefer heartbeatTimeout.Stop()\n\t} else {\n\t\tif !heartbeatTimeout.Stop() {\n\t\t\t<-heartbeatTimeout.C\n\t\t}\n\t}\n\n\t// Loop until failure or multiplexer closure.\n\tfor {\n\t\tselect {\n\t\tcase <-heartbeats:\n\t\t\tif m.configuration.MaximumHeartbeatReceiveInterval > 0 {\n\t\t\t\tif !heartbeatTimeout.Stop() {\n\t\t\t\t\t<-heartbeatTimeout.C\n\t\t\t\t}\n\t\t\t\theartbeatTimeout.Reset(m.configuration.MaximumHeartbeatReceiveInterval)\n\t\t\t}\n\t\tcase err := <-readErrors:\n\t\t\tm.closeWithError(fmt.Errorf(\"read error: %w\", err))\n\t\t\treturn\n\t\tcase err := <-writeErrors:\n\t\t\tm.closeWithError(fmt.Errorf(\"write error: %w\", err))\n\t\t\treturn\n\t\tcase <-heartbeatTimeout.C:\n\t\t\tm.closeWithError(errors.New(\"heartbeat timeout\"))\n\t\t\treturn\n\t\tcase <-m.closed:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (mr *MapReduce) RunMaster() []int {\n\tnumMapJobs := mr.nMap\n\tnumReduceJobs := mr.nReduce\n\tvar w sync.WaitGroup\n\n\tfor mapJob := 0; mapJob < numMapJobs; mapJob++ {\n\t\tavailableWorker := <-mr.registerChannel\n\t\tfmt.Println(\"USING WORKER\", availableWorker, \"for Map Job\")\n\t\tw.Add(1)\n\t\tgo func(worker string, i int) {\n\t\t\tdefer w.Done()\n\t\t\tvar reply DoJobReply\n\t\t\targs := &DoJobArgs{mr.file, Map, i, mr.nReduce}\n\t\t\tok := call(worker, \"Worker.DoJob\", args, &reply)\n\t\t\tif !ok {\n\t\t\t\tfmt.Println(\"Map Job\", i, \"has FAILED\")\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Map Job\", i, \"is SUCCESS\")\n\t\t\t}\n\t\t\tmr.registerChannel <- worker\n\t\t}(availableWorker, mapJob)\n\t}\n\n\tw.Wait()\n\tfmt.Println(\"DONE WITH ALL MAP JOBS\")\n\n\tfor reduceJob := 0; reduceJob < numReduceJobs; reduceJob++ {\n\t\tavailableWorker := <-mr.registerChannel\n\t\tfmt.Println(\"USING WORKER\", availableWorker, \"for Reduce Job\")\n\t\tw.Add(1)\n\t\tgo func(worker string, i int) {\n\t\t\tdefer w.Done()\n\t\t\tvar reply DoJobReply\n\t\t\targs := &DoJobArgs{mr.file, Reduce, i, mr.nMap}\n\t\t\tok := call(worker, \"Worker.DoJob\", args, &reply)\n\t\t\tif !ok {\n\t\t\t\tfmt.Println(\"Reduce Job\", i, \"has FAILED\")\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Reduce Job\", i, \"is SUCCESS\")\n\t\t\t}\n\t\t\tmr.registerChannel <- worker\n\t\t}(availableWorker, reduceJob)\n\t}\n\n\tw.Wait()\n\tfmt.Println(\"DONE WITH ALL REDUCE JOBS\")\n\n\treturn mr.KillWorkers()\n}", "func (nm *NodeMonitor) run(sockPath, bpfRoot string) error {\n\tos.Remove(sockPath)\n\tif err := syscall.Mkfifo(sockPath, 0600); err != nil {\n\t\treturn fmt.Errorf(\"Unable to create named pipe %s: %s\", sockPath, err)\n\t}\n\n\tdefer os.Remove(sockPath)\n\n\tpipe, err := os.OpenFile(sockPath, os.O_RDWR, 0600)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to open named pipe for writing: %s\", err)\n\t}\n\n\tdefer pipe.Close()\n\n\tnm.pipeLock.Lock()\n\tnm.pipe = pipe\n\tnm.pipeLock.Unlock()\n\n\tnm.Launcher.SetArgs([]string{\"--bpf-root\", bpfRoot})\n\tif err := nm.Launcher.Run(); err != nil {\n\t\treturn err\n\t}\n\tmetrics.SubprocessStart.WithLabelValues(targetName).Inc()\n\n\tr := bufio.NewReader(nm.GetStdout())\n\tfor nm.GetProcess() != nil {\n\t\tl, err := r.ReadBytes('\\n') // this is a blocking read\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to read stdout from monitor: %s\", err)\n\t\t}\n\n\t\tvar tmp *models.MonitorStatus\n\t\tif err := json.Unmarshal(l, 
&tmp); err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to unmarshal stdout from monitor: %s\", err)\n\t\t}\n\n\t\tnm.setState(tmp)\n\t}\n\n\treturn fmt.Errorf(\"Monitor process quit unexpectedly\")\n}", "func (jr *joinReader) Run(wg *sync.WaitGroup) {\n\terr := jr.mainLoop()\n\tjr.output.Close(err)\n\tif wg != nil {\n\t\twg.Done()\n\t}\n}", "func (mc *MonitorCore) run(runtimeConf RuntimeConfig, stdin io.Reader, stdout io.Writer) error {\n\tmc.logger.Info(\"Starting Python runner child process\")\n\n\tcmd := exec.CommandContext(mc.ctx, runtimeConf.PythonBinary, runtimeConf.PythonArgs...)\n\tcmd.SysProcAttr = procAttrs()\n\tcmd.Stdin = stdin\n\tcmd.Stdout = stdout\n\tcmd.Env = runtimeConf.PythonEnv\n\n\t// Stderr is just the normal output from the Python code that isn't\n\t// specially encoded\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := cmd.Start(); err != nil {\n\t\treturn err\n\t}\n\n\tmc.logger = mc.logger.WithFields(log.Fields{\n\t\t\"runnerPID\": cmd.Process.Pid,\n\t})\n\n\tgo func() {\n\t\tscanner := utils.ChunkScanner(stderr)\n\t\tfor scanner.Scan() {\n\t\t\tmc.logger.Error(scanner.Text())\n\t\t}\n\t}()\n\n\tif err := cmd.Wait(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func _reduce(fn redfn, total int, c chan dict) dict {\n\tfinalMap := make(dict)\n\n\tfor worker := 0; worker < total; worker++ {\n\t\tm := <-c\n\t\tfn(finalMap, m)\n\t}\n\n\treturn finalMap\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\tmapTaskCount, reduceTaskCount := 0, 0\n\tfor true {\n\t\targs, reply := GetTaskArgs{}, GetTaskReply{}\n\t\tcall(\"Master.GetTask\", &args, &reply)\n\n\t\tif reply.TaskType == \"Map\" {\n\t\t\tmapTaskCount++\n\t\t\tdoMap(reply.FilePath, mapf, reply.MapTaskNum, reply.ReduceTaskCount)\n\t\t} else if reply.TaskType == \"Reduce\" {\n\t\t\treduceTaskCount++\n\t\t\tdoReduce(reply.ReduceTaskNum, reducef, reply.FilePathList)\n\t\t} else if reply.TaskType == \"Clean Exit\" {\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n\n}", "func (r *mutationStreamReader) run() {\n\n\t//panic handler\n\tdefer r.panicHandler()\n\n\tfor {\n\t\tselect {\n\n\t\tcase msg, ok := <-r.streamMutch:\n\n\t\t\tif ok {\n\t\t\t\tswitch msg.(type) {\n\t\t\t\tcase []*protobuf.VbKeyVersions:\n\t\t\t\t\tvbKeyVer := msg.([]*protobuf.VbKeyVersions)\n\t\t\t\t\tr.handleVbKeyVersions(vbKeyVer)\n\n\t\t\t\tdefault:\n\t\t\t\t\tr.handleStreamInfoMsg(msg)\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\t//stream library has closed this channel indicating\n\t\t\t\t//unexpected stream closure; send the message to the supervisor\n\t\t\t\tlogging.Fatalf(\"MutationStreamReader::run Unexpected Mutation \"+\n\t\t\t\t\t\"Channel Close for Stream %v\", r.streamId)\n\t\t\t\tmsgErr := &MsgError{\n\t\t\t\t\terr: Error{code: ERROR_STREAM_READER_STREAM_SHUTDOWN,\n\t\t\t\t\t\tseverity: FATAL,\n\t\t\t\t\t\tcategory: STREAM_READER}}\n\t\t\t\tr.supvRespch <- msgErr\n\t\t\t}\n\n\t\tcase <-r.killch:\n\t\t\treturn\n\t\t}\n\t}\n\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// init\n\ttaskId = 9999\n\n\t//\n\tfor {\n\t\ttime.Sleep(time.Second)\n\n\t\treply := CallAssign()\n\n\t\t// fmt.Println(reply)\n\n\t\tif reply.TaskId < 0 {\n\t\t\t// fmt.Println(\"Waiting to be assigned work...\")\n\t\t\tcontinue\n\t\t}\n\n\t\t// modify taskId; we will later tell the master who we are\n\t\ttaskId = reply.TaskId\n\n\t\tif reply.TaskType == \"map\" {\n\t\t\tfile, err := os.Open(reply.FileName)\n\t\t\tif err != nil 
{\n\t\t\t\tlog.Fatalf(\"cannot open %v\", reply.FileName)\n\t\t\t}\n\t\t\tcontent, err := ioutil.ReadAll(file)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"cannot read %v\", reply.FileName)\n\t\t\t}\n\t\t\tfile.Close()\n\t\t\tkva := mapf(reply.FileName, string(content))\n\n\t\t\t// sort\n\t\t\t// sort.Sort(ByKey(kva))\n\n\t\t\t// store intermediate kvs in tempFile\n\t\t\ttempFileName := \"tmp-\" + reply.TaskType + \"-\" + strconv.Itoa(reply.TaskId)\n\n\t\t\tfile, err = os.Create(tempFileName)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(\"cannot create %v\", tempFileName)\n\t\t\t}\n\n\t\t\t// transform k,v into json\n\t\t\tenc := json.NewEncoder(file)\n\t\t\tfor _, kv := range kva {\n\t\t\t\terr := enc.Encode(&kv)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\t//\n\t\t\tfile.Close()\n\n\t\t\t// try to delay sometime\n\t\t\t// ran := rand.Intn(4)\n\t\t\t// fmt.Printf(\"Sleep %v s\\n\", ran)\n\t\t\t// d := time.Second * time.Duration(ran)\n\t\t\t// time.Sleep(d)\n\n\t\t\t// tell the master the mapwork has done\n\t\t\tCallDoneTask(reply, tempFileName)\n\n\t\t} else if reply.TaskType == \"reduce\" {\n\t\t\t// fmt.Println(reply.TaskType)\n\n\t\t\tkva := []KeyValue{}\n\n\t\t\tfile, err := os.Open(reply.FileName)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\tdec := json.NewDecoder(file)\n\t\t\tfor {\n\t\t\t\tvar kv KeyValue\n\t\t\t\tif err := dec.Decode(&kv); err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tkva = append(kva, kv)\n\t\t\t}\n\n\t\t\toutputFileName := \"mr-out-\" + strconv.Itoa(reply.TaskIndex)\n\t\t\tofile, _ := os.Create(outputFileName)\n\n\t\t\t// sort\n\t\t\t// sort.Sort(ByKey(kva))\n\n\t\t\ti := 0\n\t\t\tfor i < len(kva) {\n\t\t\t\tj := i + 1\n\t\t\t\tfor j < len(kva) && kva[j].Key == kva[i].Key {\n\t\t\t\t\tj++\n\t\t\t\t}\n\t\t\t\tvalues := []string{}\n\t\t\t\tfor k := i; k < j; k++ {\n\t\t\t\t\tvalues = append(values, kva[k].Value)\n\t\t\t\t}\n\t\t\t\toutput := reducef(kva[i].Key, values)\n\n\t\t\t\t// fmt.Println(output)\n\n\t\t\t\tfmt.Fprintf(ofile, \"%v %v\\n\", kva[i].Key, output)\n\n\t\t\t\ti = j\n\t\t\t}\n\n\t\t\tofile.Close()\n\n\t\t\t// fmt.Printf(\"Reduce task %v has finished.\\n\", reply.TaskIndex)\n\n\t\t\t// ran := rand.Intn(4)\n\t\t\t// fmt.Printf(\"Sleep %v s\\n\", ran)\n\t\t\t// d := time.Second * time.Duration(ran)\n\t\t\t// time.Sleep(d)\n\n\t\t\tCallDoneTask(reply, outputFileName)\n\t\t} else if reply.TaskType == \"close\" {\n\t\t\t// fmt.Println(\"MapReduce has done. Exiting...\")\n\t\t\tbreak\n\t\t} else {\n\t\t\tfmt.Println(\"UnExcepted TaskType\")\n\t\t}\n\n\t}\n\n}", "func (w *SimpleMapReduce) Reduce (reduceFn ReduceFn) *SimpleMapReduce {\n w.reduceFn = reduceFn\n return w\n}", "func (m *M) Run() int" ]
[ "0.73335505", "0.66919565", "0.66014", "0.63472444", "0.63170433", "0.6134042", "0.6113632", "0.61034", "0.6091675", "0.6070612", "0.606971", "0.60531396", "0.6034752", "0.6015037", "0.6009111", "0.6002563", "0.5961895", "0.58987963", "0.58896095", "0.5886267", "0.58567", "0.5818648", "0.5816618", "0.57906616", "0.5777622", "0.57732093", "0.57698184", "0.57318395", "0.57284147", "0.5726023", "0.5715243", "0.5700773", "0.56870085", "0.5684559", "0.5673891", "0.565304", "0.56165075", "0.5615908", "0.5605462", "0.55993336", "0.55942917", "0.55811167", "0.55483574", "0.5527944", "0.5512549", "0.5505453", "0.5500031", "0.54876846", "0.54735243", "0.5470685", "0.54701847", "0.5417423", "0.54076594", "0.5395977", "0.5393011", "0.539107", "0.5386208", "0.5386208", "0.5368835", "0.5336129", "0.53312445", "0.5330048", "0.53277856", "0.53245145", "0.53046376", "0.52947915", "0.52918744", "0.5283362", "0.52519953", "0.52246654", "0.5210884", "0.51889056", "0.5188572", "0.51788443", "0.5170432", "0.51643294", "0.5160975", "0.5126327", "0.5123617", "0.51174337", "0.51069236", "0.5084163", "0.508142", "0.50763613", "0.50611025", "0.505451", "0.50396276", "0.50386256", "0.5035531", "0.5028385", "0.50235164", "0.50200826", "0.49853578", "0.49849668", "0.49803272", "0.496422", "0.49609315", "0.49463537", "0.49134773", "0.49114746" ]
0.7995375
0
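The first negative in the record above sketches a fan-out/fan-in pipeline: a feeder goroutine fills taskCh, a fixed pool of workers drains it, and a collector reduces outCh. Note that it calls wg.Add(1) inside the workers, after they have already started, which can let wg.Wait return before all work has been counted. Below is a minimal, self-contained sketch of the same pattern with the WaitGroup accounted for before the goroutines start; Task, runAll, and the sum reducer are hypothetical names for illustration, not part of any record in this dataset.

package main

import (
	"fmt"
	"sync"
)

// Task is a hypothetical unit of work; it returns a value or an error.
type Task func(input int) (int, error)

// runAll fans tasks out to a fixed pool of workers, collects the results,
// and reduces them once every worker has finished. The first error wins.
func runAll(tasks []Task, input int, reduce func([]int) int) (int, error) {
	taskCh := make(chan Task)
	go func() {
		for _, t := range tasks {
			taskCh <- t
		}
		close(taskCh)
	}()

	outCh := make(chan int)
	errCh := make(chan error, 1) // buffered: only the first error is kept

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1) // count the worker before it starts, so Wait cannot pass early
		go func() {
			defer wg.Done()
			for t := range taskCh {
				o, err := t(input)
				if err != nil {
					select {
					case errCh <- err: // record the first error
					default: // an error was already recorded
					}
					continue
				}
				outCh <- o
			}
		}()
	}
	go func() {
		wg.Wait()
		close(outCh)
	}()

	var res []int
	for o := range outCh {
		res = append(res, o)
	}
	select {
	case err := <-errCh:
		return 0, err
	default:
		return reduce(res), nil
	}
}

func main() {
	sum := func(xs []int) int {
		total := 0
		for _, x := range xs {
			total += x
		}
		return total
	}
	double := func(x int) (int, error) { return 2 * x, nil }
	out, err := runAll([]Task{double, double, double}, 21, sum)
	fmt.Println(out, err) // prints: 126 <nil>
}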
emit sends a value to the reducer's output channel.
func (r *reducer) emit(key string, value interface{}) { r.c <- map[string]interface{}{key: value} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c channelConveyor) Emit(v interface{}) error {\n\tc.outputCh <- v\n\n\treturn nil\n}", "func (m *mapper) emit(key int64, value interface{}) {\n\t// Encode the timestamp to the beginning of the key.\n\tbinary.BigEndian.PutUint64(m.key, uint64(key))\n\n\t// OPTIMIZE: Collect emit calls and flush all at once.\n\tm.c <- map[string]interface{}{string(m.key): value}\n}", "func (b Broadcaster) Write(v interface{}) {\n\tutils.Debugf(\"Sending %v\\n\", v)\n\tb.Sendc <- v // write value on send channel\n}", "func (p *blockParser) emit(b Block) {\n\tp.blockChan <- b\n\tp.start = p.cur\n}", "func (s *Scanner) emit(t Token) {\n\ts.Items <- &Item{Lit: s.TokenText(), Pos: s.Position, Tok: t}\n}", "func (bus *EventBus) Emit(msg Message) {\n\tbus.input <- msg\n}", "func (s *server) send(value interface{}) (interface{}, error) {\n\tevent := &ev{target: value, c: make(chan error, 1)}\n\ts.c <- event\n\terr := <-event.c\n\treturn event.returnValue, err\n}", "func (lx *Lexer) emit(t token.Type) {\n\tlx.tokens <- token.Token{\n\t\tType: t,\n\t\tVal: lx.input[lx.start:lx.pos],\n\t\tLine: lx.line,\n\t}\n\tlx.start = lx.pos\n}", "func emitOutput(ctx context.Context, n *node) stateFn {\n\tif n == nil || n.outputC == nil { // OMIT\n\t\treturn nil // OMIT\n\t} // OMIT\n\tselect {\n\tcase <-ctx.Done():\n\t\tn.err = ctx.Err()\n\t\treturn nil\n\tcase n.outputC <- n.output:\n\t}\n\treturn nil\n}", "func (s Sequence) Output(c SeqChan) {s.Do(func(el El){c <- el})}", "func (l *Lexer) emit(t TokenType) {\n\tl.tokens <- Token{t, l.start, l.input[l.start:l.pos]}\n\tl.start = l.pos\n}", "func (l *lexer) emit(t tokenType) {\n\ti := token{t, l.start, l.input[l.start:l.pos]}\n\tl.start = l.pos\n\tl.items <- i\n}", "func (l *reader) emit(t itemType) {\n\tl.items <- item{t, l.current.String()}\n\tl.current.Reset()\n\tl.width = 0\n}", "func (p *spanParser) emit(s Span) {\n\tp.spanChan <- s\n\tp.start = p.cur\n}", "func (l *Lexer) emit(t tokenType) {\n\tl.tokens <- NewToken(t, l.start, l.input[l.start:l.pos])\n\tl.start = l.pos\n}", "func (s *scanner) emit(t token) {\n\ts.items <- tokenRef{t, s.start, s.input[s.start:s.pos]}\n\ts.start = s.pos\n}", "func (l *Lexer) emit(t TokenType) {\n\tl.tokens <- Token{t, l.cache()}\n\tl.start = l.pos\n}", "func (e *encoder) emit(bits, nBits uint32) {\n\tnBits += e.nBits\n\tbits <<= 32 - nBits\n\tbits |= e.bits\n\tfor nBits >= 8 {\n\t\tb := uint8(bits >> 24)\n\t\te.writeByte(b)\n\t\tif b == 0xff {\n\t\t\te.writeByte(0x00)\n\t\t}\n\t\tbits <<= 8\n\t\tnBits -= 8\n\t}\n\te.bits, e.nBits = bits, nBits\n}", "func (r *Relay) Emit(ctx context.Context) {\n\n}", "func (c *Connection) Emit(eventName string, data interface{}) {\n\tvar ev event\n\tev.Name = eventName\n\tev.Data = data\n\tc.send <- ev\n}", "func (a *Actor) Send(m string) { a.input <- m }", "func (l *lexer) emit(t TokenType) {\r\n\tl.tokens <- Token{t, l.start, l.input[l.start:l.pos], l.line}\r\n\tl.start = l.pos //move to current pos\r\n}", "func (s Sink) Output(o Output) {\n\ts(o)\n}", "func (t *Tokeniser) emit(typ Type) {\n\ttknEmitted := t.token(typ)\n\tt.prevToken = &tknEmitted\n\tt.Tokens.Push(tknEmitted)\n\tt.ignore()\n}", "func (a aio) output() float64 {\n\tsCh := make(chan float64)\n\ta.oCh <- sCh\n\treturn <-sCh\n}", "func (b *Broadcaster) Write(v interface{}) {\n\tc := make(chan message, 1)\n\tb.mx.Lock()\n\tdefer b.mx.Unlock()\n\n\tb.c <- message{v, c}\n\tb.c = c\n}", "func (l *lexer) emit(t itemType) {\n\tl.items <- item{t, l.input[l.start:l.pos]}\n\tl.start = l.pos\n}", "func (l *lexer) emit(t itemType) {\n\tl.items <- 
item{t, l.pos, l.input[l.start:l.pos]}\n\tl.start = l.pos\n}", "func (l *lexer) emit(t tokenType) {\n\tl.tokens <- token{t, l.start, l.runeCnt, string(l.input[l.start:l.pos])}\n\tl.start = l.pos\n\tl.runeCnt = 0\n}", "func (m *metricBigipNodeDataTransmitted) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (this Client) emit(message interface{}) {\n mu.Lock()\n for _, client := range clients {\n websocket.JSON.Send(client.Websocket, message)\n }\n mu.Unlock()\n}", "func (l *lexer) emit() string {\n ret := l.input[l.start:l.pos]\n l.start = l.pos\n return ret\n}", "func (b *Broadcaster) Send(v interface{}) { b.Sendc <- v }", "func (l *lexer) emit(t itemType) {\n\tl.items <- item{t, l.start, l.input[l.start:l.pos]}\n\tl.start = l.pos\n}", "func (l *lexer) emit(t itemType) {\n\tl.items <- item{t, l.start, l.input[l.start:l.pos]}\n\tl.start = l.pos\n}", "func (l *lexer) emit(t itemType) {\n\tl.items <- item{t, l.start, l.input[l.start:l.pos]}\n\tl.start = l.pos\n}", "func (l *lexer) emit(typ tokenType) {\n\tl.tokens = append(l.tokens, token{\n\t\ttyp: typ,\n\t\tval: l.in[l.start:l.pos],\n\t})\n\tl.start = l.pos\n}", "func (l *Lexer) Emit(t Type, value interface{}) {\n\tl.q.push(Item{\n\t\tType: t,\n\t\tPos: l.S,\n\t\tValue: value,\n\t})\n\tl.updateStart()\n}", "func (ms *metricSender) SendValue(name string, value float64, unit string) error {\n\treturn ms.eventEmitter.Emit(&events.ValueMetric{Name: &name, Value: &value, Unit: &unit})\n}", "func (m *metricRedisClientsMaxOutputBuffer) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (lex *Lexer) emit(it TokenType) {\n\tlex.tokens <- Token{it, lex.input[lex.start:lex.pos]}\n\tlex.start = lex.pos\n}", "func (e *Emitter) Emit(topic string, value interface{}) (done chan struct{}) {\n\te.mu.RLock()\n\tdefer e.mu.RUnlock()\n\n\tdone = make(chan struct{})\n\n\tif e.topicListeners == nil {\n\t\tclose(done)\n\t\treturn done\n\t}\n\tlns, ok := e.topicListeners[topic]\n\tif !ok || len(lns) == 0 {\n\t\tclose(done)\n\t\treturn done\n\t}\n\n\tgo func() {\n\t\tdefer close(done)\n\t\tfor _, lnch := range lns {\n\t\t\tlnch <- value\n\t\t}\n\t}()\n\treturn done\n}", "func (op *output) Value() uint64 {\n\treturn op.value\n}", "func (op *output) Value() uint64 {\n\treturn op.value\n}", "func (op *output) Value() uint64 {\n\treturn op.value\n}", "func (l *lexer) emit(k Token) {\n\ti := Item{T: k, Val: l.input[l.start:l.position]}\n\tl.items <- i\n\tl.ignore() // reset our scanner now that we've dispatched a segment\n}", "func (l *lexer) emit(k Token) {\n\ti := Item{T: k, Val: l.input[l.start:l.position]}\n\tl.items <- i\n\tl.ignore() // reset our scanner now that we've dispatched a segment\n}", "func (l *Lexer) emit(t itemType) {\n\tl.items <- item{t, l.blob()}\n\tl.start = l.pos\n}", "func (l *lexer) emit(t token.ItemType) {\n\tl.Items <- token.Token{t, l.start, l.input[l.start:l.pos]}\n\tl.start = l.pos\n}", "func (m *metricRedisLatestFork) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (w *Writer) Emit(typ byte, args ...uint64) {\n\tnargs := byte(len(args)) - 1\n\tif nargs > 3 {\n\t\tnargs = 3\n\t}\n\tbuf := []byte{typ | 
nargs<<6}\n\tif nargs == 3 {\n\t\tbuf = append(buf, 0)\n\t}\n\tfor _, a := range args {\n\t\tbuf = appendVarint(buf, a)\n\t}\n\tif nargs == 3 {\n\t\tbuf[1] = byte(len(buf) - 2)\n\t}\n\tn, err := w.Write(buf)\n\tif n != len(buf) || err != nil {\n\t\tpanic(\"failed to write\")\n\t}\n}", "func (l *lexer) emit(t itemType) {\n\tl.items <- item{t, l.start, l.input[l.start:l.pos], l.startLine}\n\tl.start = l.pos\n\tl.startLine = l.line\n}", "func (m *metricRedisClientsMaxInputBuffer) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (p *program) doWriteOutput(i *instruction) {\n if p.outChannel != nil {\n p.outChannel <- i.params[0].value\n } else {\n p.dataStack = append(p.dataStack, i.params[0].value)\n }\n p.position += i.length\n\n if p.haltOnOutput {\n p.halt = true\n }\n}", "func (c *Compiler) emit(op operation.Opcode, operands ...int) int {\n\tins := operation.NewInstruction(op, operands...)\n\tpos := c.addInstruction(ins)\n\tc.setEmitted(op, pos)\n\treturn pos\n}", "func (m *metricRedisNetInput) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricRedisConnectionsReceived) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (c *IChan) Send(value []byte) error {\n\tcheckState(c.input)\n\treturn c.send(value)\n}", "func (m *metricBigipVirtualServerDataTransmitted) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricRedisNetOutput) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (r *Room) Emit(actionType string, data interface{}) {\n\tfor s := range r.sockets {\n\t\ts.Emit(actionType, data)\n\t}\n}", "func (e *Stream) Emit(data string) error {\n\t_, err := fmt.Fprintf(e.writer, \"data: %s\\n\\n\", data)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te.flusher.Flush()\n\n\treturn nil\n}", "func (tt *Tester) handleEmit(topic string, key string, value []byte, options ...EmitOption) *goka.Promise {\n\topts := new(emitOption)\n\topts.applyOptions(options...)\n\t_, finisher := goka.NewPromiseWithFinisher()\n\toffset := tt.pushMessage(topic, key, value, opts.headers)\n\treturn finisher(&sarama.ProducerMessage{Offset: offset}, nil)\n}", "func (c *ClickhouseOutput) Emit(event map[string]interface{}) {\n\tc.mux.Lock()\n\tc.events = append(c.events, event)\n\tif len(c.events) < c.bulk_actions {\n\t\tc.mux.Unlock()\n\t\treturn\n\t}\n\n\tevents := c.events\n\tc.events = make([]map[string]interface{}, 0, c.bulk_actions)\n\tc.mux.Unlock()\n\n\tc.bulkChan <- events\n}", "func (l *lexer) emit(t itemType) {\n\tl.items <- item{t, l.start, l.input[l.start:l.pos], l.line}\n\tl.prevItemType = t\n\tl.start = l.pos\n}", "func (m *metricRedisCmdUsec) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricMysqlIndexIoWaitTime) 
emit(metrics pmetric.MetricSlice) {\n\tif m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func Emit(ctx context.Context, value []byte, to Recipient) error {\n\t// TombstonesDelay doesn't matter for Add.\n\td := dsset.Set{Parent: to.Key}\n\t// Keep IDs well distributed, but record creation time in it.\n\t// See also oldestEventAge().\n\tid := fmt.Sprintf(\"%s/%d\", uuid.New().String(), clock.Now(ctx).UnixNano())\n\tif err := d.Add(ctx, []dsset.Item{{ID: id, Value: value}}); err != nil {\n\t\treturn errors.Annotate(err, \"failed to send event\").Err()\n\t}\n\tmetricSent.Add(ctx, 1, to.MonitoringString)\n\treturn nil\n}", "func (s *Socket) Emit(event event, msgType MessageType, args interface{}) (err error) {\n\tvar pktType PacketType\n\tswitch event {\n\tcase EventOpen:\n\t\tpktType = PacketTypeOpen\n\tcase EventMessage:\n\t\tpktType = PacketTypeMessage\n\tcase EventClose:\n\t\tpktType = PacketTypeClose\n\t// case EventError:\n\t// case EventUpgrade:\n\tcase EventPing:\n\t\tpktType = PacketTypePing\n\tcase EventPong:\n\t\tpktType = PacketTypePong\n\tdefault:\n\t\treturn\n\t}\n\tvar data []byte\n\tif d, ok := args.([]byte); ok {\n\t\tdata = d\n\t} else if s, ok := args.(string); ok {\n\t\tdata = []byte(s)\n\t} else {\n\t\tdata, err = json.Marshal(args)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn s.emitter.submit(&Packet{msgType: msgType, pktType: pktType, data: data})\n}", "func (m *metricAerospikeNodeConnectionCount) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricRedisCommandsProcessed) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (o StreamOptimizer) Output(ctx context.Context, c StreamConsumer) (err error) {\n\tfor change := range o.changeQ {\n\n\t\t// There are two reasons we may want to abort early. 
Either the context has terminated, or there was an error\n\t\t// during processing\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// If error is non-nil, we'll abort upon next iteration.\n\t\terr = c.Send(change)\n\t}\n\n\treturn\n}", "func (m *metricBigipPoolMemberDataTransmitted) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricFlinkMemoryManagedUsed) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (n *JsonStream) Send(v interface{}) error {\n\tif n.ctx.Err() != nil {\n\t\treturn n.ctx.Err()\n\t}\n\n\tvar buf bytes.Buffer\n\tenc := codec.NewEncoder(&buf, structs.JsonHandleWithExtensions)\n\terr := enc.Encode(v)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error marshaling json for stream: %w\", err)\n\t}\n\n\tselect {\n\tcase <-n.ctx.Done():\n\t\treturn fmt.Errorf(\"error stream is no longer running: %w\", err)\n\tcase n.outCh <- &structs.EventJson{Data: buf.Bytes()}:\n\t}\n\n\treturn nil\n}", "func (g *gaugeMetric) Emit(c LogClient) {\n\toptions := []loggregator.EmitGaugeOption{\n\t\tloggregator.WithGaugeValue(\n\t\t\tg.name,\n\t\t\ttoFloat64(atomic.LoadUint64(&g.value), 2),\n\t\t\tg.unit,\n\t\t),\n\t\tg.sourceIDOption,\n\t}\n\n\tfor k, v := range g.tags {\n\t\toptions = append(options, loggregator.WithEnvelopeTag(k, v))\n\t}\n\n\tc.EmitGauge(options...)\n}", "func (fmp *FlatMap) OutputChan() <-chan interface{} {\n\treturn fmp.out\n}", "func (m *metricBigipPoolDataTransmitted) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricBigipNodeConnectionCount) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricFlinkJvmCPUTime) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricRedisCommands) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricBigipNodeEnabled) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricBigipNodeRequestCount) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricRedisSlavesConnected) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func Emit(conn *dbus.Conn, s Signal) error {\n\treturn conn.Emit(s.path(), s.Interface()+\".\"+s.Name(), s.values()...)\n}", "func (m *metricRedisMemoryUsed) emit(metrics pmetric.MetricSlice) {\n\tif 
m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricRedisClientsBlocked) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricAerospikeNodeConnectionOpen) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricRedisClientsConnected) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricBigipNodePacketCount) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricBigipNodeSessionCount) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricMysqlIndexIoWaitCount) emit(metrics pmetric.MetricSlice) {\n\tif m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricRedisRole) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func writer(coord string) {\n\tbroadcast <- coord\n}", "func (m *metricRedisCmdCalls) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (i *Input) SetValue(value Message) error {\n\t// we store the marshalled value in the Input so we can access it later\n\ti.Lock()\n\ti.Value = value\n\ti.Unlock()\n\n\t// then, to set an input to a particular value, we just push\n\t// that value to that input, as though we had a little pusher block.\n\n\t// first kill any existing value pusher\n\tstopValuePusher(i)\n\n\t// then set the pusher going\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase i.Connection <- value:\n\t\t\tcase <-i.quitChan:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn nil\n}", "func (m *metricActiveDirectoryDsNotificationQueued) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricSshcheckSftpStatus) emit(metrics pmetric.MetricSlice) {\n\tif m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricSshcheckStatus) emit(metrics pmetric.MetricSlice) {\n\tif m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *metricRedisKeyspaceHits) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}" ]
[ "0.7070084", "0.6710396", "0.6272342", "0.6061797", "0.59561294", "0.5938369", "0.5937713", "0.5937449", "0.58059376", "0.57770425", "0.5764841", "0.5728065", "0.57185686", "0.5700262", "0.56946415", "0.5682005", "0.5662726", "0.5661662", "0.5653689", "0.56264323", "0.56157774", "0.55973655", "0.5596187", "0.55869937", "0.55823654", "0.55621153", "0.5555855", "0.55482197", "0.5544769", "0.5538461", "0.55321246", "0.5518272", "0.551715", "0.55077016", "0.55077016", "0.55077016", "0.54985416", "0.54978764", "0.5491316", "0.54802054", "0.54765123", "0.54656774", "0.5459561", "0.5459561", "0.5459561", "0.54567087", "0.54567087", "0.5450022", "0.54493856", "0.5385701", "0.53797245", "0.5332826", "0.53174895", "0.5304909", "0.52978086", "0.5282988", "0.52677643", "0.52675384", "0.5265652", "0.52627796", "0.52483547", "0.52443945", "0.52328706", "0.5216047", "0.5216027", "0.5193144", "0.5189542", "0.51884466", "0.5181268", "0.5171165", "0.5168362", "0.5165621", "0.5161015", "0.5158129", "0.51568866", "0.5153941", "0.5153714", "0.51495266", "0.51471615", "0.51418674", "0.5141416", "0.51357055", "0.51339084", "0.51326525", "0.5127999", "0.5126344", "0.51189554", "0.5115152", "0.51085407", "0.50844854", "0.5083799", "0.5075896", "0.5075808", "0.50752556", "0.507158", "0.5070621", "0.50670457", "0.5065834", "0.5062011", "0.5058057" ]
0.7351743
0
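The document above pushes each reduced value onto a channel as a single-entry map, and a consumer is expected to range over that channel until it is closed. A minimal usage sketch of that hand-off, assuming a hypothetical reducer type that merely mirrors the shape shown above:

package main

import "fmt"

// reducer mirrors the shape used in the record above: results are
// emitted as single-entry maps on an output channel.
type reducer struct {
	c chan map[string]interface{}
}

func (r *reducer) emit(key string, value interface{}) {
	r.c <- map[string]interface{}{key: value}
}

func main() {
	r := &reducer{c: make(chan map[string]interface{})}
	go func() {
		r.emit("clicks", 42.0)
		r.emit("views", 7.0)
		close(r.c) // closing tells the consumer that no more rows follow
	}()
	for row := range r.c {
		for k, v := range row {
			fmt.Printf("%s=%v\n", k, v)
		}
	}
}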
reduceSum computes the sum of values for each key.
func reduceSum(key string, values []interface{}, r *reducer) { var n float64 for _, v := range values { n += v.(float64) } r.emit(key, n) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *Map) ReduceIntSum(reduce func(map[interface{}]interface{}) int) int {\n\tresult := 0\n\tsplits := m.splits\n\tfor i := 0; i < len(splits); i++ {\n\t\tresult += splits[i].reduceInt(reduce)\n\t}\n\treturn result\n}", "func (m *Map) ReduceStringSum(reduce func(map[interface{}]interface{}) string) string {\n\tresult := \"\"\n\tsplits := m.splits\n\tfor i := 0; i < len(splits); i++ {\n\t\tresult += splits[i].reduceString(reduce)\n\t}\n\treturn result\n}", "func (m *Map) ReduceFloat64Sum(reduce func(map[interface{}]interface{}) float64) float64 {\n\tresult := float64(0)\n\tsplits := m.splits\n\tfor i := 0; i < len(splits); i++ {\n\t\tresult += splits[i].reduceFloat64(reduce)\n\t}\n\treturn result\n}", "func ReduceSum(values []interface{}) interface{} {\n\tvar n float64\n\tcount := 0\n\tvar resultType NumberType\n\tfor _, v := range values {\n\t\tif v == nil {\n\t\t\tcontinue\n\t\t}\n\t\tcount++\n\t\tswitch n1 := v.(type) {\n\t\tcase float64:\n\t\t\tn += n1\n\t\tcase int64:\n\t\t\tn += float64(n1)\n\t\t\tresultType = Int64Type\n\t\t}\n\t}\n\tif count > 0 {\n\t\tswitch resultType {\n\t\tcase Float64Type:\n\t\t\treturn n\n\t\tcase Int64Type:\n\t\t\treturn int64(n)\n\t\t}\n\t}\n\treturn nil\n}", "func reduceFunc(input []mapreduce.KeyValue) (result []mapreduce.KeyValue) {\r\n\t// \tMaybe it's easier if we have an auxiliary structure? Which one?\r\n\t//\r\n\t// \tYou can check if a map have a key as following:\r\n\t// \t\tif _, ok := myMap[myKey]; !ok {\r\n\t//\t\t\t// Don't have the key\r\n\t//\t\t}\r\n\t//\r\n\t// \tReduce will receive KeyValue pairs that have string values, you may need\r\n\t// \tconvert those values to int before being able to use it in operations.\r\n\t// \tpackage strconv: func Atoi(s string) (int, error)\r\n\t//\r\n\t// \tIt's also possible to receive a non-numeric value (i.e. \"+\"). You can check the\r\n\t// \terror returned by Atoi and if it's not 'nil', use 1 as the value.\r\n\r\n\t/////////////////////////\r\n\t// YOUR CODE GOES HERE //\r\n\t/////////////////////////\r\n\r\n\tresult = make([]mapreduce.KeyValue, 0)\r\n\r\n\t// This auxiliary map is used to count the number of occurrences of a certain key\r\n\tauxiliaryMap := make(map[string]int)\r\n\r\n\tfor _, keyValuePair := range input {\r\n\r\n\t\t// If the value in the input key-value pair is numeric, then parse the value and update\r\n\t\t// the corresponding value in the auxiliary map.\r\n\t\t// If a key is not in the auxiliary map, 0 is returned as the corresponding value.\r\n\t\t// Using that fact, it's not necessary to check for the key.\r\n\t\tif value, err := strconv.Atoi(keyValuePair.Value); err == nil {\r\n\t\t\tauxiliaryMap[keyValuePair.Key] += value\r\n\r\n\t\t// If it's a non-numeric value, count as 1 occurrence\r\n\t\t// This considers that all possible non-numeric values are equivalent \r\n\t\t// (e.g. 
\"-\" or \"+\" have the same meaning when used as values)\r\n\t\t} else {\r\n\t\t\tauxiliaryMap[keyValuePair.Key] += 1\r\n\t\t}\r\n\t}\r\n\r\n\t// Convert the key-value pairs in auxiliary map to the output format (array of mapreduce.KeyValue structs)\r\n\tfor key, value := range auxiliaryMap {\r\n\t\tresult = append(result, mapreduce.KeyValue{key, strconv.Itoa(value)})\r\n\t}\r\n\treturn result\r\n}", "func (mr MrImpl) Reduce(key string, values []string) string {\n\tcounter := 0\n\tfor _, v := range values {\n\t\tval, err := strconv.Atoi(v)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tcounter += val\n\t}\n\n\treturn fmt.Sprintf(\"%d\", counter)\n}", "func (m *Map) ParallelReduceIntSum(reduce func(map[interface{}]interface{}) int) int {\n\tvar recur func(splits []Split) int\n\trecur = func(splits []Split) int {\n\t\tif len(splits) < 2 {\n\t\t\t// NewMap and case 2 below ensure that len(splits) > 0\n\t\t\treturn splits[0].reduceInt(reduce)\n\t\t}\n\t\tvar left, right int\n\t\tvar p interface{}\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tswitch len(splits) {\n\t\tcase 2:\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = splits[1].reduceInt(reduce)\n\t\t\t}()\n\t\t\tleft = splits[0].reduceInt(reduce)\n\t\tdefault:\n\t\t\thalf := len(splits) / 2\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(splits[half:])\n\t\t\t}()\n\t\t\tleft = recur(splits[:half])\n\t\t}\n\t\twg.Wait()\n\t\tif p != nil {\n\t\t\tpanic(p)\n\t\t}\n\t\treturn left + right\n\t}\n\treturn recur(m.splits)\n}", "func Reduce(key string, values *list.List) string { \n\n // initialize total to 0\n // for every value in the list\n // convert the value to an integer\n // check for an error! 
:) \n // add the integer value to the total\n // return the total\n\n\tvar total = 0\n\t\n\tfor e := values.Front(); e != nil; e = e.Next() {\n \n if val, err := strconv.Atoi(e.Value.(string)); err == nil { \n total += val\n } else {\n fmt.Printf(\"Error converting the interface to an integer\\n\")\n }\n\t}\n\n return strconv.Itoa(total) \n\n}", "func Sum(items []Value) (op int) {\n\tfor _, item := range items {\n\t\top += item.Value()\n\t}\n\treturn\n}", "func (m *Map) ParallelReduceStringSum(reduce func(map[interface{}]interface{}) string) string {\n\tvar recur func(splits []Split) string\n\trecur = func(splits []Split) string {\n\t\tif len(splits) < 2 {\n\t\t\t// NewMap and case 2 below ensure that len(splits) > 0\n\t\t\treturn splits[0].reduceString(reduce)\n\t\t}\n\t\tvar left, right string\n\t\tvar p interface{}\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tswitch len(splits) {\n\t\tcase 2:\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = splits[1].reduceString(reduce)\n\t\t\t}()\n\t\t\tleft = splits[0].reduceString(reduce)\n\t\tdefault:\n\t\t\thalf := len(splits) / 2\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(splits[half:])\n\t\t\t}()\n\t\t\tleft = recur(splits[:half])\n\t\t}\n\t\twg.Wait()\n\t\tif p != nil {\n\t\t\tpanic(p)\n\t\t}\n\t\treturn left + right\n\t}\n\treturn recur(m.splits)\n}", "func (m *Map) ParallelReduceFloat64Sum(reduce func(map[interface{}]interface{}) float64) float64 {\n\tvar recur func(splits []Split) float64\n\trecur = func(splits []Split) float64 {\n\t\tif len(splits) < 2 {\n\t\t\t// NewMap and case 2 below ensure that len(splits) > 0\n\t\t\treturn splits[0].reduceFloat64(reduce)\n\t\t}\n\t\tvar left, right float64\n\t\tvar p interface{}\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tswitch len(splits) {\n\t\tcase 2:\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = splits[1].reduceFloat64(reduce)\n\t\t\t}()\n\t\t\tleft = splits[0].reduceFloat64(reduce)\n\t\tdefault:\n\t\t\thalf := len(splits) / 2\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(splits[half:])\n\t\t\t}()\n\t\t\tleft = recur(splits[:half])\n\t\t}\n\t\twg.Wait()\n\t\tif p != nil {\n\t\t\tpanic(p)\n\t\t}\n\t\treturn left + right\n\t}\n\treturn recur(m.splits)\n}", "func Sum(values ...float64) float64 {\n\tres := 0.0\n\n\tfor _, v := range values {\n\t\tres += v\n\t}\n\n\treturn res\n}", "func (m mathUtil) Sum(values ...float64) float64 {\n\tvar total float64\n\tfor _, v := range values {\n\t\ttotal += v\n\t}\n\treturn total\n}", "func Sum(d []float64) (sum float64) {\n\tfor x := 0; x < len(d); x++ {\n\t\tsum += d[x]\n\t}\n\treturn\n}", "func mapSum(itr Iterator, m *mapper) {\n\tn := float64(0)\n\tfor k, v := itr.Next(); k != 0; k, v = itr.Next() {\n\t\tn += v.(float64)\n\t}\n\tm.emit(itr.Time(), n)\n}", "func (h *Hash) Sum(b []byte) []byte {\n\tx := h.Sum64()\n\treturn append(b,\n\t\tbyte(x>>0),\n\t\tbyte(x>>8),\n\t\tbyte(x>>16),\n\t\tbyte(x>>24),\n\t\tbyte(x>>32),\n\t\tbyte(x>>40),\n\t\tbyte(x>>48),\n\t\tbyte(x>>56))\n}", "func wcReduceF(key string, values []string) string {\n\t// TODO: you also have to write this function\n\treturnValue := 0\n\n\tfor _, v:= range values{\n\t\tintValue, _ := strconv.Atoi(v)\n\t\treturnValue 
+= intValue\n\t}\n\n\treturn strconv.Itoa(returnValue)\n}", "func MergeSum(vals []uint64) uint64 {\n\trv := vals[0]\n\tfor _, v := range vals[1:] {\n\t\trv += v\n\t}\n\treturn rv\n}", "func Sum(v []float64) float64 {\n\ttotal := 0.0\n\tfor _, number := range v {\n\t\ttotal = total + number\n\t}\n\treturn total\n}", "func Sum(vals ...float64) float64 {\n\tsum := 0.0\n\tfor _, v := range vals {\n\t\tsum += v\n\t}\n\treturn sum\n}", "func (d *RabinKarp64) Sum(b []byte) []byte {\n\tv := d.Sum64()\n\treturn append(b, byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v))\n}", "func Sum(sl []float64) float64 {\n\tres := float64(0)\n\tfor _, val := range sl {\n\t\tres += val\n\t}\n\treturn res\n}", "func (xxh xxHash) Sum(b []byte) []byte {\n\th64 := xxh.Sum64()\n\treturn append(b, byte(h64), byte(h64>>8), byte(h64>>16), byte(h64>>24), byte(h64>>32), byte(h64>>40), byte(h64>>48), byte(h64>>56))\n}", "func (d *digest) Sum(in []byte) []byte {\r\n\ts := d.Sum64()\r\n\treturn append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s))\r\n}", "func (h *HashReader) Sum() []byte {\n\treturn h.hasher.Sum(nil)\n}", "func DoSum() float64", "func (g *Graph) ReduceSum(x Node) Node {\n\treturn g.NewOperator(fn.NewReduceSum(x), x)\n}", "func (a SumAggregator) Aggregate(values []float64) float64 {\n\tresult := 0.0\n\tfor _, v := range values {\n\t\tresult += v\n\t}\n\treturn result\n}", "func Sum(field string) AggregateFunc {\n\treturn func(start, end string) (string, *dsl.Traversal) {\n\t\tif end == \"\" {\n\t\t\tend = DefaultSumLabel\n\t\t}\n\t\treturn end, __.As(start).Unfold().Values(field).Sum().As(end)\n\t}\n}", "func Sum(input []float64) (sum float64) {\n\tfor _, v := range input {\n\t\tvar fuel float64\n\t\tfuel += calculator(v, fuel)\n\t\tsum += fuel\n\t}\n\treturn sum\n}", "func (h *hmacsha256) Sum() []byte {\n\th.outer.Reset()\n\th.outer.Write(h.opad[:])\n\th.outer.Write(h.inner.Sum(nil))\n\treturn h.outer.Sum(nil)\n}", "func SumNumbers[K comparable, V Number](m map[K]V) V {\n\tvar s V\n\tfor _, v := range m {\n\t\ts += v\n\t}\n\treturn s\n}", "func sum(b *bolt.Bucket, fn func([]byte) int) (int, error) {\n\tsum := 0\n\terr := b.ForEach(func(_, v []byte) error {\n\t\tsum += fn(v)\n\t\treturn nil\n\t})\n\treturn sum, err\n}", "func Sum(numbers []int) int {\n\tadd := func(acc, x int) int { return acc + x }\n\treturn Reduce(numbers, add, 0)\n}", "func reduceF(key string, values []string) string {\n\t// TODO: you also have to write this function\n\tcount := 0\n\tfor _, value := range values {\n\t\tnum, _ := strconv.ParseInt(value, 10, 64)\n\t\tcount = count + int(num)\n\t}\n\treturn strconv.Itoa(count)\n}", "func Sum(xs ...float64) float64 {\n\tvar s float64\n\tfor i := 0; i < len(xs); i++ {\n\t\ts += xs[i]\n\t}\n\n\treturn s\n}", "func Reduce(key string, values []string) string {\n\txSum := 0.0\n\tySum := 0.0\n\tfor _, pointStr := range values {\n\t\tpointSplit := strings.Split(pointStr, \" \")\n\t\txStr := pointSplit[0]\n\t\tyStr := pointSplit[1]\n\t\txVal, error := strconv.ParseFloat(xStr, 64)\n\t\tif error != nil {\n\t\t\tfmt.Println(\"Error in Map X:\", error)\n\t\t}\n\t\tyVal, error := strconv.ParseFloat(yStr, 64)\n\t\tif error != nil {\n\t\t\tfmt.Println(\"Error in Map Y:\", error)\n\t\t}\n\t\txSum += xVal\n\t\tySum += yVal\n\t}\n\txAvg := xSum / float64(len(values))\n\tyAvg := ySum / float64(len(values))\n\treturn fmt.Sprintf(\"%f %f %s\", xAvg, yAvg, key)\n}", "func (m AddressAmountMap) Sum() uint64 
{\n\tvar sum uint64\n\tfor _, amount := range m {\n\t\tsum += amount\n\t}\n\treturn sum\n}", "func TestUserDefinedTableReduce(t *testing.T) {\n\ttable := newUDTestTable(hash.Int(0), 100)\n\tsess := gqltest.NewSession()\n\tsess.SetGlobal(\"udtable\", gql.NewTable(table))\n\texpect.That(t,\n\t\t// Reduce by the last digit of the string key.\n\t\tgqltest.ReadTable(gqltest.Eval(t,\n\t\t\t`udtable | reduce({regexp_replace($key1, \"^str...(.).*\", \"$1\")}, _acc+_val, map:=$key0, shards:=1)`, sess)),\n\t\th.WhenSorted(h.ElementsAre(\n\t\t\t\"{key:{f0:0},value:10450}\",\n\t\t\t\"{key:{f0:1},value:10460}\",\n\t\t\t\"{key:{f0:2},value:10470}\",\n\t\t\t\"{key:{f0:3},value:10480}\",\n\t\t\t\"{key:{f0:4},value:10490}\",\n\t\t\t\"{key:{f0:5},value:10500}\",\n\t\t\t\"{key:{f0:6},value:10510}\",\n\t\t\t\"{key:{f0:7},value:10520}\",\n\t\t\t\"{key:{f0:8},value:10530}\",\n\t\t\t\"{key:{f0:9},value:10540}\")))\n}", "func Sum(in []int) (total int) {\n\ttotal = 0\n\tfor _, v := range in {\n\t\ttotal += v\n\t}\n\treturn\n}", "func TestSum(t *testing.T) {\n\ttestMap := [][]int{\n\t\t{1, 1, 2},\n\t\t{2, 2, 4},\n\t\t{4, 4, 8},\n\t\t{5, 15, 20},\n\t}\n\n\tfor _, v := range testMap {\n\t\tif i := Sum(v[0], v[1]); i != v[2] {\n\t\t\tt.Errorf(\"Error at Sum(%d, %d) returned %d\", v[0], v[1], i)\n\t\t}\n\t}\n}", "func (d *state) Sum(in []byte) []byte {\n\t// Make a copy of the original hash so that caller can keep writing\n\t// and summing.\n\tdup := d.clone()\n\thash := make([]byte, dup.outputLen)\n\tdup.Read(hash)\n\treturn append(in, hash...)\n}", "func (s VectOp) Reduce(f fs.ReduceOperator, identity float64) float64 {\n\tresult := identity\n\tfor _, val := range s {\n\t\tresult = f(val, result)\n\t}\n\treturn result\n}", "func Sum(by []string, input []*oproto.ValueStream) []*oproto.ValueStream {\n\toutput := []*oproto.ValueStream{{Variable: input[0].Variable}}\n\tiPos := make([]int, len(input))\n\tfor {\n\t\tvalues := []float64{}\n\t\ttimestamps := []uint64{}\n\t\tfor i := 0; i < len(input); i++ {\n\t\t\tif iPos[i] >= len(input[i].Value) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif input[i] != nil {\n\t\t\t\tvalues = append(values, input[i].Value[iPos[i]].GetDouble())\n\t\t\t\ttimestamps = append(timestamps, input[i].Value[iPos[i]].Timestamp)\n\t\t\t}\n\t\t\tiPos[i]++\n\t\t}\n\t\tif len(values) == 0 {\n\t\t\tbreak\n\t\t}\n\t\tvar total float64\n\t\tfor _, i := range values {\n\t\t\ttotal += i\n\t\t}\n\t\tvar tsTotal uint64\n\t\tfor _, i := range timestamps {\n\t\t\ttsTotal += i\n\t\t}\n\t\toutput[0].Value = append(output[0].Value, value.NewDouble(tsTotal/uint64(len(timestamps)), total))\n\t}\n\treturn output\n}", "func (m *Map) Reduce(\n\treduce func(map[interface{}]interface{}) interface{},\n\tjoin func(x, y interface{}) interface{},\n) interface{} {\n\tsplits := m.splits\n\t// NewMap ensures that len(splits) > 0\n\tresult := splits[0].reduce(reduce)\n\tfor i := 1; i < len(splits); i++ {\n\t\tresult = join(result, splits[i].reduce(reduce))\n\t}\n\treturn result\n}", "func sum(arr []float64) float64 {\n\tif len(arr) == 0 {\n\t\treturn 0.0\n\t}\n\n\tresult := 0.0\n\tfor _, v := range arr {\n\t\tresult += v\n\t}\n\treturn result\n}", "func (v Vec) Sum() float64 {\n\treturn v.Reduce(func(a, e float64) float64 { return a + e }, 0.0)\n}", "func Sum(h *simulator.Handle, vecs ...[]float64) []float64 {\n\tfor _, v := range vecs[1:] {\n\t\tif len(v) != len(vecs[0]) {\n\t\t\tpanic(\"mismatching lengths\")\n\t\t}\n\t}\n\tres := make([]float64, len(vecs[0]))\n\tfor _, v := range vecs {\n\t\tfor i, x := range v {\n\t\t\tres[i] += 
x\n\t\t}\n\t}\n\n\t// Simulate computation time.\n\th.Sleep(FlopTime * float64(len(vecs)*len(vecs[0])))\n\n\treturn res\n}", "func (d Digest64) Sum(b []byte) []byte {\n\th1 := d.Sum64()\n\treturn append(b,\n\t\tbyte(h1>>56), byte(h1>>48), byte(h1>>40), byte(h1>>32),\n\t\tbyte(h1>>24), byte(h1>>16), byte(h1>>8), byte(h1))\n}", "func SumIntsOrFloats[K comparable, V int64 | float64](m map[K]V) V {\n\tvar s V\n\tfor _, v := range m {\n\t\ts += v\n\t}\n\treturn s\n}", "func (sshConfig *SSHConfig) Sum(path string) (result string, err error) {\n\treturn sshConfig.internalSum(\"sum\", path)\n}", "func Sum(list []float64) float64 {\n\ttotal := 0.0\n\tfor _, item := range list {\n\t\ttotal += item\n\t}\n\n\treturn total\n}", "func Reduce[ValT, AccumT any](\n\tslc []ValT,\n\tstart AccumT,\n\tfn func(AccumT, ValT) AccumT,\n) AccumT {\n\tret := start\n\tfor _, t := range slc {\n\t\tret = fn(ret, t)\n\t}\n\treturn ret\n}", "func (set Int64Set) Sum() int64 {\n\tsum := int64(0)\n\tfor v, _ := range set {\n\t\tsum = sum + v\n\t}\n\treturn sum\n}", "func sumValues(values ...int) (result int) {\n\tfmt.Println(values)\n\tfor _, v := range values {\n\t\tresult += v\n\t}\n\treturn\n}", "func (i *identity) Sum(b []byte) []byte {\n\treturn append(b, i.data...)\n}", "func (ref *digest) Sum(dst []byte) []byte {\n\tdgt := *ref\n\thsh := [64]byte{}\n\tdgt.Close(hsh[:], 0, 0)\n\treturn append(dst, hsh[:]...)\n}", "func (ref *digest) Sum(dst []byte) []byte {\n\tdgt := *ref\n\thsh := [64]byte{}\n\tdgt.Close(hsh[:], 0, 0)\n\treturn append(dst, hsh[:]...)\n}", "func Sum(args ...int) (res int) {\n\tfor _, v := range args {\n\t\tres += v\n\t}\n\n\treturn\n}", "func Reduce(key string, values []string) string {\n\t// some reduce tasks sleep for a long time; potentially seeing if\n\t// a worker will accidentally exit early\n\tif strings.Contains(key, \"sherlock\") || strings.Contains(key, \"tom\") {\n\t\ttime.Sleep(time.Duration(3 * time.Second))\n\t}\n\t// return the number of occurrences of this file.\n\treturn strconv.Itoa(len(values))\n}", "func Sum(data []byte, h0, h1, h2, h3, h4 uint32, desiredLen int) [Size]byte {\n\tvar d digest\n\td.Reset(h0, h1, h2, h3, h4)\n\td.Write(data)\n\treturn d.checkSum(desiredLen)\n}", "func Reduce(elements []Value, memo Value, reductor BiMapper) Value {\n\tfor _, elem := range elements {\n\t\tmemo = reductor(memo, elem)\n\t}\n\treturn memo\n}", "func Sum(data []byte) [Size]byte {\n\tvar d digest\n\td.Reset()\n\td.Write(data)\n\treturn d.checkSum()\n}", "func Sum64(msg []byte, key *[KeySize]byte) uint64 {\n\tk0 := binary.LittleEndian.Uint64(key[0:])\n\tk1 := binary.LittleEndian.Uint64(key[8:])\n\n\tvar hVal [4]uint64\n\thVal[0] = k0 ^ c0\n\thVal[1] = k1 ^ c1\n\thVal[2] = k0 ^ c2\n\thVal[3] = k1 ^ c3\n\n\tn := len(msg)\n\tctr := byte(n)\n\n\tif n >= BlockSize {\n\t\tn &= (^(BlockSize - 1))\n\t\tcore(&hVal, msg[:n])\n\t\tmsg = msg[n:]\n\t}\n\n\tvar block [BlockSize]byte\n\tcopy(block[:], msg)\n\tblock[7] = ctr\n\n\treturn finalize64(&hVal, &block)\n}", "func Sum(vals ...int) int {\n\ttotal := 0\n\n\tfor _, val := range vals {\n\t\ttotal += val\n\t}\n\n\treturn total\n}", "func MapSum[T Number](slicesOfItems [][]T) []T {\n\tresult := make([]T, 0, len(slicesOfItems))\n\n\tfor _, items := range slicesOfItems {\n\t\tresult = append(result, Sum(items))\n\t}\n\treturn result\n}", "func Sum(series []Series) (Series, error) {\n\treturn applyOperator(series, OperatorSum)\n}", "func (r *Rollsum32) Sum(b []byte) []byte {\n\tif b != nil && cap(b)-len(b) >= 4 {\n\t\tp := len(b)\n\t\tb = 
b[:len(b)+4]\n\t\tr.Rollsum32Base.GetSum(b[p:])\n\t\treturn b\n\t} else {\n\t\tresult := []byte{0, 0, 0, 0}\n\t\tr.Rollsum32Base.GetSum(result)\n\t\treturn append(b, result...)\n\t}\n}", "func (r *Rollsum32) Sum(b []byte) []byte {\n\tif b != nil && cap(b)-len(b) >= 4 {\n\t\tp := len(b)\n\t\tb = b[:len(b)+4]\n\t\tr.Rollsum32Base.GetSum(b[p:])\n\t\treturn b\n\t} else {\n\t\tresult := []byte{0, 0, 0, 0}\n\t\tr.Rollsum32Base.GetSum(result)\n\t\treturn append(b, result...)\n\t}\n}", "func (this *KxiWorker) Reduce(key interface{}, values []interface{}) (kv mr.KeyValue) {\n kv = mr.NewKeyValue()\n switch key.(mr.GroupKey).Group() {\n case GROUP_URL_SERV:\n vals := mr.ConvertAnySliceToFloat(values)\n kv[TIME_ALL] = stats.StatsSum(vals)\n kv[TIME_MAX] = stats.StatsMax(vals)\n kv[TIME_TOP] = stats.StatsSumTopN(vals, topsum)\n kv[TIME_AVG] = stats.StatsMean(vals)\n kv[TIME_STD] = stats.StatsSampleStandardDeviationCoefficient(vals)\n kv[CALL_ALL] = float64(stats.StatsCount(vals))\n case GROUP_KXI:\n vals := mr.ConvertAnySliceToFloat(values)\n kv[TIME_ALL] = stats.StatsSum(vals)\n kv[TIME_MIN] = stats.StatsMin(vals)\n kv[TIME_TOP] = stats.StatsSumTopN(vals, topsum)\n kv[TIME_MAX] = stats.StatsMax(vals)\n kv[TIME_AVG] = stats.StatsMean(vals)\n kv[TIME_STD] = stats.StatsSampleStandardDeviationCoefficient(vals)\n kv[CALL_ALL] = float64(stats.StatsCount(vals))\n case GROUP_URL_RID:\n vals := mr.ConvertAnySliceToFloat(values)\n kv[CALL_ALL] = float64(stats.StatsCount(vals))\n kv[TIME_ALL] = stats.StatsSum(vals)\n case GROUP_URL_SQL:\n vals := mr.ConvertAnySliceToFloat(values)\n kv[CALL_ALL] = float64(stats.StatsCount(vals))\n kv[TIME_MAX] = stats.StatsMax(vals)\n kv[TIME_AVG] = stats.StatsMean(vals)\n case GROUP_URL:\n vals := mr.ConvertAnySliceToString(values) // rids of this url\n c := stats.NewCounter(vals)\n kv[REQ_ALL] = float64(len(c))\n }\n\n return\n}", "func ReduceAnyValueToAnyValue(in []AnyValue, memo AnyValue, f func(AnyValue, AnyValue) AnyValue) AnyValue {\n\tfor _, value := range in {\n\t\tmemo = f(memo, value)\n\t}\n\treturn memo\n}", "func (h *ihash) Sum(b []byte) []byte {\n\tn, err := h.s.Read(h.buf)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"alg: failed to read out finalized hash: %v\", err))\n\t}\n\n\treturn append(b, h.buf[:n]...)\n}", "func reduceF(key string, values []string) string {\n\t// Your code here (Part II).\n\treturn strconv.Itoa(len(values))\n}", "func (r Result) Sum() int {\n\tvar s int\n\n\tfor _, n := range r.rolls {\n\t\ts += n.N\n\t}\n\n\treturn s\n}", "func SumAll(in ...[]int) (sums []int) {\n\tfor _, v := range in {\n\t\tsums = append(sums, Sum(v))\n\t}\n\treturn\n}", "func (c *Clac) Sum() error {\n\treturn c.applyFloat(variadic, func(vals []value.Value) (value.Value, error) {\n\t\treturn reduceFloat(zero, vals, func(a, b value.Value) (value.Value, error) {\n\t\t\treturn binary(a, \"+\", b)\n\t\t})\n\t})\n}", "func (f ReduceFunc) Reduce(value [][]byte) (reduced [][]byte, err error) {\n\treturn f(value)\n}", "func Reduce(data []float64, mean float64, exponent float64) float64 {\r\n\treturn Accumulate(data, 0, func(a float64, b float64) float64 {\r\n\t\treturn a + math.Pow(b-mean, exponent)\r\n\t})\r\n}", "func sliceSum(a []float64) float64", "func (v Vec) Reduce(f func(accum, elem float64) float64, startVal float64) float64 {\n\taccum := startVal\n\tfor _, val := range v {\n\t\taccum = f(accum, val)\n\t}\n\treturn accum\n}", "func AllReduceInt64(comm Comm, in, out *int64, n int, op Op) {\n\tC.MPI_Allreduce(unsafe.Pointer(in), unsafe.Pointer(out), C.int(n), 
C.MPI_Datatype(MPI_i64), C.MPI_Op(SUM), C.MPI_Comm(comm))\n}", "func (d Digest128) Sum(b []byte) []byte {\n\th1, h2 := d.Sum128()\n\treturn append(b,\n\t\tbyte(h1>>56), byte(h1>>48), byte(h1>>40), byte(h1>>32),\n\t\tbyte(h1>>24), byte(h1>>16), byte(h1>>8), byte(h1),\n\n\t\tbyte(h2>>56), byte(h2>>48), byte(h2>>40), byte(h2>>32),\n\t\tbyte(h2>>24), byte(h2>>16), byte(h2>>8), byte(h2),\n\t)\n}", "func bzReduce(combine bzConsumer, start float64, L, d int, fn BzFunc) float64 {\n\tpoints := bzPoints(L, d)\n\ttotal := start\n\tfor i := 0; i < len(points); i++ {\n\t\tk := points[i]\n\t\ttotal = combine(fn(k), total)\n\t}\n\treturn total\n}", "func sumResults(sumResult []float64) float64 {\n\tresult := 0.0\n\tfor _, s := range sumResult {\n\t\tresult += s\n\t}\n\treturn result\n}", "func (bit *BIT) Sum(k int) T {\n\tret := T{0}\n\tfor i := k; i > 0; i -= i & -i {\n\t\tret.val += bit.Bit[i].val\n\t}\n\treturn ret\n}", "func MapSum(itr Iterator) interface{} {\n\tn := float64(0)\n\tcount := 0\n\tvar resultType NumberType\n\tfor k, v := itr.Next(); k != -1; k, v = itr.Next() {\n\t\tcount++\n\t\tswitch n1 := v.(type) {\n\t\tcase float64:\n\t\t\tn += n1\n\t\tcase int64:\n\t\t\tn += float64(n1)\n\t\t\tresultType = Int64Type\n\t\t}\n\t}\n\tif count > 0 {\n\t\tswitch resultType {\n\t\tcase Float64Type:\n\t\t\treturn n\n\t\tcase Int64Type:\n\t\t\treturn int64(n)\n\t\t}\n\t}\n\treturn nil\n}", "func (h SenHash) Sum(b []byte) []byte {\n\tb = append(b, h.blake512hasher.Sum(nil)[:32]...)\n\treturn b\n}", "func Reduce[T any, R any](collection []T, accumulator func(R, T, int) R, initial R) R {\n\tfor i, item := range collection {\n\t\tinitial = accumulator(initial, item, i)\n\t}\n\n\treturn initial\n}", "func (d digest) Sum(in []byte) []byte {\n\t// Note d is a copy so that the caller can keep writing and summing.\n\n\tsum := d.checkSum()\n\tif d.Size() == Size224 {\n\t\treturn append(in, sum[:Size224]...)\n\t}\n\treturn append(in, sum[:]...)\n}", "func (bh *BuzHash) Sum(b []byte) []byte {\n\tvar buf bytes.Buffer\n\tbinary.Write(&buf, binary.LittleEndian, bh.state)\n\thash := buf.Bytes()\n\tfor _, hb := range hash {\n\t\tb = append(b, hb)\n\t}\n\n\treturn b\n}", "func (t *Tensor) sum(axis int) (retVal *Tensor) {\n\treturn t.reduce(axis, vecAdd, sum, add)\n}", "func _reduce(fn redfn, total int, c chan dict) dict {\n\tfinalMap := make(dict)\n\n\tfor worker := 0; worker < total; worker++ {\n\t\tm := <-c\n\t\tfn(finalMap, m)\n\t}\n\n\treturn finalMap\n}", "func (tf *TracingFingerprint) Sum() []byte {\n\treturn tf.Fingerprint.Sum()\n}", "func sum(nums []int64) (sum int64) {\n\tfor _, v := range nums {\n\t\tsum += v\n\t}\n\treturn\n}", "func doSum(h hash.Hash, b []byte, data ...[]byte) ([]byte, error) {\n\th.Reset()\n\tfor _, v := range data {\n\t\tvar err error\n\t\t_, err = h.Write(v)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn h.Sum(b), nil\n}", "func TotalSum(array []int) (ts int) {\n\tfor _, v := range array {\n\t\tts += v\n\t}\n\treturn\n}", "func Reduce(in interface{}, memo interface{}, fn reduceFn) interface{} {\n\tresult := memo\n\tval := reflect.ValueOf(in)\n\n\tfor i := 0; i < val.Len(); i++ {\n\t\tresult = fn(result, val.Index(i).Interface())\n\t}\n\n\treturn result\n}", "func sum(args ...float64) float64 {\n\ttotal := float64(0)\n\tfor _, arg := range args {\n\t\ttotal += arg\n\t}\n\treturn total\n}", "func SparseReduceSum(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceSumAttr) (output tf.Output) 
{\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\tattrs := map[string]interface{}{}\n\tfor _, a := range optional {\n\t\ta(attrs)\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"SparseReduceSum\",\n\t\tInput: []tf.Input{\n\t\t\tinput_indices, input_values, input_shape, reduction_axes,\n\t\t},\n\t\tAttrs: attrs,\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}", "func Sum(out *[16]byte, m []byte, key *[32]byte) {\n\tr := key\n\ts := key[16:]\n\tvar (\n\t\ty7 float64\n\t\ty6 float64\n\t\ty1 float64\n\t\ty0 float64\n\t\ty5 float64\n\t\ty4 float64\n\t\tx7 float64\n\t\tx6 float64\n\t\tx1 float64\n\t\tx0 float64\n\t\ty3 float64\n\t\ty2 float64\n\t\tx5 float64\n\t\tr3lowx0 float64\n\t\tx4 float64\n\t\tr0lowx6 float64\n\t\tx3 float64\n\t\tr3highx0 float64\n\t\tx2 float64\n\t\tr0highx6 float64\n\t\tr0lowx0 float64\n\t\tsr1lowx6 float64\n\t\tr0highx0 float64\n\t\tsr1highx6 float64\n\t\tsr3low float64\n\t\tr1lowx0 float64\n\t\tsr2lowx6 float64\n\t\tr1highx0 float64\n\t\tsr2highx6 float64\n\t\tr2lowx0 float64\n\t\tsr3lowx6 float64\n\t\tr2highx0 float64\n\t\tsr3highx6 float64\n\t\tr1highx4 float64\n\t\tr1lowx4 float64\n\t\tr0highx4 float64\n\t\tr0lowx4 float64\n\t\tsr3highx4 float64\n\t\tsr3lowx4 float64\n\t\tsr2highx4 float64\n\t\tsr2lowx4 float64\n\t\tr0lowx2 float64\n\t\tr0highx2 float64\n\t\tr1lowx2 float64\n\t\tr1highx2 float64\n\t\tr2lowx2 float64\n\t\tr2highx2 float64\n\t\tsr3lowx2 float64\n\t\tsr3highx2 float64\n\t\tz0 float64\n\t\tz1 float64\n\t\tz2 float64\n\t\tz3 float64\n\t\tm0 int64\n\t\tm1 int64\n\t\tm2 int64\n\t\tm3 int64\n\t\tm00 uint32\n\t\tm01 uint32\n\t\tm02 uint32\n\t\tm03 uint32\n\t\tm10 uint32\n\t\tm11 uint32\n\t\tm12 uint32\n\t\tm13 uint32\n\t\tm20 uint32\n\t\tm21 uint32\n\t\tm22 uint32\n\t\tm23 uint32\n\t\tm30 uint32\n\t\tm31 uint32\n\t\tm32 uint32\n\t\tm33 uint64\n\t\tlbelow2 int32\n\t\tlbelow3 int32\n\t\tlbelow4 int32\n\t\tlbelow5 int32\n\t\tlbelow6 int32\n\t\tlbelow7 int32\n\t\tlbelow8 int32\n\t\tlbelow9 int32\n\t\tlbelow10 int32\n\t\tlbelow11 int32\n\t\tlbelow12 int32\n\t\tlbelow13 int32\n\t\tlbelow14 int32\n\t\tlbelow15 int32\n\t\ts00 uint32\n\t\ts01 uint32\n\t\ts02 uint32\n\t\ts03 uint32\n\t\ts10 uint32\n\t\ts11 uint32\n\t\ts12 uint32\n\t\ts13 uint32\n\t\ts20 uint32\n\t\ts21 uint32\n\t\ts22 uint32\n\t\ts23 uint32\n\t\ts30 uint32\n\t\ts31 uint32\n\t\ts32 uint32\n\t\ts33 uint32\n\t\tbits32 uint64\n\t\tf uint64\n\t\tf0 uint64\n\t\tf1 uint64\n\t\tf2 uint64\n\t\tf3 uint64\n\t\tf4 uint64\n\t\tg uint64\n\t\tg0 uint64\n\t\tg1 uint64\n\t\tg2 uint64\n\t\tg3 uint64\n\t\tg4 uint64\n\t)\n\n\tvar p int32\n\n\tl := int32(len(m))\n\n\tr00 := uint32(r[0])\n\n\tr01 := uint32(r[1])\n\n\tr02 := uint32(r[2])\n\tr0 := int64(2151)\n\n\tr03 := uint32(r[3])\n\tr03 &= 15\n\tr0 <<= 51\n\n\tr10 := uint32(r[4])\n\tr10 &= 252\n\tr01 <<= 8\n\tr0 += int64(r00)\n\n\tr11 := uint32(r[5])\n\tr02 <<= 16\n\tr0 += int64(r01)\n\n\tr12 := uint32(r[6])\n\tr03 <<= 24\n\tr0 += int64(r02)\n\n\tr13 := uint32(r[7])\n\tr13 &= 15\n\tr1 := int64(2215)\n\tr0 += int64(r03)\n\n\td0 := r0\n\tr1 <<= 51\n\tr2 := int64(2279)\n\n\tr20 := uint32(r[8])\n\tr20 &= 252\n\tr11 <<= 8\n\tr1 += int64(r10)\n\n\tr21 := uint32(r[9])\n\tr12 <<= 16\n\tr1 += int64(r11)\n\n\tr22 := uint32(r[10])\n\tr13 <<= 24\n\tr1 += int64(r12)\n\n\tr23 := uint32(r[11])\n\tr23 &= 15\n\tr2 <<= 51\n\tr1 += int64(r13)\n\n\td1 := r1\n\tr21 <<= 8\n\tr2 += int64(r20)\n\n\tr30 := uint32(r[12])\n\tr30 &= 252\n\tr22 <<= 16\n\tr2 += int64(r21)\n\n\tr31 := uint32(r[13])\n\tr23 <<= 24\n\tr2 += int64(r22)\n\n\tr32 := uint32(r[14])\n\tr2 += int64(r23)\n\tr3 := 
int64(2343)\n\n\td2 := r2\n\tr3 <<= 51\n\n\tr33 := uint32(r[15])\n\tr33 &= 15\n\tr31 <<= 8\n\tr3 += int64(r30)\n\n\tr32 <<= 16\n\tr3 += int64(r31)\n\n\tr33 <<= 24\n\tr3 += int64(r32)\n\n\tr3 += int64(r33)\n\th0 := alpha32 - alpha32\n\n\td3 := r3\n\th1 := alpha32 - alpha32\n\n\th2 := alpha32 - alpha32\n\n\th3 := alpha32 - alpha32\n\n\th4 := alpha32 - alpha32\n\n\tr0low := math.Float64frombits(uint64(d0))\n\th5 := alpha32 - alpha32\n\n\tr1low := math.Float64frombits(uint64(d1))\n\th6 := alpha32 - alpha32\n\n\tr2low := math.Float64frombits(uint64(d2))\n\th7 := alpha32 - alpha32\n\n\tr0low -= alpha0\n\n\tr1low -= alpha32\n\n\tr2low -= alpha64\n\n\tr0high := r0low + alpha18\n\n\tr3low := math.Float64frombits(uint64(d3))\n\n\tr1high := r1low + alpha50\n\tsr1low := scale * r1low\n\n\tr2high := r2low + alpha82\n\tsr2low := scale * r2low\n\n\tr0high -= alpha18\n\tr0high_stack := r0high\n\n\tr3low -= alpha96\n\n\tr1high -= alpha50\n\tr1high_stack := r1high\n\n\tsr1high := sr1low + alpham80\n\n\tr0low -= r0high\n\n\tr2high -= alpha82\n\tsr3low = scale * r3low\n\n\tsr2high := sr2low + alpham48\n\n\tr1low -= r1high\n\tr1low_stack := r1low\n\n\tsr1high -= alpham80\n\tsr1high_stack := sr1high\n\n\tr2low -= r2high\n\tr2low_stack := r2low\n\n\tsr2high -= alpham48\n\tsr2high_stack := sr2high\n\n\tr3high := r3low + alpha112\n\tr0low_stack := r0low\n\n\tsr1low -= sr1high\n\tsr1low_stack := sr1low\n\n\tsr3high := sr3low + alpham16\n\tr2high_stack := r2high\n\n\tsr2low -= sr2high\n\tsr2low_stack := sr2low\n\n\tr3high -= alpha112\n\tr3high_stack := r3high\n\n\tsr3high -= alpham16\n\tsr3high_stack := sr3high\n\n\tr3low -= r3high\n\tr3low_stack := r3low\n\n\tsr3low -= sr3high\n\tsr3low_stack := sr3low\n\n\tif l < 16 {\n\t\tgoto addatmost15bytes\n\t}\n\n\tm00 = uint32(m[p+0])\n\tm0 = 2151\n\n\tm0 <<= 51\n\tm1 = 2215\n\tm01 = uint32(m[p+1])\n\n\tm1 <<= 51\n\tm2 = 2279\n\tm02 = uint32(m[p+2])\n\n\tm2 <<= 51\n\tm3 = 2343\n\tm03 = uint32(m[p+3])\n\n\tm10 = uint32(m[p+4])\n\tm01 <<= 8\n\tm0 += int64(m00)\n\n\tm11 = uint32(m[p+5])\n\tm02 <<= 16\n\tm0 += int64(m01)\n\n\tm12 = uint32(m[p+6])\n\tm03 <<= 24\n\tm0 += int64(m02)\n\n\tm13 = uint32(m[p+7])\n\tm3 <<= 51\n\tm0 += int64(m03)\n\n\tm20 = uint32(m[p+8])\n\tm11 <<= 8\n\tm1 += int64(m10)\n\n\tm21 = uint32(m[p+9])\n\tm12 <<= 16\n\tm1 += int64(m11)\n\n\tm22 = uint32(m[p+10])\n\tm13 <<= 24\n\tm1 += int64(m12)\n\n\tm23 = uint32(m[p+11])\n\tm1 += int64(m13)\n\n\tm30 = uint32(m[p+12])\n\tm21 <<= 8\n\tm2 += int64(m20)\n\n\tm31 = uint32(m[p+13])\n\tm22 <<= 16\n\tm2 += int64(m21)\n\n\tm32 = uint32(m[p+14])\n\tm23 <<= 24\n\tm2 += int64(m22)\n\n\tm33 = uint64(m[p+15])\n\tm2 += int64(m23)\n\n\td0 = m0\n\tm31 <<= 8\n\tm3 += int64(m30)\n\n\td1 = m1\n\tm32 <<= 16\n\tm3 += int64(m31)\n\n\td2 = m2\n\tm33 += 256\n\n\tm33 <<= 24\n\tm3 += int64(m32)\n\n\tm3 += int64(m33)\n\td3 = m3\n\n\tp += 16\n\tl -= 16\n\n\tz0 = math.Float64frombits(uint64(d0))\n\n\tz1 = math.Float64frombits(uint64(d1))\n\n\tz2 = math.Float64frombits(uint64(d2))\n\n\tz3 = math.Float64frombits(uint64(d3))\n\n\tz0 -= alpha0\n\n\tz1 -= alpha32\n\n\tz2 -= alpha64\n\n\tz3 -= alpha96\n\n\th0 += z0\n\n\th1 += z1\n\n\th3 += z2\n\n\th5 += z3\n\n\tif l < 16 {\n\t\tgoto multiplyaddatmost15bytes\n\t}\n\nmultiplyaddatleast16bytes:\n\n\tm2 = 2279\n\tm20 = uint32(m[p+8])\n\ty7 = h7 + alpha130\n\n\tm2 <<= 51\n\tm3 = 2343\n\tm21 = uint32(m[p+9])\n\ty6 = h6 + alpha130\n\n\tm3 <<= 51\n\tm0 = 2151\n\tm22 = uint32(m[p+10])\n\ty1 = h1 + alpha32\n\n\tm0 <<= 51\n\tm1 = 2215\n\tm23 = uint32(m[p+11])\n\ty0 = h0 + alpha32\n\n\tm1 <<= 51\n\tm30 = 
uint32(m[p+12])\n\ty7 -= alpha130\n\n\tm21 <<= 8\n\tm2 += int64(m20)\n\tm31 = uint32(m[p+13])\n\ty6 -= alpha130\n\n\tm22 <<= 16\n\tm2 += int64(m21)\n\tm32 = uint32(m[p+14])\n\ty1 -= alpha32\n\n\tm23 <<= 24\n\tm2 += int64(m22)\n\tm33 = uint64(m[p+15])\n\ty0 -= alpha32\n\n\tm2 += int64(m23)\n\tm00 = uint32(m[p+0])\n\ty5 = h5 + alpha96\n\n\tm31 <<= 8\n\tm3 += int64(m30)\n\tm01 = uint32(m[p+1])\n\ty4 = h4 + alpha96\n\n\tm32 <<= 16\n\tm02 = uint32(m[p+2])\n\tx7 = h7 - y7\n\ty7 *= scale\n\n\tm33 += 256\n\tm03 = uint32(m[p+3])\n\tx6 = h6 - y6\n\ty6 *= scale\n\n\tm33 <<= 24\n\tm3 += int64(m31)\n\tm10 = uint32(m[p+4])\n\tx1 = h1 - y1\n\n\tm01 <<= 8\n\tm3 += int64(m32)\n\tm11 = uint32(m[p+5])\n\tx0 = h0 - y0\n\n\tm3 += int64(m33)\n\tm0 += int64(m00)\n\tm12 = uint32(m[p+6])\n\ty5 -= alpha96\n\n\tm02 <<= 16\n\tm0 += int64(m01)\n\tm13 = uint32(m[p+7])\n\ty4 -= alpha96\n\n\tm03 <<= 24\n\tm0 += int64(m02)\n\td2 = m2\n\tx1 += y7\n\n\tm0 += int64(m03)\n\td3 = m3\n\tx0 += y6\n\n\tm11 <<= 8\n\tm1 += int64(m10)\n\td0 = m0\n\tx7 += y5\n\n\tm12 <<= 16\n\tm1 += int64(m11)\n\tx6 += y4\n\n\tm13 <<= 24\n\tm1 += int64(m12)\n\ty3 = h3 + alpha64\n\n\tm1 += int64(m13)\n\td1 = m1\n\ty2 = h2 + alpha64\n\n\tx0 += x1\n\n\tx6 += x7\n\n\ty3 -= alpha64\n\tr3low = r3low_stack\n\n\ty2 -= alpha64\n\tr0low = r0low_stack\n\n\tx5 = h5 - y5\n\tr3lowx0 = r3low * x0\n\tr3high = r3high_stack\n\n\tx4 = h4 - y4\n\tr0lowx6 = r0low * x6\n\tr0high = r0high_stack\n\n\tx3 = h3 - y3\n\tr3highx0 = r3high * x0\n\tsr1low = sr1low_stack\n\n\tx2 = h2 - y2\n\tr0highx6 = r0high * x6\n\tsr1high = sr1high_stack\n\n\tx5 += y3\n\tr0lowx0 = r0low * x0\n\tr1low = r1low_stack\n\n\th6 = r3lowx0 + r0lowx6\n\tsr1lowx6 = sr1low * x6\n\tr1high = r1high_stack\n\n\tx4 += y2\n\tr0highx0 = r0high * x0\n\tsr2low = sr2low_stack\n\n\th7 = r3highx0 + r0highx6\n\tsr1highx6 = sr1high * x6\n\tsr2high = sr2high_stack\n\n\tx3 += y1\n\tr1lowx0 = r1low * x0\n\tr2low = r2low_stack\n\n\th0 = r0lowx0 + sr1lowx6\n\tsr2lowx6 = sr2low * x6\n\tr2high = r2high_stack\n\n\tx2 += y0\n\tr1highx0 = r1high * x0\n\tsr3low = sr3low_stack\n\n\th1 = r0highx0 + sr1highx6\n\tsr2highx6 = sr2high * x6\n\tsr3high = sr3high_stack\n\n\tx4 += x5\n\tr2lowx0 = r2low * x0\n\tz2 = math.Float64frombits(uint64(d2))\n\n\th2 = r1lowx0 + sr2lowx6\n\tsr3lowx6 = sr3low * x6\n\n\tx2 += x3\n\tr2highx0 = r2high * x0\n\tz3 = math.Float64frombits(uint64(d3))\n\n\th3 = r1highx0 + sr2highx6\n\tsr3highx6 = sr3high * x6\n\n\tr1highx4 = r1high * x4\n\tz2 -= alpha64\n\n\th4 = r2lowx0 + sr3lowx6\n\tr1lowx4 = r1low * x4\n\n\tr0highx4 = r0high * x4\n\tz3 -= alpha96\n\n\th5 = r2highx0 + sr3highx6\n\tr0lowx4 = r0low * x4\n\n\th7 += r1highx4\n\tsr3highx4 = sr3high * x4\n\n\th6 += r1lowx4\n\tsr3lowx4 = sr3low * x4\n\n\th5 += r0highx4\n\tsr2highx4 = sr2high * x4\n\n\th4 += r0lowx4\n\tsr2lowx4 = sr2low * x4\n\n\th3 += sr3highx4\n\tr0lowx2 = r0low * x2\n\n\th2 += sr3lowx4\n\tr0highx2 = r0high * x2\n\n\th1 += sr2highx4\n\tr1lowx2 = r1low * x2\n\n\th0 += sr2lowx4\n\tr1highx2 = r1high * x2\n\n\th2 += r0lowx2\n\tr2lowx2 = r2low * x2\n\n\th3 += r0highx2\n\tr2highx2 = r2high * x2\n\n\th4 += r1lowx2\n\tsr3lowx2 = sr3low * x2\n\n\th5 += r1highx2\n\tsr3highx2 = sr3high * x2\n\n\tp += 16\n\tl -= 16\n\th6 += r2lowx2\n\n\th7 += r2highx2\n\n\tz1 = math.Float64frombits(uint64(d1))\n\th0 += sr3lowx2\n\n\tz0 = math.Float64frombits(uint64(d0))\n\th1 += sr3highx2\n\n\tz1 -= alpha32\n\n\tz0 -= alpha0\n\n\th5 += z3\n\n\th3 += z2\n\n\th1 += z1\n\n\th0 += z0\n\n\tif l >= 16 {\n\t\tgoto multiplyaddatleast16bytes\n\t}\n\nmultiplyaddatmost15bytes:\n\n\ty7 = 
h7 + alpha130\n\n\ty6 = h6 + alpha130\n\n\ty1 = h1 + alpha32\n\n\ty0 = h0 + alpha32\n\n\ty7 -= alpha130\n\n\ty6 -= alpha130\n\n\ty1 -= alpha32\n\n\ty0 -= alpha32\n\n\ty5 = h5 + alpha96\n\n\ty4 = h4 + alpha96\n\n\tx7 = h7 - y7\n\ty7 *= scale\n\n\tx6 = h6 - y6\n\ty6 *= scale\n\n\tx1 = h1 - y1\n\n\tx0 = h0 - y0\n\n\ty5 -= alpha96\n\n\ty4 -= alpha96\n\n\tx1 += y7\n\n\tx0 += y6\n\n\tx7 += y5\n\n\tx6 += y4\n\n\ty3 = h3 + alpha64\n\n\ty2 = h2 + alpha64\n\n\tx0 += x1\n\n\tx6 += x7\n\n\ty3 -= alpha64\n\tr3low = r3low_stack\n\n\ty2 -= alpha64\n\tr0low = r0low_stack\n\n\tx5 = h5 - y5\n\tr3lowx0 = r3low * x0\n\tr3high = r3high_stack\n\n\tx4 = h4 - y4\n\tr0lowx6 = r0low * x6\n\tr0high = r0high_stack\n\n\tx3 = h3 - y3\n\tr3highx0 = r3high * x0\n\tsr1low = sr1low_stack\n\n\tx2 = h2 - y2\n\tr0highx6 = r0high * x6\n\tsr1high = sr1high_stack\n\n\tx5 += y3\n\tr0lowx0 = r0low * x0\n\tr1low = r1low_stack\n\n\th6 = r3lowx0 + r0lowx6\n\tsr1lowx6 = sr1low * x6\n\tr1high = r1high_stack\n\n\tx4 += y2\n\tr0highx0 = r0high * x0\n\tsr2low = sr2low_stack\n\n\th7 = r3highx0 + r0highx6\n\tsr1highx6 = sr1high * x6\n\tsr2high = sr2high_stack\n\n\tx3 += y1\n\tr1lowx0 = r1low * x0\n\tr2low = r2low_stack\n\n\th0 = r0lowx0 + sr1lowx6\n\tsr2lowx6 = sr2low * x6\n\tr2high = r2high_stack\n\n\tx2 += y0\n\tr1highx0 = r1high * x0\n\tsr3low = sr3low_stack\n\n\th1 = r0highx0 + sr1highx6\n\tsr2highx6 = sr2high * x6\n\tsr3high = sr3high_stack\n\n\tx4 += x5\n\tr2lowx0 = r2low * x0\n\n\th2 = r1lowx0 + sr2lowx6\n\tsr3lowx6 = sr3low * x6\n\n\tx2 += x3\n\tr2highx0 = r2high * x0\n\n\th3 = r1highx0 + sr2highx6\n\tsr3highx6 = sr3high * x6\n\n\tr1highx4 = r1high * x4\n\n\th4 = r2lowx0 + sr3lowx6\n\tr1lowx4 = r1low * x4\n\n\tr0highx4 = r0high * x4\n\n\th5 = r2highx0 + sr3highx6\n\tr0lowx4 = r0low * x4\n\n\th7 += r1highx4\n\tsr3highx4 = sr3high * x4\n\n\th6 += r1lowx4\n\tsr3lowx4 = sr3low * x4\n\n\th5 += r0highx4\n\tsr2highx4 = sr2high * x4\n\n\th4 += r0lowx4\n\tsr2lowx4 = sr2low * x4\n\n\th3 += sr3highx4\n\tr0lowx2 = r0low * x2\n\n\th2 += sr3lowx4\n\tr0highx2 = r0high * x2\n\n\th1 += sr2highx4\n\tr1lowx2 = r1low * x2\n\n\th0 += sr2lowx4\n\tr1highx2 = r1high * x2\n\n\th2 += r0lowx2\n\tr2lowx2 = r2low * x2\n\n\th3 += r0highx2\n\tr2highx2 = r2high * x2\n\n\th4 += r1lowx2\n\tsr3lowx2 = sr3low * x2\n\n\th5 += r1highx2\n\tsr3highx2 = sr3high * x2\n\n\th6 += r2lowx2\n\n\th7 += r2highx2\n\n\th0 += sr3lowx2\n\n\th1 += sr3highx2\n\naddatmost15bytes:\n\n\tif l == 0 {\n\t\tgoto nomorebytes\n\t}\n\n\tlbelow2 = l - 2\n\n\tlbelow3 = l - 3\n\n\tlbelow2 >>= 31\n\tlbelow4 = l - 4\n\n\tm00 = uint32(m[p+0])\n\tlbelow3 >>= 31\n\tp += lbelow2\n\n\tm01 = uint32(m[p+1])\n\tlbelow4 >>= 31\n\tp += lbelow3\n\n\tm02 = uint32(m[p+2])\n\tp += lbelow4\n\tm0 = 2151\n\n\tm03 = uint32(m[p+3])\n\tm0 <<= 51\n\tm1 = 2215\n\n\tm0 += int64(m00)\n\tm01 &^= uint32(lbelow2)\n\n\tm02 &^= uint32(lbelow3)\n\tm01 -= uint32(lbelow2)\n\n\tm01 <<= 8\n\tm03 &^= uint32(lbelow4)\n\n\tm0 += int64(m01)\n\tlbelow2 -= lbelow3\n\n\tm02 += uint32(lbelow2)\n\tlbelow3 -= lbelow4\n\n\tm02 <<= 16\n\tm03 += uint32(lbelow3)\n\n\tm03 <<= 24\n\tm0 += int64(m02)\n\n\tm0 += int64(m03)\n\tlbelow5 = l - 5\n\n\tlbelow6 = l - 6\n\tlbelow7 = l - 7\n\n\tlbelow5 >>= 31\n\tlbelow8 = l - 8\n\n\tlbelow6 >>= 31\n\tp += lbelow5\n\n\tm10 = uint32(m[p+4])\n\tlbelow7 >>= 31\n\tp += lbelow6\n\n\tm11 = uint32(m[p+5])\n\tlbelow8 >>= 31\n\tp += lbelow7\n\n\tm12 = uint32(m[p+6])\n\tm1 <<= 51\n\tp += lbelow8\n\n\tm13 = uint32(m[p+7])\n\tm10 &^= uint32(lbelow5)\n\tlbelow4 -= lbelow5\n\n\tm10 += uint32(lbelow4)\n\tlbelow5 -= 
lbelow6\n\n\tm11 &^= uint32(lbelow6)\n\tm11 += uint32(lbelow5)\n\n\tm11 <<= 8\n\tm1 += int64(m10)\n\n\tm1 += int64(m11)\n\tm12 &^= uint32(lbelow7)\n\n\tlbelow6 -= lbelow7\n\tm13 &^= uint32(lbelow8)\n\n\tm12 += uint32(lbelow6)\n\tlbelow7 -= lbelow8\n\n\tm12 <<= 16\n\tm13 += uint32(lbelow7)\n\n\tm13 <<= 24\n\tm1 += int64(m12)\n\n\tm1 += int64(m13)\n\tm2 = 2279\n\n\tlbelow9 = l - 9\n\tm3 = 2343\n\n\tlbelow10 = l - 10\n\tlbelow11 = l - 11\n\n\tlbelow9 >>= 31\n\tlbelow12 = l - 12\n\n\tlbelow10 >>= 31\n\tp += lbelow9\n\n\tm20 = uint32(m[p+8])\n\tlbelow11 >>= 31\n\tp += lbelow10\n\n\tm21 = uint32(m[p+9])\n\tlbelow12 >>= 31\n\tp += lbelow11\n\n\tm22 = uint32(m[p+10])\n\tm2 <<= 51\n\tp += lbelow12\n\n\tm23 = uint32(m[p+11])\n\tm20 &^= uint32(lbelow9)\n\tlbelow8 -= lbelow9\n\n\tm20 += uint32(lbelow8)\n\tlbelow9 -= lbelow10\n\n\tm21 &^= uint32(lbelow10)\n\tm21 += uint32(lbelow9)\n\n\tm21 <<= 8\n\tm2 += int64(m20)\n\n\tm2 += int64(m21)\n\tm22 &^= uint32(lbelow11)\n\n\tlbelow10 -= lbelow11\n\tm23 &^= uint32(lbelow12)\n\n\tm22 += uint32(lbelow10)\n\tlbelow11 -= lbelow12\n\n\tm22 <<= 16\n\tm23 += uint32(lbelow11)\n\n\tm23 <<= 24\n\tm2 += int64(m22)\n\n\tm3 <<= 51\n\tlbelow13 = l - 13\n\n\tlbelow13 >>= 31\n\tlbelow14 = l - 14\n\n\tlbelow14 >>= 31\n\tp += lbelow13\n\tlbelow15 = l - 15\n\n\tm30 = uint32(m[p+12])\n\tlbelow15 >>= 31\n\tp += lbelow14\n\n\tm31 = uint32(m[p+13])\n\tp += lbelow15\n\tm2 += int64(m23)\n\n\tm32 = uint32(m[p+14])\n\tm30 &^= uint32(lbelow13)\n\tlbelow12 -= lbelow13\n\n\tm30 += uint32(lbelow12)\n\tlbelow13 -= lbelow14\n\n\tm3 += int64(m30)\n\tm31 &^= uint32(lbelow14)\n\n\tm31 += uint32(lbelow13)\n\tm32 &^= uint32(lbelow15)\n\n\tm31 <<= 8\n\tlbelow14 -= lbelow15\n\n\tm3 += int64(m31)\n\tm32 += uint32(lbelow14)\n\td0 = m0\n\n\tm32 <<= 16\n\tm33 = uint64(lbelow15 + 1)\n\td1 = m1\n\n\tm33 <<= 24\n\tm3 += int64(m32)\n\td2 = m2\n\n\tm3 += int64(m33)\n\td3 = m3\n\n\tz3 = math.Float64frombits(uint64(d3))\n\n\tz2 = math.Float64frombits(uint64(d2))\n\n\tz1 = math.Float64frombits(uint64(d1))\n\n\tz0 = math.Float64frombits(uint64(d0))\n\n\tz3 -= alpha96\n\n\tz2 -= alpha64\n\n\tz1 -= alpha32\n\n\tz0 -= alpha0\n\n\th5 += z3\n\n\th3 += z2\n\n\th1 += z1\n\n\th0 += z0\n\n\ty7 = h7 + alpha130\n\n\ty6 = h6 + alpha130\n\n\ty1 = h1 + alpha32\n\n\ty0 = h0 + alpha32\n\n\ty7 -= alpha130\n\n\ty6 -= alpha130\n\n\ty1 -= alpha32\n\n\ty0 -= alpha32\n\n\ty5 = h5 + alpha96\n\n\ty4 = h4 + alpha96\n\n\tx7 = h7 - y7\n\ty7 *= scale\n\n\tx6 = h6 - y6\n\ty6 *= scale\n\n\tx1 = h1 - y1\n\n\tx0 = h0 - y0\n\n\ty5 -= alpha96\n\n\ty4 -= alpha96\n\n\tx1 += y7\n\n\tx0 += y6\n\n\tx7 += y5\n\n\tx6 += y4\n\n\ty3 = h3 + alpha64\n\n\ty2 = h2 + alpha64\n\n\tx0 += x1\n\n\tx6 += x7\n\n\ty3 -= alpha64\n\tr3low = r3low_stack\n\n\ty2 -= alpha64\n\tr0low = r0low_stack\n\n\tx5 = h5 - y5\n\tr3lowx0 = r3low * x0\n\tr3high = r3high_stack\n\n\tx4 = h4 - y4\n\tr0lowx6 = r0low * x6\n\tr0high = r0high_stack\n\n\tx3 = h3 - y3\n\tr3highx0 = r3high * x0\n\tsr1low = sr1low_stack\n\n\tx2 = h2 - y2\n\tr0highx6 = r0high * x6\n\tsr1high = sr1high_stack\n\n\tx5 += y3\n\tr0lowx0 = r0low * x0\n\tr1low = r1low_stack\n\n\th6 = r3lowx0 + r0lowx6\n\tsr1lowx6 = sr1low * x6\n\tr1high = r1high_stack\n\n\tx4 += y2\n\tr0highx0 = r0high * x0\n\tsr2low = sr2low_stack\n\n\th7 = r3highx0 + r0highx6\n\tsr1highx6 = sr1high * x6\n\tsr2high = sr2high_stack\n\n\tx3 += y1\n\tr1lowx0 = r1low * x0\n\tr2low = r2low_stack\n\n\th0 = r0lowx0 + sr1lowx6\n\tsr2lowx6 = sr2low * x6\n\tr2high = r2high_stack\n\n\tx2 += y0\n\tr1highx0 = r1high * x0\n\tsr3low = sr3low_stack\n\n\th1 = r0highx0 
+ sr1highx6\n\tsr2highx6 = sr2high * x6\n\tsr3high = sr3high_stack\n\n\tx4 += x5\n\tr2lowx0 = r2low * x0\n\n\th2 = r1lowx0 + sr2lowx6\n\tsr3lowx6 = sr3low * x6\n\n\tx2 += x3\n\tr2highx0 = r2high * x0\n\n\th3 = r1highx0 + sr2highx6\n\tsr3highx6 = sr3high * x6\n\n\tr1highx4 = r1high * x4\n\n\th4 = r2lowx0 + sr3lowx6\n\tr1lowx4 = r1low * x4\n\n\tr0highx4 = r0high * x4\n\n\th5 = r2highx0 + sr3highx6\n\tr0lowx4 = r0low * x4\n\n\th7 += r1highx4\n\tsr3highx4 = sr3high * x4\n\n\th6 += r1lowx4\n\tsr3lowx4 = sr3low * x4\n\n\th5 += r0highx4\n\tsr2highx4 = sr2high * x4\n\n\th4 += r0lowx4\n\tsr2lowx4 = sr2low * x4\n\n\th3 += sr3highx4\n\tr0lowx2 = r0low * x2\n\n\th2 += sr3lowx4\n\tr0highx2 = r0high * x2\n\n\th1 += sr2highx4\n\tr1lowx2 = r1low * x2\n\n\th0 += sr2lowx4\n\tr1highx2 = r1high * x2\n\n\th2 += r0lowx2\n\tr2lowx2 = r2low * x2\n\n\th3 += r0highx2\n\tr2highx2 = r2high * x2\n\n\th4 += r1lowx2\n\tsr3lowx2 = sr3low * x2\n\n\th5 += r1highx2\n\tsr3highx2 = sr3high * x2\n\n\th6 += r2lowx2\n\n\th7 += r2highx2\n\n\th0 += sr3lowx2\n\n\th1 += sr3highx2\n\nnomorebytes:\n\n\ty7 = h7 + alpha130\n\n\ty0 = h0 + alpha32\n\n\ty1 = h1 + alpha32\n\n\ty2 = h2 + alpha64\n\n\ty7 -= alpha130\n\n\ty3 = h3 + alpha64\n\n\ty4 = h4 + alpha96\n\n\ty5 = h5 + alpha96\n\n\tx7 = h7 - y7\n\ty7 *= scale\n\n\ty0 -= alpha32\n\n\ty1 -= alpha32\n\n\ty2 -= alpha64\n\n\th6 += x7\n\n\ty3 -= alpha64\n\n\ty4 -= alpha96\n\n\ty5 -= alpha96\n\n\ty6 = h6 + alpha130\n\n\tx0 = h0 - y0\n\n\tx1 = h1 - y1\n\n\tx2 = h2 - y2\n\n\ty6 -= alpha130\n\n\tx0 += y7\n\n\tx3 = h3 - y3\n\n\tx4 = h4 - y4\n\n\tx5 = h5 - y5\n\n\tx6 = h6 - y6\n\n\ty6 *= scale\n\n\tx2 += y0\n\n\tx3 += y1\n\n\tx4 += y2\n\n\tx0 += y6\n\n\tx5 += y3\n\n\tx6 += y4\n\n\tx2 += x3\n\n\tx0 += x1\n\n\tx4 += x5\n\n\tx6 += y5\n\n\tx2 += offset1\n\td1 = int64(math.Float64bits(x2))\n\n\tx0 += offset0\n\td0 = int64(math.Float64bits(x0))\n\n\tx4 += offset2\n\td2 = int64(math.Float64bits(x4))\n\n\tx6 += offset3\n\td3 = int64(math.Float64bits(x6))\n\n\tf0 = uint64(d0)\n\n\tf1 = uint64(d1)\n\tbits32 = math.MaxUint64\n\n\tf2 = uint64(d2)\n\tbits32 >>= 32\n\n\tf3 = uint64(d3)\n\tf = f0 >> 32\n\n\tf0 &= bits32\n\tf &= 255\n\n\tf1 += f\n\tg0 = f0 + 5\n\n\tg = g0 >> 32\n\tg0 &= bits32\n\n\tf = f1 >> 32\n\tf1 &= bits32\n\n\tf &= 255\n\tg1 = f1 + g\n\n\tg = g1 >> 32\n\tf2 += f\n\n\tf = f2 >> 32\n\tg1 &= bits32\n\n\tf2 &= bits32\n\tf &= 255\n\n\tf3 += f\n\tg2 = f2 + g\n\n\tg = g2 >> 32\n\tg2 &= bits32\n\n\tf4 = f3 >> 32\n\tf3 &= bits32\n\n\tf4 &= 255\n\tg3 = f3 + g\n\n\tg = g3 >> 32\n\tg3 &= bits32\n\n\tg4 = f4 + g\n\n\tg4 = g4 - 4\n\ts00 = uint32(s[0])\n\n\tf = uint64(int64(g4) >> 63)\n\ts01 = uint32(s[1])\n\n\tf0 &= f\n\tg0 &^= f\n\ts02 = uint32(s[2])\n\n\tf1 &= f\n\tf0 |= g0\n\ts03 = uint32(s[3])\n\n\tg1 &^= f\n\tf2 &= f\n\ts10 = uint32(s[4])\n\n\tf3 &= f\n\tg2 &^= f\n\ts11 = uint32(s[5])\n\n\tg3 &^= f\n\tf1 |= g1\n\ts12 = uint32(s[6])\n\n\tf2 |= g2\n\tf3 |= g3\n\ts13 = uint32(s[7])\n\n\ts01 <<= 8\n\tf0 += uint64(s00)\n\ts20 = uint32(s[8])\n\n\ts02 <<= 16\n\tf0 += uint64(s01)\n\ts21 = uint32(s[9])\n\n\ts03 <<= 24\n\tf0 += uint64(s02)\n\ts22 = uint32(s[10])\n\n\ts11 <<= 8\n\tf1 += uint64(s10)\n\ts23 = uint32(s[11])\n\n\ts12 <<= 16\n\tf1 += uint64(s11)\n\ts30 = uint32(s[12])\n\n\ts13 <<= 24\n\tf1 += uint64(s12)\n\ts31 = uint32(s[13])\n\n\tf0 += uint64(s03)\n\tf1 += uint64(s13)\n\ts32 = uint32(s[14])\n\n\ts21 <<= 8\n\tf2 += uint64(s20)\n\ts33 = uint32(s[15])\n\n\ts22 <<= 16\n\tf2 += uint64(s21)\n\n\ts23 <<= 24\n\tf2 += uint64(s22)\n\n\ts31 <<= 8\n\tf3 += uint64(s30)\n\n\ts32 <<= 16\n\tf3 += 
uint64(s31)\n\n\ts33 <<= 24\n\tf3 += uint64(s32)\n\n\tf2 += uint64(s23)\n\tf3 += uint64(s33)\n\n\tout[0] = byte(f0)\n\tf0 >>= 8\n\tout[1] = byte(f0)\n\tf0 >>= 8\n\tout[2] = byte(f0)\n\tf0 >>= 8\n\tout[3] = byte(f0)\n\tf0 >>= 8\n\tf1 += f0\n\n\tout[4] = byte(f1)\n\tf1 >>= 8\n\tout[5] = byte(f1)\n\tf1 >>= 8\n\tout[6] = byte(f1)\n\tf1 >>= 8\n\tout[7] = byte(f1)\n\tf1 >>= 8\n\tf2 += f1\n\n\tout[8] = byte(f2)\n\tf2 >>= 8\n\tout[9] = byte(f2)\n\tf2 >>= 8\n\tout[10] = byte(f2)\n\tf2 >>= 8\n\tout[11] = byte(f2)\n\tf2 >>= 8\n\tf3 += f2\n\n\tout[12] = byte(f3)\n\tf3 >>= 8\n\tout[13] = byte(f3)\n\tf3 >>= 8\n\tout[14] = byte(f3)\n\tf3 >>= 8\n\tout[15] = byte(f3)\n}" ]
[ "0.6541231", "0.65324295", "0.6508142", "0.6394101", "0.6275276", "0.61889166", "0.6151819", "0.61230177", "0.6094462", "0.6012163", "0.59496427", "0.59375423", "0.58425725", "0.5835805", "0.5791891", "0.5786533", "0.57692206", "0.57538235", "0.5743269", "0.57314247", "0.5699532", "0.56324303", "0.5618099", "0.5602387", "0.5591935", "0.55829966", "0.55701256", "0.55647606", "0.5560453", "0.55521804", "0.5546553", "0.55357647", "0.55307263", "0.55285037", "0.5520277", "0.5517758", "0.55099314", "0.5492303", "0.5481245", "0.5476936", "0.544334", "0.5431861", "0.5421204", "0.54185987", "0.54106563", "0.5406647", "0.54013956", "0.539515", "0.53904897", "0.53784245", "0.5359421", "0.53593105", "0.53437895", "0.5332927", "0.53313094", "0.5325005", "0.5324596", "0.5324596", "0.5324441", "0.5313197", "0.5310951", "0.5300216", "0.52865136", "0.52790207", "0.5260741", "0.5254729", "0.5241513", "0.5240875", "0.5240875", "0.5232389", "0.52241486", "0.52162236", "0.5200998", "0.51963943", "0.51959217", "0.5188279", "0.51869196", "0.5186318", "0.5185076", "0.51754564", "0.51720333", "0.51677555", "0.515562", "0.51425314", "0.51399016", "0.51375204", "0.5133705", "0.51330274", "0.5126414", "0.51215667", "0.51184976", "0.51169366", "0.51140994", "0.5102194", "0.5095428", "0.5091872", "0.50908697", "0.50905836", "0.50891566", "0.50791055" ]
0.80876577
0
newBinaryExprEvaluator returns a new instance of binaryExprEvaluator.
func newBinaryExprEvaluator(e *Executor, op Token, lhs, rhs processor) *binaryExprEvaluator {
	return &binaryExprEvaluator{
		executor: e,
		op:       op,
		lhs:      lhs,
		rhs:      rhs,
		c:        make(chan map[string]interface{}, 0),
		done:     make(chan chan struct{}, 0),
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewBinary(left Expr, op *token.T, right Expr) *Binary {\n\treturn &Binary{\n\t\tLeft: left,\n\t\tOperator: op,\n\t\tRight: right,\n\t}\n}", "func NewBinaryBooleanExpression(op OP, lE, rE Evaluator) (Evaluator, error) {\n\tswitch op {\n\tcase AND, OR:\n\t\treturn &booleanNode{\n\t\t\top: op,\n\t\t\tlS: true,\n\t\t\tlE: lE,\n\t\t\trS: true,\n\t\t\trE: rE,\n\t\t}, nil\n\tdefault:\n\t\treturn nil, errors.New(\"binary boolean expressions require the operation to be one for the follwing 'and', 'or'\")\n\t}\n}", "func NewBinaryExpr(op BinaryOp, lhs, rhs Expr) Expr {\n\t// assert(ExprWidth(lhs) == ExprWidth(rhs), \"binary expr width mismatch: op=%s (%T) %d != (%T) %d\", op, lhs, ExprWidth(lhs), rhs, ExprWidth(rhs))\n\n\tswitch op {\n\t// Arithmetic operators\n\tcase ADD:\n\t\treturn newAddExpr(lhs, rhs)\n\tcase SUB:\n\t\treturn newSubExpr(lhs, rhs)\n\tcase MUL:\n\t\treturn newMulExpr(lhs, rhs)\n\tcase UDIV, SDIV:\n\t\treturn newDivExpr(op, lhs, rhs)\n\tcase UREM, SREM:\n\t\treturn newRemExpr(op, lhs, rhs)\n\tcase AND:\n\t\treturn newAndExpr(lhs, rhs)\n\tcase OR:\n\t\treturn newOrExpr(lhs, rhs)\n\tcase XOR:\n\t\treturn newXorExpr(lhs, rhs)\n\tcase SHL:\n\t\treturn newShlExpr(lhs, rhs)\n\tcase LSHR:\n\t\treturn newLShrExpr(lhs, rhs)\n\tcase ASHR:\n\t\treturn newAShrExpr(lhs, rhs)\n\n\t// Comparison operators\n\tcase EQ:\n\t\treturn newEqExpr(lhs, rhs)\n\tcase NE:\n\t\treturn NewBinaryExpr(EQ, NewConstantExpr(0, WidthBool), NewBinaryExpr(EQ, lhs, rhs))\n\tcase ULT:\n\t\treturn newUltExpr(lhs, rhs)\n\tcase UGT:\n\t\treturn newUltExpr(rhs, lhs) // reverse\n\tcase ULE:\n\t\treturn newUleExpr(lhs, rhs)\n\tcase UGE:\n\t\treturn newUleExpr(rhs, lhs) // reverse\n\tcase SLT:\n\t\treturn newSltExpr(lhs, rhs)\n\tcase SGT:\n\t\treturn newSltExpr(rhs, lhs) // reverse\n\tcase SLE:\n\t\treturn newSleExpr(lhs, rhs)\n\tcase SGE:\n\t\treturn newSleExpr(rhs, lhs) // reverse\n\n\tdefault:\n\t\tpanic(\"unreachable\")\n\t}\n}", "func NewBinaryExpression(left Expression, op TokenType, right Expression) *BinaryExpression {\n\treturn &BinaryExpression{\n\t\tleft: left,\n\t\top: op,\n\t\tright: right,\n\t}\n}", "func newBinaryOp(op string, expr1, expr2 Expression) Expression {\n\tswitch {\n\tcase expr1 != nil && expr2 != nil:\n\t\treturn &BinaryOp{\n\t\t\tOp: op,\n\t\t\tExpr1: expr1,\n\t\t\tExpr2: expr2,\n\t\t}\n\tcase expr1 != nil && expr2 == nil:\n\t\treturn expr1\n\tcase expr1 == nil && expr2 != nil:\n\t\treturn expr2\n\tcase expr1 == nil && expr2 == nil:\n\t\treturn nil\n\t}\n\tpanic(\"unreachable\")\n}", "func NewBinary(op circuit.Operation, a, b, o *Wire) *Gate {\n\tgate := &Gate{\n\t\tOp: op,\n\t\tA: a,\n\t\tB: b,\n\t\tO: o,\n\t}\n\ta.AddOutput(gate)\n\tb.AddOutput(gate)\n\to.SetInput(gate)\n\n\treturn gate\n}", "func AsBinaryExpr(node ast.Node) *ast.BinaryExpr {\n\texpr, ok := node.(*ast.BinaryExpr)\n\tif !ok {\n\t\tpanic(\"expected *ast.BinaryExpr\")\n\t}\n\treturn expr\n}", "func evalBinaryStringExpr(ctx *Ctx, x reflect.Value, op token.Token, y reflect.Value) (reflect.Value, error) {\n\tvar err error\n\tvar r string\n\tvar b bool\n\tis_bool := false\n\n\txx, yy := x.String(), y.String()\n\tswitch op {\n\tcase token.ADD:\tr = xx + yy\n\tcase token.EQL: b = xx == yy; is_bool = true\n\tcase token.NEQ: b = xx != yy; is_bool = true\n\tcase token.LEQ: b = xx <= yy; is_bool = true\n\tcase token.GEQ: b = xx >= yy; is_bool = true\n\tcase token.LSS: b = xx < yy; is_bool = true\n\tcase token.GTR: b = xx > yy; is_bool = true\n\tdefault: err = ErrInvalidOperands{x, op, y}\n\t}\n\tif is_bool {\n\t\treturn 
reflect.ValueOf(b), err\n\t} else {\n\t\treturn reflect.ValueOf(r).Convert(x.Type()), err\n\t}\n}", "func NewBinaryExpression(left Expression, op string, right Expression) Expression {\n\tlType := left.Type().GetKind()\n\trType := right.Type().GetKind()\n\n\tif lType != rType {\n\t\tpanic(\"mismatching types\")\n\t}\n\n\tvar retVal Expression\n\tswitch op {\n\tcase \"+\":\n\t\tif lType == INTEGER {\n\t\t\tretVal = &integerAddition{left: left, right: right}\n\t\t} else if lType == REAL {\n\t\t\tretVal = &realAddition{left: left, right: right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"-\":\n\t\tif lType == INTEGER {\n\t\t\tretVal = &integerSubstraction{left: left, right: right}\n\t\t} else if lType == REAL {\n\t\t\tretVal = &realSubstraction{left: left, right: right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"*\":\n\t\tif lType == INTEGER {\n\t\t\tretVal = &integerMultiplication{left, right}\n\t\t} else if lType == REAL {\n\t\t\tretVal = &realMultiplication{left: left, right: right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"/\":\n\t\tif lType == INTEGER {\n\t\t\tretVal = &integerDivision{left, right}\n\t\t} else if lType == REAL {\n\t\t\tretVal = &realDivision{left: left, right: right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"%\":\n\t\tif lType == INTEGER {\n\t\t\tretVal = &integerModulo{left, right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"|\":\n\t\tif lType == INTEGER {\n\t\t\tretVal = &integerOr{left, right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"^\":\n\t\tif lType == INTEGER {\n\t\t\tretVal = &integerXor{left, right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"&\":\n\t\tif lType == INTEGER {\n\t\t\tretVal = &integerAnd{left, right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"or\":\n\t\tif lType == BOOLEAN {\n\t\t\tretVal = &booleanOr{left, right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"and\":\n\t\tif lType == BOOLEAN {\n\t\t\tretVal = &booleanAnd{left, right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"==\":\n\t\tif lType == BOOLEAN {\n\t\t\tretVal = &booleanEqual{left, right}\n\t\t} else if lType == INTEGER {\n\t\t\tretVal = &integerEqual{left, right}\n\t\t} else if lType == REAL {\n\t\t\tretVal = &realEqual{left, right}\n\t\t} else if lType == FUNCTION {\n\t\t\tretVal = &funcEqual{left, right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"!=\":\n\t\tif lType == BOOLEAN {\n\t\t\tretVal = &booleanNotEqual{left, right}\n\t\t} else if lType == INTEGER {\n\t\t\tretVal = &integerNotEqual{left, right}\n\t\t} else if lType == REAL {\n\t\t\tretVal = &realNotEqual{left, right}\n\t\t} else if lType == FUNCTION {\n\t\t\tretVal = &funcNotEqual{left, right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"<\":\n\t\tif lType == INTEGER {\n\t\t\tretVal = &integerLessThan{left, right}\n\t\t} else if lType == REAL {\n\t\t\tretVal = &realLessThan{left, right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \">\":\n\t\tif lType == INTEGER {\n\t\t\tretVal = &integerGreaterThan{left, right}\n\t\t} else if lType == REAL {\n\t\t\tretVal = &realGreaterThan{left, right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"<=\":\n\t\tif lType == INTEGER {\n\t\t\tretVal = &integerLessOrEqual{left, right}\n\t\t} else if lType == REAL {\n\t\t\tretVal = &realLessOrEqual{left, right}\n\t\t} else 
{\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \">=\":\n\t\tif lType == INTEGER {\n\t\t\tretVal = &integerGreaterOrEqual{left, right}\n\t\t} else if lType == REAL {\n\t\t\tretVal = &realGreaterOrEqual{left, right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tdefault:\n\t\tpanic(\"unsupported operand\")\n\t}\n\n\t// fmt.Println(retVal.String())\n\treturn retVal\n}", "func binary(typ int, od1 *expr, op string, od2 *expr) *expr {\n\treturn &expr{\n\t\tsexp: append(exprlist{atomic(typ, op)}, od1, od2),\n\t}\n}", "func evalBinaryIntExpr(ctx *Ctx, x reflect.Value, op token.Token, y reflect.Value) (reflect.Value, error) {\n\tvar r int64\n\tvar err error\n\tvar b bool\n\tis_bool := false\n\n\txx, yy := x.Int(), y.Int()\n\tswitch op {\n\tcase token.ADD: r = xx + yy\n\tcase token.SUB: r = xx - yy\n\tcase token.MUL: r = xx * yy\n\tcase token.QUO: r = xx / yy\n\tcase token.REM: r = xx % yy\n\tcase token.AND: r = xx & yy\n\tcase token.OR: r = xx | yy\n\tcase token.XOR: r = xx ^ yy\n\tcase token.AND_NOT: r = xx &^ yy\n\tcase token.EQL: b = xx == yy; is_bool = true\n\tcase token.NEQ: b = xx != yy; is_bool = true\n\tcase token.LEQ: b = xx <= yy; is_bool = true\n\tcase token.GEQ: b = xx >= yy; is_bool = true\n\tcase token.LSS: b = xx < yy; is_bool = true\n\tcase token.GTR: b = xx > yy; is_bool = true\n\tdefault: err = ErrInvalidOperands{x, op, y}\n\t}\n\tif is_bool {\n\t\treturn reflect.ValueOf(b), err\n\t} else {\n\t\treturn reflect.ValueOf(r).Convert(x.Type()), err\n\t}\n}", "func (p *Planner) planBinaryExpr(e *Executor, expr *BinaryExpr) (processor, error) {\n\t// Create processor for LHS.\n\tlhs, err := p.planExpr(e, expr.LHS)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"lhs: %s\", err)\n\t}\n\n\t// Create processor for RHS.\n\trhs, err := p.planExpr(e, expr.RHS)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"rhs: %s\", err)\n\t}\n\n\t// Combine processors.\n\treturn newBinaryExprEvaluator(e, expr.Op, lhs, rhs), nil\n}", "func NewBinary(d []byte) *Binary {\n\treturn &Binary{d, -1}\n}", "func NewBinary(d []byte) *Binary {\n\treturn &Binary{d, -1}\n}", "func (er *expressionRewriter) constructBinaryOpFunction(l expression.Expression, r expression.Expression, op string) (expression.Expression, error) {\n\tlLen, rLen := expression.GetRowLen(l), expression.GetRowLen(r)\n\tif lLen == 1 && rLen == 1 {\n\t\treturn er.newFunction(op, types.NewFieldType(mysql.TypeTiny), l, r)\n\t} else if rLen != lLen {\n\t\treturn nil, expression.ErrOperandColumns.GenWithStackByArgs(lLen)\n\t}\n\tswitch op {\n\tcase ast.EQ, ast.NE:\n\t\tfuncs := make([]expression.Expression, lLen)\n\t\tfor i := 0; i < lLen; i++ {\n\t\t\tvar err error\n\t\t\tfuncs[i], err = er.constructBinaryOpFunction(expression.GetFuncArg(l, i), expression.GetFuncArg(r, i), op)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tif op == ast.NE {\n\t\t\treturn expression.ComposeDNFCondition(er.sctx, funcs...), nil\n\t\t}\n\t\treturn expression.ComposeCNFCondition(er.sctx, funcs...), nil\n\tdefault:\n\t\tlarg0, rarg0 := expression.GetFuncArg(l, 0), expression.GetFuncArg(r, 0)\n\t\tvar expr1, expr2, expr3, expr4, expr5 expression.Expression\n\t\texpr1 = expression.NewFunctionInternal(er.sctx, ast.NE, types.NewFieldType(mysql.TypeTiny), larg0, rarg0)\n\t\texpr2 = expression.NewFunctionInternal(er.sctx, op, types.NewFieldType(mysql.TypeTiny), larg0, rarg0)\n\t\texpr3 = expression.NewFunctionInternal(er.sctx, ast.IsNull, types.NewFieldType(mysql.TypeTiny), expr1)\n\t\tvar err error\n\t\tl, err = 
expression.PopRowFirstArg(er.sctx, l)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tr, err = expression.PopRowFirstArg(er.sctx, r)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\texpr4, err = er.constructBinaryOpFunction(l, r, op)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\texpr5, err = er.newFunction(ast.If, types.NewFieldType(mysql.TypeTiny), expr3, expression.Null, expr4)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn er.newFunction(ast.If, types.NewFieldType(mysql.TypeTiny), expr1, expr2, expr5)\n\t}\n}", "func ToBinaryExpr(x ast.Node) *ast.BinaryExpr {\n\tif x, ok := x.(*ast.BinaryExpr); ok {\n\t\treturn x\n\t}\n\treturn NilBinaryExpr\n}", "func NewEvaluationExpression(op OP, lB, rB string) (Evaluator, error) {\n\tl, r := strings.TrimSpace(lB), strings.TrimSpace(rB)\n\tif l == \"\" || r == \"\" {\n\t\treturn nil, fmt.Errorf(\"bindings cannot be empty; got %q, %q\", l, r)\n\t}\n\tswitch op {\n\tcase EQ, LT, GT:\n\t\treturn &evaluationNode{\n\t\t\top: op,\n\t\t\tlB: lB,\n\t\t\trB: rB,\n\t\t}, nil\n\tdefault:\n\t\treturn nil, errors.New(\"evaluation expressions require the operation to be one for the follwing '=', '<', '>'\")\n\t}\n}", "func MakeBinValExpr(op string, eval valueEval) func(scanner parser.Scanner, a, b Expr) Expr {\n\treturn func(scanner parser.Scanner, a, b Expr) Expr {\n\t\treturn newBinExpr(scanner, a, b, op, \"(%s \"+op+\" %s)\",\n\t\t\tfunc(ctx context.Context, a, b Value, _ Scope) (Value, error) {\n\t\t\t\treturn eval(a, b), nil\n\t\t\t})\n\t}\n}", "func NewBitOp(left, right sql.Expression, op string) *BitOp {\n\treturn &BitOp{BinaryExpression{Left: left, Right: right}, op}\n}", "func (er *expressionRewriter) constructBinaryOpFunction(l expression.Expression, r expression.Expression, op string) (expression.Expression, error) {\n\ttrace_util_0.Count(_expression_rewriter_00000, 33)\n\tlLen, rLen := expression.GetRowLen(l), expression.GetRowLen(r)\n\tif lLen == 1 && rLen == 1 {\n\t\ttrace_util_0.Count(_expression_rewriter_00000, 35)\n\t\treturn er.newFunction(op, types.NewFieldType(mysql.TypeTiny), l, r)\n\t} else {\n\t\ttrace_util_0.Count(_expression_rewriter_00000, 36)\n\t\tif rLen != lLen {\n\t\t\ttrace_util_0.Count(_expression_rewriter_00000, 37)\n\t\t\treturn nil, expression.ErrOperandColumns.GenWithStackByArgs(lLen)\n\t\t}\n\t}\n\ttrace_util_0.Count(_expression_rewriter_00000, 34)\n\tswitch op {\n\tcase ast.EQ, ast.NE, ast.NullEQ:\n\t\ttrace_util_0.Count(_expression_rewriter_00000, 38)\n\t\tfuncs := make([]expression.Expression, lLen)\n\t\tfor i := 0; i < lLen; i++ {\n\t\t\ttrace_util_0.Count(_expression_rewriter_00000, 46)\n\t\t\tvar err error\n\t\t\tfuncs[i], err = er.constructBinaryOpFunction(expression.GetFuncArg(l, i), expression.GetFuncArg(r, i), op)\n\t\t\tif err != nil {\n\t\t\t\ttrace_util_0.Count(_expression_rewriter_00000, 47)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\ttrace_util_0.Count(_expression_rewriter_00000, 39)\n\t\tif op == ast.NE {\n\t\t\ttrace_util_0.Count(_expression_rewriter_00000, 48)\n\t\t\treturn expression.ComposeDNFCondition(er.ctx, funcs...), nil\n\t\t}\n\t\ttrace_util_0.Count(_expression_rewriter_00000, 40)\n\t\treturn expression.ComposeCNFCondition(er.ctx, funcs...), nil\n\tdefault:\n\t\ttrace_util_0.Count(_expression_rewriter_00000, 41)\n\t\tlarg0, rarg0 := expression.GetFuncArg(l, 0), expression.GetFuncArg(r, 0)\n\t\tvar expr1, expr2, expr3, expr4, expr5 expression.Expression\n\t\texpr1 = expression.NewFunctionInternal(er.ctx, ast.NE, types.NewFieldType(mysql.TypeTiny), larg0, 
rarg0)\n\t\texpr2 = expression.NewFunctionInternal(er.ctx, op, types.NewFieldType(mysql.TypeTiny), larg0, rarg0)\n\t\texpr3 = expression.NewFunctionInternal(er.ctx, ast.IsNull, types.NewFieldType(mysql.TypeTiny), expr1)\n\t\tvar err error\n\t\tl, err = expression.PopRowFirstArg(er.ctx, l)\n\t\tif err != nil {\n\t\t\ttrace_util_0.Count(_expression_rewriter_00000, 49)\n\t\t\treturn nil, err\n\t\t}\n\t\ttrace_util_0.Count(_expression_rewriter_00000, 42)\n\t\tr, err = expression.PopRowFirstArg(er.ctx, r)\n\t\tif err != nil {\n\t\t\ttrace_util_0.Count(_expression_rewriter_00000, 50)\n\t\t\treturn nil, err\n\t\t}\n\t\ttrace_util_0.Count(_expression_rewriter_00000, 43)\n\t\texpr4, err = er.constructBinaryOpFunction(l, r, op)\n\t\tif err != nil {\n\t\t\ttrace_util_0.Count(_expression_rewriter_00000, 51)\n\t\t\treturn nil, err\n\t\t}\n\t\ttrace_util_0.Count(_expression_rewriter_00000, 44)\n\t\texpr5, err = er.newFunction(ast.If, types.NewFieldType(mysql.TypeTiny), expr3, expression.Null, expr4)\n\t\tif err != nil {\n\t\t\ttrace_util_0.Count(_expression_rewriter_00000, 52)\n\t\t\treturn nil, err\n\t\t}\n\t\ttrace_util_0.Count(_expression_rewriter_00000, 45)\n\t\treturn er.newFunction(ast.If, types.NewFieldType(mysql.TypeTiny), expr1, expr2, expr5)\n\t}\n}", "func newBinaryTreeNode(value int) *BinaryTreeNode {\n\treturn &BinaryTreeNode{\n\t\tData: value,\n\t\tLeft: nil,\n\t\tRight: nil,\n\t}\n}", "func TestCompiler_Compile_binaryExpr(t *testing.T) {\n\texpr := ast.BinaryExpr{\n\t\tX: ast.ScalarExpr{Val: \"1\", Typ: token.INT},\n\t\tOp: token.ADD,\n\t\tY: ast.BinaryExpr{\n\t\t\tX: ast.ScalarExpr{Val: \"2\", Typ: token.INT},\n\t\t\tOp: token.MUL,\n\t\t\tY: ast.ScalarExpr{Val: \"2\", Typ: token.INT},\n\t\t},\n\t}\n\tc := NewCompiler()\n\texpected := []Instruction{\n\t\t{Op: MULTIPLY, Arg1: Argument{Val: \"2\", ValType: INTEGER}, Arg2: Argument{Val: \"2\", ValType: INTEGER}, Ret: Argument{TVal: 1}},\n\t\t{Op: ADD, Arg1: Argument{Val: \"1\", ValType: INTEGER}, Arg2: Argument{TVal: 1}, Ret: Argument{TVal: 2}},\n\t}\n\tinsts := c.Compile(expr)\n\tif insts.tvals != 2 {\n\t\tt.Errorf(\"expected 2 got %d\", insts.tvals)\n\t}\n\tfor i, actual := range insts.instructions {\n\t\tif compareInstrucions(expected[i], actual) == false {\n\t\t\tt.Errorf(\"expected %s got %s\", expected, actual)\n\t\t}\n\t}\n}", "func evalBinaryFloatExpr(ctx *Ctx, x reflect.Value, op token.Token, y reflect.Value) (reflect.Value, error) {\n\tvar err error\n\tvar r float64\n\n\txx, yy := x.Float(), y.Float()\n\tswitch op {\n\tcase token.ADD: r = xx + yy\n\tcase token.SUB: r = xx - yy\n\tcase token.MUL: r = xx * yy\n\tcase token.QUO: r = xx / yy\n\t// case token.EQL: b = xx == yy\n\t// case token.LSS: b = xx < yy\n\t// case token.GTR: b = xx > yy\n\tdefault: err = ErrInvalidOperands{x, op, y}\n\t}\n\treturn reflect.ValueOf(r).Convert(x.Type()), err\n}", "func NewBinaryMutator(probability float64) *MutatorGeneBase {\n\tmutator := NewGeneBaseMutator(new(BinaryMutator), probability)\n\n\treturn mutator\n}", "func parseBinary(lex *lexer, prec1 int) Expr {\n\tlhs := parseUnary(lex)\n\tfor prec := precedence(lex.token); prec >= prec1; prec-- {\n\t\tfor precedence(lex.token) == prec {\n\t\t\top := lex.token\n\t\t\tlex.next() // consume operator\n\t\t\trhs := parseBinary(lex, prec+1)\n\t\t\tlhs = binary{op, lhs, rhs}\n\t\t}\n\t}\n\treturn lhs\n}", "func (p *Parser) expr() value.Expr {\n\ttok := p.next()\n\texpr := p.operand(tok, true)\n\ttok = p.peek()\n\tswitch tok.Type {\n\tcase scan.EOF, scan.RightParen, scan.RightBrack, scan.Semicolon, 
scan.Colon:\n\t\treturn expr\n\tcase scan.Identifier:\n\t\tif p.context.DefinedBinary(tok.Text) {\n\t\t\tp.next()\n\t\t\treturn &binary{\n\t\t\t\tleft: expr,\n\t\t\t\top: tok.Text,\n\t\t\t\tright: p.expr(),\n\t\t\t}\n\t\t}\n\tcase scan.Assign:\n\t\tp.next()\n\t\tswitch lhs := expr.(type) {\n\t\tcase *variableExpr, *index:\n\t\t\treturn &binary{\n\t\t\t\tleft: lhs,\n\t\t\t\top: tok.Text,\n\t\t\t\tright: p.expr(),\n\t\t\t}\n\t\tcase sliceExpr:\n\t\t\tfor _, v := range lhs {\n\t\t\t\tif _, ok := v.(*variableExpr); !ok {\n\t\t\t\t\tp.errorf(\"cannot assign to %s\", v.ProgString())\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn &binary{\n\t\t\t\tleft: lhs,\n\t\t\t\top: tok.Text,\n\t\t\t\tright: p.expr(),\n\t\t\t}\n\t\t}\n\t\tp.errorf(\"cannot assign to %s\", expr.ProgString())\n\tcase scan.Operator:\n\t\tp.next()\n\t\treturn &binary{\n\t\t\tleft: expr,\n\t\t\top: tok.Text,\n\t\t\tright: p.expr(),\n\t\t}\n\t}\n\tp.errorf(\"after expression: unexpected %s\", p.peek())\n\treturn nil\n}", "func internalNewEvaluator(ce []ConsumedElement) (Evaluator, []ConsumedElement, error) {\n\tif len(ce) == 0 {\n\t\treturn nil, nil, errors.New(\"cannot create an evaluator from an empty sequence of tokens\")\n\t}\n\thead, tail := ce[0], ce[1:]\n\ttkn := head.Token()\n\n\t// Not token\n\tif tkn.Type == lexer.ItemNot {\n\t\ttailEval, tailCEs, err := internalNewEvaluator(tail)\n\t\tif err != nil {\n\t\t\treturn nil, tailCEs, err\n\t\t}\n\t\te, err := NewUnaryBooleanExpression(NOT, tailEval)\n\t\tif err != nil {\n\t\t\treturn nil, tailCEs, err\n\t\t}\n\t\treturn e, tailCEs, nil\n\t}\n\n\t// Binding token\n\tif tkn.Type == lexer.ItemBinding {\n\t\tif len(tail) < 2 {\n\t\t\treturn nil, nil, fmt.Errorf(\"cannot create a binary evaluation operand for %v\", ce)\n\t\t}\n\t\topTkn, bndTkn := tail[0].Token(), tail[1].Token()\n\t\tvar op OP\n\t\tswitch opTkn.Type {\n\t\tcase lexer.ItemEQ:\n\t\t\top = EQ\n\t\tcase lexer.ItemLT:\n\t\t\top = LT\n\t\tcase lexer.ItemGT:\n\t\t\top = GT\n\t\tdefault:\n\t\t\treturn nil, nil, fmt.Errorf(\"cannot create a binary evaluation operand for %v\", opTkn)\n\t\t}\n\t\tif bndTkn.Type == lexer.ItemBinding {\n\t\t\te, err := NewEvaluationExpression(op, tkn.Text, bndTkn.Text)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t\tvar res []ConsumedElement\n\t\t\tif len(tail) > 2 {\n\t\t\t\tres = tail[2:]\n\t\t\t}\n\t\t\treturn e, res, nil\n\t\t}\n\t\treturn nil, nil, fmt.Errorf(\"cannot build a binary evaluation operand with right operant %v\", bndTkn)\n\t}\n\n\t// LPar Token\n\tif tkn.Type == lexer.ItemLPar {\n\t\ttailEval, ce, err := internalNewEvaluator(tail)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tif len(ce) < 1 {\n\t\t\treturn nil, nil, errors.New(\"incomplete parentesis expression; missing ')'\")\n\t\t}\n\t\thead, tail = ce[0], ce[1:]\n\t\tif head.Token().Type != lexer.ItemRPar {\n\t\t\treturn nil, nil, fmt.Errorf(\"missing right parentesis in expression; found %v instead\", head)\n\t\t}\n\t\tif len(tail) > 1 {\n\t\t\t// Binary boolean expression.\n\t\t\topTkn := tail[0].Token()\n\t\t\tvar op OP\n\t\t\tswitch opTkn.Type {\n\t\t\tcase lexer.ItemAnd:\n\t\t\t\top = AND\n\t\t\tcase lexer.ItemOr:\n\t\t\t\top = OR\n\t\t\tdefault:\n\t\t\t\treturn nil, nil, fmt.Errorf(\"cannot create a binary boolean evaluation operand for %v\", opTkn)\n\t\t\t}\n\t\t\trTailEval, ceResTail, err := internalNewEvaluator(tail[1:])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t\tev, err := NewBinaryBooleanExpression(op, tailEval, rTailEval)\n\t\t\tif err != nil {\n\t\t\t\treturn 
nil, nil, err\n\t\t\t}\n\t\t\treturn ev, ceResTail, nil\n\t\t}\n\t\treturn tailEval, tail, nil\n\t}\n\n\tvar tkns []string\n\tfor _, e := range ce {\n\t\ttkns = append(tkns, fmt.Sprintf(\"%q\", e.token.Type))\n\t}\n\treturn nil, nil, fmt.Errorf(\"could not create an evaluator for condition {%s}\", strings.Join(tkns, \",\"))\n}", "func NewBinaryApplier(dst io.Writer, src io.ReaderAt) *BinaryApplier {\n\ta := BinaryApplier{\n\t\tdst: dst,\n\t\tsrc: src,\n\t}\n\treturn &a\n}", "func newBinaryExprGuard(expr *influxql.BinaryExpr) *exprGuard {\n\t// if it's a nested binary expression, always match.\n\tif _, ok := expr.LHS.(*influxql.BinaryExpr); ok {\n\t\treturn nil\n\t} else if _, ok := expr.RHS.(*influxql.BinaryExpr); ok {\n\t\treturn nil\n\t}\n\n\t// ensure one of the expressions is a VarRef, and make that the key.\n\tkey, ok := expr.LHS.(*influxql.VarRef)\n\tvalue := expr.RHS\n\tif !ok {\n\t\tkey, ok = expr.RHS.(*influxql.VarRef)\n\t\tif !ok {\n\t\t\treturn nil\n\t\t}\n\t\tvalue = expr.LHS\n\t}\n\n\t// check the key for situations we know we can't filter.\n\tif key.Val != \"_name\" && key.Type != influxql.Unknown && key.Type != influxql.Tag {\n\t\treturn nil\n\t}\n\n\t// scrutinize the value to return an efficient guard.\n\tswitch value := value.(type) {\n\tcase *influxql.StringLiteral:\n\t\tval := []byte(value.Val)\n\t\tg := &exprGuard{tagMatches: &tagGuard{\n\t\t\tmeas: key.Val == \"_name\",\n\t\t\tkey: []byte(key.Val),\n\t\t}}\n\n\t\tswitch expr.Op {\n\t\tcase influxql.EQ:\n\t\t\tg.tagMatches.op = func(x []byte) bool { return bytes.Equal(val, x) }\n\n\t\tcase influxql.NEQ:\n\t\t\tg.tagMatches.op = func(x []byte) bool { return !bytes.Equal(val, x) }\n\n\t\tdefault: // any other operator isn't valid. conservatively match everything.\n\t\t\treturn nil\n\t\t}\n\n\t\treturn g\n\n\tcase *influxql.RegexLiteral:\n\t\t// There's a tradeoff between being precise and being fast. For example, if the\n\t\t// delete includes a very expensive regex, we don't want to run that against every\n\t\t// incoming point. The decision here is to match any point that has a possibly\n\t\t// expensive match if there is any overlap on the tags. In other words, expensive\n\t\t// matches get transformed into trivially matching everything.\n\t\treturn &exprGuard{tagExists: map[string]struct{}{key.Val: {}}}\n\n\tcase *influxql.VarRef:\n\t\t// We could do a better job here by encoding the two names and checking the points\n\t\t// against them, but I'm not quite sure how to do that. 
Be conservative and match\n\t\t// any points that contain either the key or value.\n\n\t\t// since every point has a measurement, always match if either are on the measurement.\n\t\tif key.Val == \"_name\" || value.Val == \"_name\" {\n\t\t\treturn nil\n\t\t}\n\t\treturn &exprGuard{tagExists: map[string]struct{}{\n\t\t\tkey.Val: {},\n\t\t\tvalue.Val: {},\n\t\t}}\n\n\tdefault: // any other value type matches everything\n\t\treturn nil\n\t}\n}", "func NewBinaryTree(vals ...interface{}) (res *BinaryTreeNode, err error) {\n\tif len(vals) == 0 {\n\t\treturn nil, ErrEmptyInput\n\t}\n\tif res, err = createNode(vals[0]); err != nil {\n\t\treturn\n\t}\n\terr = buildTree([]*BinaryTreeNode{res}, 1, vals)\n\treturn\n}", "func NewEvaluator() *Evaluator {\n\tresult := &Evaluator{}\n\tresult.cnvtr = newInfixConverter()\n\tresult.tknzr = newInfixTokenizer()\n\tresult.functions = make(map[string]functions.Function)\n\tresult.operators = make(map[string]operators.Operator)\n\tresult.stack = make([]interface{}, 0)\n\treturn result\n}", "func NewBinaryFrame(p []byte) Frame {\n\treturn NewFrame(OpBinary, true, p)\n}", "func (*Base) Binary(p ASTPass, node *ast.Binary, ctx Context) {\n\tp.Visit(p, &node.Left, ctx)\n\tp.Fodder(p, &node.OpFodder, ctx)\n\tp.Visit(p, &node.Right, ctx)\n}", "func NewBinaryEqualsFunc(key Key, values ...string) (Function, error) {\n\tsset := set.CreateStringSet(values...)\n\tif err := validateBinaryEqualsValues(binaryEquals, key, sset); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &binaryEqualsFunc{key, sset}, nil\n}", "func (b *BinaryExpr) Evaluate(env ExpressionEnv) (EvalResult, error) {\n\tlVal, err := b.Left.Evaluate(env)\n\tif err != nil {\n\t\treturn EvalResult{}, err\n\t}\n\trVal, err := b.Right.Evaluate(env)\n\tif err != nil {\n\t\treturn EvalResult{}, err\n\t}\n\treturn b.Op.Evaluate(lVal, rVal)\n}", "func newBinaryEqualsFunc(key Key, values ValueSet) (Function, error) {\n\tvalueStrings, err := valuesToStringSlice(binaryEquals, values)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewBinaryEqualsFunc(key, valueStrings...)\n}", "func NewBinary(rdr io.ReadSeekCloser) (*Binary, error) {\n\tn, err := rdr.Seek(0, io.SeekEnd)\n\tif err != nil && !errors.Is(err, io.EOF) {\n\t\treturn nil, err\n\t}\n\treturn &Binary{\n\t\tReadSeekCloser: rdr,\n\t\tsize: n,\n\t}, nil\n}", "func _b(x interface{}, op string, y interface{}) ast.Expr {\n\tvar xx, yx ast.Expr\n\tif xstr, ok := x.(string); ok {\n\t\txx = _x(xstr)\n\t} else {\n\t\txx = x.(ast.Expr)\n\t}\n\tif ystr, ok := y.(string); ok {\n\t\tyx = _x(ystr)\n\t} else {\n\t\tyx = y.(ast.Expr)\n\t}\n\treturn &ast.BinaryExpr{\n\t\tX: xx,\n\t\tOp: _op(op),\n\t\tY: yx,\n\t}\n}", "func evalBinaryUintExpr(ctx *Ctx, x reflect.Value, op token.Token, y reflect.Value) (reflect.Value, error) {\n\tvar err error\n\tvar r uint64\n\tvar b bool\n\tis_bool := false\n\n\txx, yy := x.Uint(), y.Uint()\n\tswitch op {\n\tcase token.ADD: r = xx + yy\n\tcase token.SUB: r = xx - yy\n\tcase token.MUL: r = xx * yy\n\tcase token.QUO: r = xx / yy\n\tcase token.REM: r = xx % yy\n\tcase token.AND: r = xx & yy\n\tcase token.OR: r = xx | yy\n\tcase token.XOR: r = xx ^ yy\n\tcase token.AND_NOT: r = xx &^ yy\n\tcase token.EQL: b = xx == yy; is_bool = true\n\tcase token.NEQ: b = xx != yy; is_bool = true\n\tcase token.LEQ: b = xx <= yy; is_bool = true\n\tcase token.GEQ: b = xx >= yy; is_bool = true\n\tcase token.LSS: b = xx < yy; is_bool = true\n\tcase token.GTR: b = xx > yy; is_bool = true\n\tdefault: err = ErrInvalidOperands{x, op, y}\n\t}\n\tif is_bool 
{\n\t\treturn reflect.ValueOf(b), err\n\t} else {\n\t\treturn reflect.ValueOf(r).Convert(x.Type()), err\n\t}\n}", "func Binary(subtype byte, data []byte) Val {\n\treturn Val{t: bsontype.Binary, primitive: primitive.Binary{Subtype: subtype, Data: data}}\n}", "func init() {\n\n\tdefineExpr(binaryExpr{\n\t\ttokenType: lexer.TokenVerticalBarVerticalBar,\n\t\tleftBindingPower: exprLeftBindingPowerLogicalOr,\n\t\toperation: ast.OperationOr,\n\t})\n\n\tdefineExpr(binaryExpr{\n\t\ttokenType: lexer.TokenAmpersandAmpersand,\n\t\tleftBindingPower: exprLeftBindingPowerLogicalAnd,\n\t\toperation: ast.OperationAnd,\n\t})\n\n\tdefineLessThanOrTypeArgumentsExpression()\n\tdefineGreaterThanOrBitwiseRightShiftExpression()\n\n\tdefineExpr(binaryExpr{\n\t\ttokenType: lexer.TokenLessEqual,\n\t\tleftBindingPower: exprLeftBindingPowerComparison,\n\t\toperation: ast.OperationLessEqual,\n\t})\n\n\tdefineExpr(binaryExpr{\n\t\ttokenType: lexer.TokenGreaterEqual,\n\t\tleftBindingPower: exprLeftBindingPowerComparison,\n\t\toperation: ast.OperationGreaterEqual,\n\t})\n\n\tdefineExpr(binaryExpr{\n\t\ttokenType: lexer.TokenEqualEqual,\n\t\tleftBindingPower: exprLeftBindingPowerComparison,\n\t\toperation: ast.OperationEqual,\n\t})\n\n\tdefineExpr(binaryExpr{\n\t\ttokenType: lexer.TokenNotEqual,\n\t\tleftBindingPower: exprLeftBindingPowerComparison,\n\t\toperation: ast.OperationNotEqual,\n\t})\n\n\tdefineExpr(binaryExpr{\n\t\ttokenType: lexer.TokenDoubleQuestionMark,\n\t\tleftBindingPower: exprLeftBindingPowerNilCoalescing,\n\t\toperation: ast.OperationNilCoalesce,\n\t\trightAssociative: true,\n\t})\n\n\tdefineExpr(binaryExpr{\n\t\ttokenType: lexer.TokenVerticalBar,\n\t\tleftBindingPower: exprLeftBindingPowerBitwiseOr,\n\t\toperation: ast.OperationBitwiseOr,\n\t})\n\n\tdefineExpr(binaryExpr{\n\t\ttokenType: lexer.TokenCaret,\n\t\tleftBindingPower: exprLeftBindingPowerBitwiseXor,\n\t\toperation: ast.OperationBitwiseXor,\n\t})\n\n\tdefineExpr(binaryExpr{\n\t\ttokenType: lexer.TokenAmpersand,\n\t\tleftBindingPower: exprLeftBindingPowerBitwiseAnd,\n\t\toperation: ast.OperationBitwiseAnd,\n\t})\n\n\tdefineExpr(binaryExpr{\n\t\ttokenType: lexer.TokenLessLess,\n\t\tleftBindingPower: exprLeftBindingPowerBitwiseShift,\n\t\toperation: ast.OperationBitwiseLeftShift,\n\t})\n\n\tdefineExpr(binaryExpr{\n\t\ttokenType: lexer.TokenPlus,\n\t\tleftBindingPower: exprLeftBindingPowerAddition,\n\t\toperation: ast.OperationPlus,\n\t})\n\n\tdefineExpr(binaryExpr{\n\t\ttokenType: lexer.TokenMinus,\n\t\tleftBindingPower: exprLeftBindingPowerAddition,\n\t\toperation: ast.OperationMinus,\n\t})\n\n\tdefineExpr(binaryExpr{\n\t\ttokenType: lexer.TokenStar,\n\t\tleftBindingPower: exprLeftBindingPowerMultiplication,\n\t\toperation: ast.OperationMul,\n\t})\n\n\tdefineExpr(binaryExpr{\n\t\ttokenType: lexer.TokenSlash,\n\t\tleftBindingPower: exprLeftBindingPowerMultiplication,\n\t\toperation: ast.OperationDiv,\n\t})\n\n\tdefineExpr(binaryExpr{\n\t\ttokenType: lexer.TokenPercent,\n\t\tleftBindingPower: exprLeftBindingPowerMultiplication,\n\t\toperation: ast.OperationMod,\n\t})\n\n\tdefineIdentifierLeftDenotations()\n\n\tdefineExpr(literalExpr{\n\t\ttokenType: lexer.TokenBinaryIntegerLiteral,\n\t\tnullDenotation: func(p *parser, token lexer.Token) (ast.Expression, error) {\n\t\t\tliteral := p.tokenSource(token)\n\t\t\treturn parseIntegerLiteral(\n\t\t\t\tp,\n\t\t\t\tliteral,\n\t\t\t\tliteral[2:],\n\t\t\t\tcommon.IntegerLiteralKindBinary,\n\t\t\t\ttoken.Range,\n\t\t\t), nil\n\t\t},\n\t})\n\n\tdefineExpr(literalExpr{\n\t\ttokenType: 
lexer.TokenOctalIntegerLiteral,\n\t\tnullDenotation: func(p *parser, token lexer.Token) (ast.Expression, error) {\n\t\t\tliteral := p.tokenSource(token)\n\t\t\treturn parseIntegerLiteral(\n\t\t\t\tp,\n\t\t\t\tliteral,\n\t\t\t\tliteral[2:],\n\t\t\t\tcommon.IntegerLiteralKindOctal,\n\t\t\t\ttoken.Range,\n\t\t\t), nil\n\t\t},\n\t})\n\n\tdefineExpr(literalExpr{\n\t\ttokenType: lexer.TokenDecimalIntegerLiteral,\n\t\tnullDenotation: func(p *parser, token lexer.Token) (ast.Expression, error) {\n\t\t\tliteral := p.tokenSource(token)\n\t\t\treturn parseIntegerLiteral(\n\t\t\t\tp,\n\t\t\t\tliteral,\n\t\t\t\tliteral,\n\t\t\t\tcommon.IntegerLiteralKindDecimal,\n\t\t\t\ttoken.Range,\n\t\t\t), nil\n\t\t},\n\t})\n\n\tdefineExpr(literalExpr{\n\t\ttokenType: lexer.TokenHexadecimalIntegerLiteral,\n\t\tnullDenotation: func(p *parser, token lexer.Token) (ast.Expression, error) {\n\t\t\tliteral := p.tokenSource(token)\n\t\t\treturn parseIntegerLiteral(\n\t\t\t\tp,\n\t\t\t\tliteral,\n\t\t\t\tliteral[2:],\n\t\t\t\tcommon.IntegerLiteralKindHexadecimal,\n\t\t\t\ttoken.Range,\n\t\t\t), nil\n\t\t},\n\t})\n\n\tdefineExpr(literalExpr{\n\t\ttokenType: lexer.TokenUnknownBaseIntegerLiteral,\n\t\tnullDenotation: func(p *parser, token lexer.Token) (ast.Expression, error) {\n\t\t\tliteral := p.tokenSource(token)\n\t\t\treturn parseIntegerLiteral(\n\t\t\t\tp,\n\t\t\t\tliteral,\n\t\t\t\tliteral[2:],\n\t\t\t\tcommon.IntegerLiteralKindUnknown,\n\t\t\t\ttoken.Range,\n\t\t\t), nil\n\t\t},\n\t})\n\n\tdefineExpr(literalExpr{\n\t\ttokenType: lexer.TokenFixedPointNumberLiteral,\n\t\tnullDenotation: func(p *parser, token lexer.Token) (ast.Expression, error) {\n\t\t\tliteral := p.tokenSource(token)\n\t\t\treturn parseFixedPointLiteral(\n\t\t\t\tp,\n\t\t\t\tliteral,\n\t\t\t\ttoken.Range,\n\t\t\t), nil\n\t\t},\n\t})\n\n\tdefineExpr(literalExpr{\n\t\ttokenType: lexer.TokenString,\n\t\tnullDenotation: func(p *parser, token lexer.Token) (ast.Expression, error) {\n\t\t\tliteral := p.tokenSource(token)\n\t\t\tparsedString := parseStringLiteral(p, literal)\n\t\t\treturn ast.NewStringExpression(\n\t\t\t\tp.memoryGauge,\n\t\t\t\tparsedString,\n\t\t\t\ttoken.Range,\n\t\t\t), nil\n\t\t},\n\t})\n\n\tdefineExpr(prefixExpr{\n\t\ttokenType: lexer.TokenMinus,\n\t\tbindingPower: exprLeftBindingPowerUnaryPrefix,\n\t\tnullDenotation: func(p *parser, right ast.Expression, tokenRange ast.Range) (ast.Expression, error) {\n\t\t\tswitch right := right.(type) {\n\t\t\tcase *ast.IntegerExpression:\n\t\t\t\tif right.Value.Sign() > 0 {\n\t\t\t\t\tif right.Value != nil {\n\t\t\t\t\t\tright.Value.Neg(right.Value)\n\t\t\t\t\t}\n\t\t\t\t\tright.StartPos = tokenRange.StartPos\n\t\t\t\t\treturn right, nil\n\t\t\t\t}\n\n\t\t\tcase *ast.FixedPointExpression:\n\t\t\t\tif !right.Negative {\n\t\t\t\t\tright.Negative = !right.Negative\n\t\t\t\t\tright.StartPos = tokenRange.StartPos\n\t\t\t\t\treturn right, nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn ast.NewUnaryExpression(\n\t\t\t\tp.memoryGauge,\n\t\t\t\tast.OperationMinus,\n\t\t\t\tright,\n\t\t\t\ttokenRange.StartPos,\n\t\t\t), nil\n\t\t},\n\t})\n\n\tdefineExpr(unaryExpr{\n\t\ttokenType: lexer.TokenExclamationMark,\n\t\tbindingPower: exprLeftBindingPowerUnaryPrefix,\n\t\toperation: ast.OperationNegate,\n\t})\n\n\tdefineExpr(unaryExpr{\n\t\ttokenType: lexer.TokenLeftArrow,\n\t\tbindingPower: exprLeftBindingPowerUnaryPrefix,\n\t\toperation: ast.OperationMove,\n\t})\n\n\tdefineExpr(postfixExpr{\n\t\ttokenType: lexer.TokenExclamationMark,\n\t\tbindingPower: exprLeftBindingPowerUnaryPostfix,\n\t\tleftDenotation: func(p *parser, left 
ast.Expression, tokenRange ast.Range) (ast.Expression, error) {\n\t\t\treturn ast.NewForceExpression(\n\t\t\t\tp.memoryGauge,\n\t\t\t\tleft,\n\t\t\t\ttokenRange.EndPos,\n\t\t\t), nil\n\t\t},\n\t})\n\n\tdefineNestedExpression()\n\tdefineInvocationExpression()\n\tdefineArrayExpression()\n\tdefineDictionaryExpression()\n\tdefineIndexExpression()\n\tdefinePathExpression()\n\tdefineConditionalExpression()\n\tdefineReferenceExpression()\n\tdefineMemberExpression()\n\tdefineIdentifierExpression()\n\n\tsetExprNullDenotation(lexer.TokenEOF, func(parser *parser, token lexer.Token) (ast.Expression, error) {\n\t\treturn nil, NewSyntaxError(token.StartPos, \"unexpected end of program\")\n\t})\n}", "func NewBinarySearcher(min, max, prec int) Searcher {\n\tif min >= max {\n\t\tpanic(errors.Errorf(\"min must be less than max; min=%v, max=%v\", min, max))\n\t}\n\tif prec < 1 {\n\t\tpanic(errors.Errorf(\"precision must be >= 1; prec=%v\", prec))\n\t}\n\treturn &binarySearcher{\n\t\tss: searchSpace{\n\t\t\tmin: min,\n\t\t\tmax: max,\n\t\t},\n\t\tcur: mid(min, max),\n\t\tprec: prec,\n\t}\n}", "func NewBinary(params BinaryParams) *Binary {\n\tb := &Binary{\n\t\tfile: params.File,\n\t\tid: params.ID,\n\t\tversion: params.Version,\n\t\tarch: params.Arch,\n\t\turl: params.URL,\n\t\tchecksumList: checksum.NewChecksums(checksum.NewInternalChecksum(params.ID, params.Version, params.Arch)),\n\t}\n\tif len(params.ChecksumList) != 0 {\n\t\tb.checksumList.Append(params.ChecksumList...)\n\t}\n\treturn b\n}", "func NewBinaryScanner(img *binimg.Image) Scanner {\n\treturn &binaryScanner{img}\n}", "func NewBinarySearch() *BinarySearch {\n\treturn &BinarySearch{}\n}", "func NewBinaryTree() *BinaryTree {\n\tvar t BinaryTree\n\treturn &t\n}", "func binaryEval(binaryOp stmt.BinaryOP, left, right *collections.FloatArray) *collections.FloatArray {\n\tif left == nil || right == nil {\n\t\treturn nil\n\t}\n\tif left.IsEmpty() && right.IsEmpty() {\n\t\treturn nil\n\t}\n\n\tcapacity := left.Capacity()\n\tresult := collections.NewFloatArray(capacity)\n\n\tfor i := 0; i < capacity; i++ {\n\t\tleftHasValue := left.HasValue(i)\n\t\trightHasValue := right.HasValue(i)\n\t\tswitch {\n\t\tcase !leftHasValue && right.IsSingle():\n\t\tcase left.IsSingle() && !rightHasValue:\n\t\tcase leftHasValue || rightHasValue:\n\t\t\tresult.SetValue(i, eval(binaryOp, left.GetValue(i), right.GetValue(i)))\n\t\t}\n\t}\n\n\treturn result\n}", "func New() *Evaluator {\n\teval := &Evaluator{\n\t\tCtxt: lexer.Context{Line: 1, Col: 1, Ctxt: \"\"},\n\t\tloopcount: 0,\n\t}\n\treturn eval\n}", "func BinaryOp(x Value, op token.Token, y Value) Value {\n\tvx, okx := x.(*ratVal)\n\tvy, oky := y.(*ratVal)\n\tif okx || oky {\n\t\tif okx {\n\t\t\tx = vx.Value\n\t\t}\n\t\tif oky {\n\t\t\ty = vy.Value\n\t\t}\n\t\tret := constant.BinaryOp(x, gotoken.Token(op), y)\n\t\treturn &ratVal{ret}\n\t}\n\treturn constant.BinaryOp(x, gotoken.Token(op), y)\n}", "func New() *VerbalExpression {\n\tr := new(VerbalExpression)\n\tr.flags = MULTILINE | GLOBAL\n\tr.parts = make([]string, 0)\n\treturn r\n}", "func NewBinaryTree(vals []Comparable) *BinaryTree {\n\treturn new(BinaryTree).Init(vals)\n}", "func NewPowExpr(scanner parser.Scanner, a, b Expr) Expr {\n\treturn newArithExpr(scanner, a, b, \"^\", func(a, b float64) float64 {\n\t\treturn math.Pow(a, b)\n\t})\n}", "func (e *binaryExprEvaluator) eval(lhs, rhs interface{}) interface{} {\n\tswitch e.op {\n\tcase ADD:\n\t\treturn lhs.(float64) + rhs.(float64)\n\tcase SUB:\n\t\treturn lhs.(float64) - rhs.(float64)\n\tcase MUL:\n\t\treturn 
lhs.(float64) * rhs.(float64)\n\tcase DIV:\n\t\trhs := rhs.(float64)\n\t\tif rhs == 0 {\n\t\t\treturn float64(0)\n\t\t}\n\t\treturn lhs.(float64) / rhs\n\tdefault:\n\t\t// TODO: Validate operation & data types.\n\t\tpanic(\"invalid operation: \" + e.op.String())\n\t}\n}", "func NewBinarySearchTree() core.IndexTree {\n\treturn &binarySearchTree{\n\t\troot: nil,\n\t\thistory: treeHistory{},\n\t}\n}", "func NewBinaryTree() *BinaryTree {\n\treturn &BinaryTree{}\n}", "func New() *binaryTree {\n\treturn CreateDefaultTree()\n}", "func NewBinaryChop(algo Algorithm) *BinaryChop {\n\tbinaryChop := new(BinaryChop)\n\tbinaryChop.algorithm = algo\n\treturn binaryChop\n}", "func NewFromBinary(b []byte, sheetName string) (*ExcelReport, error) {\n\tr := bytes.NewReader(b)\n\n\ttemplate, err := excelize.OpenReader(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &ExcelReport{\n\t\tf: template,\n\t\tsheetName: sheetName,\n\t}, nil\n}", "func (expr *BinaryExpr) String() string {\n\tswitch expr.Op {\n\tcase PLUS:\n\t\treturn expr.X.String() + \" + \" + expr.Y.String()\n\tcase MINUS:\n\t\treturn expr.X.String() + \" - \" + expr.Y.String()\n\tcase STAR:\n\t\treturn expr.X.String() + \" * \" + expr.Y.String()\n\tcase SLASH:\n\t\treturn expr.X.String() + \" / \" + expr.Y.String()\n\tcase REM:\n\t\treturn expr.X.String() + \" % \" + expr.Y.String()\n\tcase CONCAT:\n\t\treturn expr.X.String() + \" || \" + expr.Y.String()\n\tcase BETWEEN:\n\t\treturn expr.X.String() + \" BETWEEN \" + expr.Y.String()\n\tcase NOTBETWEEN:\n\t\treturn expr.X.String() + \" NOT BETWEEN \" + expr.Y.String()\n\tcase LSHIFT:\n\t\treturn expr.X.String() + \" << \" + expr.Y.String()\n\tcase RSHIFT:\n\t\treturn expr.X.String() + \" >> \" + expr.Y.String()\n\tcase BITAND:\n\t\treturn expr.X.String() + \" & \" + expr.Y.String()\n\tcase BITOR:\n\t\treturn expr.X.String() + \" | \" + expr.Y.String()\n\tcase LT:\n\t\treturn expr.X.String() + \" < \" + expr.Y.String()\n\tcase LE:\n\t\treturn expr.X.String() + \" <= \" + expr.Y.String()\n\tcase GT:\n\t\treturn expr.X.String() + \" > \" + expr.Y.String()\n\tcase GE:\n\t\treturn expr.X.String() + \" >= \" + expr.Y.String()\n\tcase EQ:\n\t\treturn expr.X.String() + \" = \" + expr.Y.String()\n\tcase NE:\n\t\treturn expr.X.String() + \" != \" + expr.Y.String()\n\tcase IS:\n\t\treturn expr.X.String() + \" IS \" + expr.Y.String()\n\tcase ISNOT:\n\t\treturn expr.X.String() + \" IS NOT \" + expr.Y.String()\n\tcase IN:\n\t\treturn expr.X.String() + \" IN \" + expr.Y.String()\n\tcase NOTIN:\n\t\treturn expr.X.String() + \" NOT IN \" + expr.Y.String()\n\tcase LIKE:\n\t\treturn expr.X.String() + \" LIKE \" + expr.Y.String()\n\tcase NOTLIKE:\n\t\treturn expr.X.String() + \" NOT LIKE \" + expr.Y.String()\n\tcase GLOB:\n\t\treturn expr.X.String() + \" GLOB \" + expr.Y.String()\n\tcase NOTGLOB:\n\t\treturn expr.X.String() + \" NOT GLOB \" + expr.Y.String()\n\tcase MATCH:\n\t\treturn expr.X.String() + \" MATCH \" + expr.Y.String()\n\tcase NOTMATCH:\n\t\treturn expr.X.String() + \" NOT MATCH \" + expr.Y.String()\n\tcase REGEXP:\n\t\treturn expr.X.String() + \" REGEXP \" + expr.Y.String()\n\tcase NOTREGEXP:\n\t\treturn expr.X.String() + \" NOT REGEXP \" + expr.Y.String()\n\tcase AND:\n\t\treturn expr.X.String() + \" AND \" + expr.Y.String()\n\tcase OR:\n\t\treturn expr.X.String() + \" OR \" + expr.Y.String()\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"sql.BinaryExpr.String(): invalid op %s\", expr.Op))\n\t}\n}", "func New(hashFunc func(i interface{}) int64) *rbTree {\n\treturn &rbTree{hashFunc: hashFunc}\n}", "func 
evalBinaryComplexExpr(ctx *Ctx, x reflect.Value, op token.Token, y reflect.Value) (reflect.Value, error) {\n\tvar err error\n\tvar r complex128\n\n\txx, yy := x.Complex(), y.Complex()\n\tswitch op {\n\tcase token.ADD: r = xx + yy\n\tcase token.SUB: r = xx - yy\n\tcase token.MUL: r = xx * yy\n\tcase token.QUO: r = xx / yy\n\tdefault: err = ErrInvalidOperands{x, op, y}\n\t}\n\treturn reflect.ValueOf(r).Convert(x.Type()), err\n}", "func CreateNewExpr() Expr {\n\tc11 := Constant{value: 1.1}\n\tc22 := Constant{value: 2.2}\n\tc33 := Constant{value: 3.3}\n\tbp := BinaryPlus{left: &BinaryPlus{left: &c11, right: &c22}, right: &c33}\n\treturn &bp\n}", "func (p *parser) parseBinaryOps(ops []map[TokenType]*Operation) (Expression, hcl.Diagnostics) {\n\tif len(ops) == 0 {\n\t\t// We've run out of operators, so now we'll just try to parse a term.\n\t\treturn p.parseExpressionWithTraversals()\n\t}\n\n\tthisLevel := ops[0]\n\tremaining := ops[1:]\n\n\tvar lhs, rhs Expression\n\tvar operation *Operation\n\tvar diags hcl.Diagnostics\n\n\t// Parse a term that might be the first operand of a binary\n\t// operation or it might just be a standalone term.\n\t// We won't know until we've parsed it and can look ahead\n\t// to see if there's an operator token for this level.\n\tlhs, lhsDiags := p.parseBinaryOps(remaining)\n\tdiags = append(diags, lhsDiags...)\n\tif p.recovery && lhsDiags.HasErrors() {\n\t\treturn lhs, diags\n\t}\n\n\t// We'll keep eating up operators until we run out, so that operators\n\t// with the same precedence will combine in a left-associative manner:\n\t// a+b+c => (a+b)+c, not a+(b+c)\n\t//\n\t// Should we later want to have right-associative operators, a way\n\t// to achieve that would be to call back up to ParseExpression here\n\t// instead of iteratively parsing only the remaining operators.\n\tfor {\n\t\tnext := p.Peek()\n\t\tvar newOp *Operation\n\t\tvar ok bool\n\t\tif newOp, ok = thisLevel[next.Type]; !ok {\n\t\t\tbreak\n\t\t}\n\n\t\t// Are we extending an expression started on the previous iteration?\n\t\tif operation != nil {\n\t\t\tlhs = &BinaryOpExpr{\n\t\t\t\tLHS: lhs,\n\t\t\t\tOp: operation,\n\t\t\t\tRHS: rhs,\n\n\t\t\t\tSrcRange: hcl.RangeBetween(lhs.Range(), rhs.Range()),\n\t\t\t}\n\t\t}\n\n\t\toperation = newOp\n\t\tp.Read() // eat operator token\n\t\tvar rhsDiags hcl.Diagnostics\n\t\trhs, rhsDiags = p.parseBinaryOps(remaining)\n\t\tdiags = append(diags, rhsDiags...)\n\t\tif p.recovery && rhsDiags.HasErrors() {\n\t\t\treturn lhs, diags\n\t\t}\n\t}\n\n\tif operation == nil {\n\t\treturn lhs, diags\n\t}\n\n\treturn &BinaryOpExpr{\n\t\tLHS: lhs,\n\t\tOp: operation,\n\t\tRHS: rhs,\n\n\t\tSrcRange: hcl.RangeBetween(lhs.Range(), rhs.Range()),\n\t}, diags\n}", "func NewBuiltinExpr(terms ...*Term) *Expr {\n\treturn &Expr{Terms: terms}\n}", "func (w *Writer) PutBinaryExpr(b *ast.BinaryExpr) {\n\tif *flagParens {\n\t\tw.Put(\"(\")\n\t}\n\n\tunsigned := IsUnsigned(TypeOf(b.X)) || IsUnsigned(TypeOf(b.Y))\n\n\tx := RValue(b.X)\n\ty := RValue(b.Y)\n\n\tswitch b.Op {\n\tdefault:\n\t\tw.Put(x, b.Op.String(), y)\n\tcase token.EQL:\n\t\tw.PutJEquals(JTypeOfExpr(b.X), x, JTypeOfExpr(b.Y), y)\n\tcase token.LSS, token.GTR, token.LEQ, token.GEQ, token.QUO, token.REM:\n\t\tif unsigned {\n\t\t\tw.PutUnsignedOp(b.X, b.Op, b.Y)\n\t\t} else {\n\t\t\tw.Put(x, b.Op.String(), y) // default\n\t\t}\n\tcase token.SHL, token.SHR, token.AND, token.OR, token.XOR:\n\t\t// different precedence in Go and Java, parentisize to be sure\n\t\tw.Put(\"(\", x, b.Op.String(), y, \")\")\n\tcase token.AND_NOT: //\n\t\t// 
not in java\n\t\tw.Put(\"(\", x, \"&~\", y, \")\")\n\t}\n\n\tif *flagParens {\n\t\tw.Put(\")\")\n\t}\n}", "func New(fset *token.FileSet, files []*ast.File, in *inspector.Inspector) *Evaluator {\n\treturn &Evaluator{n: NewNodeNavigator(fset, files, in)}\n}", "func NewExpression(expressionStr string) (*Expression, error) {\n\tfunctions := map[string]govaluate.ExpressionFunction{\n\t\t\"in\": func(args ...interface{}) (interface{}, error) {\n\t\t\tif len(args) == 0 {\n\t\t\t\treturn nil, fmt.Errorf(\"can't evaluate in() function when zero arguments supplied\")\n\t\t\t}\n\t\t\tv := args[0]\n\t\t\tfor i := 1; i < len(args); i++ {\n\t\t\t\tif v == args[i] {\n\t\t\t\t\treturn true, nil\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false, nil\n\t\t},\n\t}\n\n\texpressionCompiled, err := govaluate.NewEvaluableExpressionWithFunctions(expressionStr, functions)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to compile expression '%s': %s\", expressionStr, err)\n\t}\n\treturn &Expression{\n\t\texpressionStr: expressionStr,\n\t\texpressionCompiled: expressionCompiled,\n\t}, nil\n}", "func (ast *Binary) Eval(env *Env, ctx *Codegen, gen *ssa.Generator) (\n\tssa.Value, bool, error) {\n\tl, ok, err := ast.Left.Eval(env, ctx, gen)\n\tif err != nil || !ok {\n\t\treturn ssa.Undefined, ok, err\n\t}\n\tr, ok, err := ast.Right.Eval(env, ctx, gen)\n\tif err != nil || !ok {\n\t\treturn ssa.Undefined, ok, err\n\t}\n\n\tswitch lval := l.ConstValue.(type) {\n\tcase bool:\n\t\trval, ok := r.ConstValue.(bool)\n\t\tif !ok {\n\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\"invalid types: %s %s %s\", l, ast.Op, r)\n\t\t}\n\t\tswitch ast.Op {\n\t\tcase BinaryEq:\n\t\t\treturn gen.Constant(lval == rval, types.Bool), true, nil\n\t\tcase BinaryNeq:\n\t\t\treturn gen.Constant(lval != rval, types.Bool), true, nil\n\t\tcase BinaryAnd:\n\t\t\treturn gen.Constant(lval && rval, types.Bool), true, nil\n\t\tcase BinaryOr:\n\t\t\treturn gen.Constant(lval || rval, types.Bool), true, nil\n\t\tdefault:\n\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\"Binary.Eval: '%v %v %v' not supported\", l, ast.Op, r)\n\t\t}\n\n\tcase int32:\n\t\tvar rval int32\n\t\tswitch rv := r.ConstValue.(type) {\n\t\tcase int32:\n\t\t\trval = rv\n\t\tdefault:\n\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\"invalid r-value %T %s %T\", lval, ast.Op, rv)\n\t\t}\n\t\tswitch ast.Op {\n\t\tcase BinaryMult:\n\t\t\treturn gen.Constant(lval*rval, types.Int32), true, nil\n\t\tcase BinaryDiv:\n\t\t\tif rval == 0 {\n\t\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\t\"integer divide by zero\")\n\t\t\t}\n\t\t\treturn gen.Constant(lval/rval, types.Int32), true, nil\n\t\tcase BinaryMod:\n\t\t\tif rval == 0 {\n\t\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\t\"integer divide by zero\")\n\t\t\t}\n\t\t\treturn gen.Constant(lval%rval, types.Int32), true, nil\n\t\tcase BinaryLshift:\n\t\t\treturn gen.Constant(lval<<rval, types.Int32), true, nil\n\t\tcase BinaryRshift:\n\t\t\treturn gen.Constant(lval>>rval, types.Int32), true, nil\n\t\tcase BinaryBand:\n\t\t\treturn gen.Constant(lval&rval, types.Int32), true, nil\n\t\tcase BinaryBclear:\n\t\t\treturn gen.Constant(lval&^rval, types.Int32), true, nil\n\t\tcase BinaryBor:\n\t\t\treturn gen.Constant(lval|rval, types.Int32), true, nil\n\t\tcase BinaryBxor:\n\t\t\treturn gen.Constant(lval^rval, types.Int32), true, nil\n\n\t\tcase BinaryPlus:\n\t\t\treturn gen.Constant(lval+rval, types.Int32), true, nil\n\t\tcase BinaryMinus:\n\t\t\treturn 
gen.Constant(lval-rval, types.Int32), true, nil\n\n\t\tcase BinaryEq:\n\t\t\treturn gen.Constant(lval == rval, types.Bool), true, nil\n\t\tcase BinaryNeq:\n\t\t\treturn gen.Constant(lval != rval, types.Bool), true, nil\n\t\tcase BinaryLt:\n\t\t\treturn gen.Constant(lval < rval, types.Bool), true, nil\n\t\tcase BinaryLe:\n\t\t\treturn gen.Constant(lval <= rval, types.Bool), true, nil\n\t\tcase BinaryGt:\n\t\t\treturn gen.Constant(lval > rval, types.Bool), true, nil\n\t\tcase BinaryGe:\n\t\t\treturn gen.Constant(lval >= rval, types.Bool), true, nil\n\t\tdefault:\n\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\"Binary.Eval: '%v %s %v' not implemented yet\", l, ast.Op, r)\n\t\t}\n\n\tcase uint64:\n\t\tvar rval uint64\n\t\tswitch rv := r.ConstValue.(type) {\n\t\tcase uint64:\n\t\t\trval = rv\n\t\tdefault:\n\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\"%T: invalid r-value %v (%T)\", lval, rv, rv)\n\t\t}\n\t\tswitch ast.Op {\n\t\tcase BinaryMult:\n\t\t\treturn gen.Constant(lval*rval, types.Uint64), true, nil\n\t\tcase BinaryDiv:\n\t\t\tif rval == 0 {\n\t\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\t\"integer divide by zero\")\n\t\t\t}\n\t\t\treturn gen.Constant(lval/rval, types.Uint64), true, nil\n\t\tcase BinaryMod:\n\t\t\tif rval == 0 {\n\t\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\t\"integer divide by zero\")\n\t\t\t}\n\t\t\treturn gen.Constant(lval%rval, types.Uint64), true, nil\n\t\tcase BinaryLshift:\n\t\t\treturn gen.Constant(lval<<rval, types.Uint64), true, nil\n\t\tcase BinaryRshift:\n\t\t\treturn gen.Constant(lval>>rval, types.Uint64), true, nil\n\n\t\tcase BinaryPlus:\n\t\t\treturn gen.Constant(lval+rval, types.Uint64), true, nil\n\t\tcase BinaryMinus:\n\t\t\treturn gen.Constant(lval-rval, types.Uint64), true, nil\n\n\t\tcase BinaryEq:\n\t\t\treturn gen.Constant(lval == rval, types.Bool), true, nil\n\t\tcase BinaryNeq:\n\t\t\treturn gen.Constant(lval != rval, types.Bool), true, nil\n\t\tcase BinaryLt:\n\t\t\treturn gen.Constant(lval < rval, types.Bool), true, nil\n\t\tcase BinaryLe:\n\t\t\treturn gen.Constant(lval <= rval, types.Bool), true, nil\n\t\tcase BinaryGt:\n\t\t\treturn gen.Constant(lval > rval, types.Bool), true, nil\n\t\tcase BinaryGe:\n\t\t\treturn gen.Constant(lval >= rval, types.Bool), true, nil\n\t\tdefault:\n\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\"Binary.Eval: '%v %s %v' not implemented yet\", l, ast.Op, r)\n\t\t}\n\n\tdefault:\n\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Left,\n\t\t\t\"%s %v %s: invalid l-value %v (%T)\", l, ast.Op, r, lval, lval)\n\t}\n}", "func NewBinaryField(name string, table Table) BinaryField {\n\treturn BinaryField{\n\t\tname: name,\n\t\ttable: table,\n\t}\n}", "func NewExecBinary(name FileName) ExecBinary {\n\treturn ExecBinary{\n\t\tName: name,\n\t}\n}", "func New() *BinarySearchTree {\n\treturn &BinarySearchTree{root: nil}\n}", "func (fi *funcInfo) emitBinaryOp(line int, op TokenType, a, b, c int) {\r\n\tif opcode, found := arithAndBitwiseBinops[op]; found {\r\n\t\tfi.emitABC(line, opcode, a, b, c)\r\n\t} else {\r\n\t\tswitch op {\r\n\t\tcase TOKEN_OP_EQ:\r\n\t\t\tfi.emitABC(line, OP_EQ, 1, b, c)\r\n\t\tcase TOKEN_OP_NE:\r\n\t\t\tfi.emitABC(line, OP_EQ, 0, b, c)\r\n\t\tcase TOKEN_OP_LT:\r\n\t\t\tfi.emitABC(line, OP_LT, 1, b, c)\r\n\t\tcase TOKEN_OP_GT:\r\n\t\t\tfi.emitABC(line, OP_LT, 1, c, b)\r\n\t\tcase TOKEN_OP_LE:\r\n\t\t\tfi.emitABC(line, OP_LE, 1, b, c)\r\n\t\tcase TOKEN_OP_GE:\r\n\t\t\tfi.emitABC(line, OP_LE, 1, c, 
b)\r\n\t\t}\r\n\t\tfi.emitJmp(line, 0, 1)\r\n\t\tfi.emitLoadBool(line, a, 0, 1)\r\n\t\tfi.emitLoadBool(line, a, 1, 0)\r\n\t}\r\n}", "func (e *BinExpr) Eval(ctx context.Context, local Scope) (_ Value, err error) {\n\ta, err := e.a.Eval(ctx, local)\n\tif err != nil {\n\t\treturn nil, WrapContextErr(err, e, local)\n\t}\n\n\tb, err := e.b.Eval(ctx, local)\n\tif err != nil {\n\t\treturn nil, WrapContextErr(err, e, local)\n\t}\n\tval, err := e.eval(ctx, a, b, local)\n\tif err != nil {\n\t\treturn nil, WrapContextErr(err, e, local)\n\t}\n\treturn val, nil\n}", "func CloneRefOfBinaryExpr(n *BinaryExpr) *BinaryExpr {\n\tif n == nil {\n\t\treturn nil\n\t}\n\tout := *n\n\tout.Left = CloneExpr(n.Left)\n\tout.Right = CloneExpr(n.Right)\n\treturn &out\n}", "func NewBinaryMessage(payload []byte) Message {\n\treturn Message{Type: MessageTypeBinary, Payload: payload}\n}", "func Binary(key string, val []byte) Field {\n\treturn Field{Key: key, Type: core.BinaryType, Interface: val}\n}", "func NewBinaryHeap() Heap {\n\treturn &binaryHeap{}\n}", "func NewBinarySearchTree() BinarySearchTree {\n\treturn BinarySearchTree{}\n}", "func NewBinarySearchTree() *BinarySearchTree {\n\treturn &BinarySearchTree{\n\t\troot: nil,\n\t}\n}", "func newXorExpr(lhs, rhs Expr) Expr {\n\t// If constant is on right side, swap to left side.\n\tif !IsConstantExpr(lhs) && IsConstantExpr(rhs) {\n\t\tlhs, rhs = rhs, lhs\n\t}\n\n\t// Compute constant if both sides are constant.\n\tif lhs, ok := lhs.(*ConstantExpr); ok {\n\t\tif lhs.Value == 0 {\n\t\t\treturn rhs\n\t\t} else if rhs, ok := rhs.(*ConstantExpr); ok {\n\t\t\treturn lhs.Xor(rhs)\n\t\t}\n\t}\n\n\treturn &BinaryExpr{Op: XOR, LHS: lhs, RHS: rhs}\n}", "func NewXorExpr(x, y Constant) *ExprXor {\n\treturn &ExprXor{X: x, Y: y}\n}", "func (s *BaselimboListener) EnterBinary_expression(ctx *Binary_expressionContext) {}", "func newAlertEvaluator(model conditionEvalJSON) (evaluator, error) {\n\tswitch model.Type {\n\tcase \"gt\", \"lt\":\n\t\treturn newThresholdEvaluator(model)\n\tcase \"within_range\", \"outside_range\":\n\t\treturn newRangedEvaluator(model)\n\tcase \"no_value\":\n\t\treturn &noValueEvaluator{}, nil\n\t}\n\n\treturn nil, fmt.Errorf(\"evaluator invalid evaluator type: %s\", model.Type)\n}", "func (p *Parser) expr(tok token.Token) value.Expr {\n\tif p.peek().Type == token.Assign && tok.Type != token.Identifier {\n\t\tp.errorf(\"cannot assign to %s\", tok)\n\t}\n\texpr := p.operand(tok, true)\n\ttok = p.peek()\n\tswitch tok.Type {\n\tcase token.Newline, token.EOF, token.RightParen, token.RightBrack, token.Semicolon:\n\t\treturn expr\n\tcase token.Identifier:\n\t\t// TODO\n\t\treturn nil\n\tcase token.Operator, token.Assign:\n\t\tp.next()\n\t\treturn &binary{\n\t\t\tleft: expr,\n\t\t\top: tok.Text,\n\t\t\tright: p.expr(p.next()),\n\t\t}\n\t}\n\tp.errorf(\"after expression: unexpected %s\", p.peek())\n\treturn nil\n}", "func NewExprEvaluator(arrays []*Array, values [][]byte) *ExprEvaluator {\n\tassert(len(arrays) == len(values), \"array/value count mismatch: %d != %d\", len(arrays), len(values))\n\n\tm := make(map[uint64][]byte)\n\tfor i, array := range arrays {\n\t\t_, ok := m[array.ID]\n\t\tassert(!ok, \"duplicate array: id=%d\", array.ID)\n\t\tm[array.ID] = values[i]\n\t}\n\n\treturn &ExprEvaluator{m: m}\n}", "func New(value interface{}, comparator comparator.Less) *RBTree {\n\treturn &RBTree{value: value, less: comparator, color: \"black\"}\n}", "func NewBinaryCodec() *BinaryCodec {\n var c *BinaryCodec = &BinaryCodec{}\n c.buf = &bytes.Buffer{}\n return c\n}", "func 
newBinarySensorGPIOReader(gpio devices.GPIO, pin model.DeviceIndex) (binarySensorReader, error) {\n\treturn &binarySensorGPIOReader{\n\t\tinputDevice: gpio,\n\t\tpin: pin,\n\t}, nil\n}", "func NewXor(x, y Constant) *ExprXor {\n\treturn &ExprXor{\n\t\tX: x,\n\t\tY: y,\n\t}\n}", "func newBinaryReader(file *os.File) (bpr *binaryReader) {\n\tbpr = new(binaryReader)\n\tbpr.file = file\n\tbpr.err = nil\n\treturn bpr\n}", "func NewBinaryHeap(h Heaper) *BHeap {\n\treturn &BHeap{\n\t\tHeapSize: h.Len(),\n\t\tKeys: h,\n\t}\n}", "func (s *BaseMySqlParserListener) EnterBinaryComparasionPredicate(ctx *BinaryComparasionPredicateContext) {\n}", "func NewVarbinary(name string, m map[string]interface{}) (Vindex, error) {\n\treturn &Varbinary{name: name}, nil\n}", "func NewUnaryBooleanExpression(op OP, lE Evaluator) (Evaluator, error) {\n\tswitch op {\n\tcase NOT:\n\t\treturn &booleanNode{\n\t\t\top: op,\n\t\t\tlS: true,\n\t\t\tlE: lE,\n\t\t\trS: false,\n\t\t}, nil\n\tdefault:\n\t\treturn nil, errors.New(\"unary boolean expressions require the operation to be one for the follwing 'not'\")\n\t}\n}", "func check_bin_prim(t ast.Ast) func(*block) *block {\n\ta, ok := t.(ast.Apply)\n\tif ok {\n\t\ti, ok := a.Rator.(ast.Iden)\n\t\tif ok {\n\t\t\tf, ok := strict_binary_primitives[i.Val]\n\t\t\tif ok {\n\t\t\t\treturn f\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func convertToBinary(src base.FixedDataGrid) base.FixedDataGrid {\n\tb := filters.NewBinaryConvertFilter()\n\tattrs := base.NonClassAttributes(src)\n\tfor _, a := range attrs {\n\t\tb.AddAttribute(a)\n\t}\n\tb.Train()\n\tret := base.NewLazilyFilteredInstances(src, b)\n\treturn ret\n}", "func TestNewBinaryValue(t *testing.T) {\n\tvar origin int64 = time.Now().UnixNano()\n\t// assign instance of mockStructB as a CBOR encoded CommandValue payload\n\tvar mock1 models.Event\n\tmock1.DeviceName = \"Device01234567890\"\n\tmock1.Created = origin\n\tmock1.Id = \"MyStringIdentifier\"\n\tmock1.Modified = origin + 123\n\t// To extend coverage cborMock becomes encoded byte array.\n\t// We will then confirm CommandValue particulars of binary payload are valid\n\tcborMock, err := encodeMockEvent(mock1)\n\tif err != nil {\n\t\tt.Errorf(\"NewBinaryValue: Error encoding struct as binary value\")\n\t}\n\tcv, errAssign := NewBinaryValue(\"resource\", origin, cborMock)\n\tif errAssign != nil {\n\t\tt.Errorf(\"NewBinaryValue: Error invoking NewBinaryValue [%v]\", errAssign)\n\t}\n\t// Confirm CommandValue particulars\n\tif cv.Type != contracts.ValueTypeBinary {\n\t\tt.Errorf(\"Expected Binary type! invalid Type: %v\", cv.Type)\n\t}\n\tif cv.Origin != origin {\n\t\tt.Errorf(\"Expected matching value! 
invalid Origin: %d != %d\", cv.Origin, origin)\n\t}\n\tval, err := cv.BinaryValue()\n\tif err != nil {\n\t\tt.Errorf(\"BinaryValue: error retrieving binary value from command value\")\n\t}\n\tif !reflect.DeepEqual(val, cborMock) {\n\t\tt.Errorf(\"BinaryValue() result doesn't match expected payload\")\n\t}\n}", "func New(values []bool, left, right bool) (c *Cell, err error) {\n\tif len(values) < 1 {\n\t\terr = errors.New(\"values must have length 1 or more\")\n\t\treturn nil, err\n\t}\n\n\tc = &Cell{\n\t\tValues: values,\n\t\tLeft: left,\n\t\tRight: right,\n\t}\n\treturn c, nil\n}", "func NewBitXor(left, right sql.Expression) *BitOp {\n\treturn NewBitOp(left, right, sqlparser.BitXorStr)\n}", "func newNode(expr string) (node, error) {\n\tvar f node\n\tswitch strings.ToLower(expr) {\n\tcase \"\", \"total\":\n\t\tf.value = f.valueTotal\n\tdefault:\n\t\tnodeExpr, err := regexp.Compile(expr)\n\t\tif err != nil {\n\t\t\treturn f, fmt.Errorf(\"node must be either 'total' or a valid regexp: %w\", err)\n\t\t}\n\t\tf.expr = nodeExpr\n\t\tf.value = f.valueSelf\n\t}\n\treturn f, nil\n}" ]
[ "0.7401786", "0.7397373", "0.7016201", "0.67983866", "0.6739764", "0.64406323", "0.63932383", "0.61820513", "0.6167331", "0.61307436", "0.6101428", "0.60082865", "0.5907112", "0.5907112", "0.5889629", "0.5865145", "0.5859371", "0.58282465", "0.5801955", "0.5753771", "0.5711155", "0.5669702", "0.5652779", "0.5641935", "0.55289906", "0.55072796", "0.54695964", "0.54643995", "0.5452721", "0.5444329", "0.5441792", "0.54400325", "0.54079795", "0.5398052", "0.53878057", "0.53866917", "0.5384098", "0.5377944", "0.53252906", "0.53099805", "0.5282585", "0.527421", "0.52690357", "0.52562636", "0.5238622", "0.521789", "0.5201438", "0.5176261", "0.5176115", "0.51717293", "0.51577365", "0.5114093", "0.5097102", "0.50877106", "0.50071746", "0.50017345", "0.4997769", "0.49920708", "0.49861646", "0.49845648", "0.49762824", "0.49759626", "0.49709687", "0.49581406", "0.49571133", "0.4953632", "0.4947437", "0.4940683", "0.4919221", "0.49146986", "0.4905269", "0.49047393", "0.49000975", "0.4864963", "0.48562533", "0.4836271", "0.4830633", "0.4813184", "0.48029906", "0.47941026", "0.47936845", "0.47934404", "0.47892278", "0.478537", "0.4760436", "0.47550586", "0.47524127", "0.47457892", "0.47275048", "0.47272682", "0.47158298", "0.4715214", "0.47098097", "0.4661275", "0.46582305", "0.4658035", "0.46571213", "0.46342444", "0.4630353", "0.4629124" ]
0.888074
0
start begins streaming values from the lhs/rhs processors
func (e *binaryExprEvaluator) start() {
	e.lhs.start()
	e.rhs.start()
	go e.run()
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (p *literalProcessor) start() { go p.run() }", "func (p *Pipe) start() {\n\tp.cancel = make(chan struct{})\n\terrcList := make([]<-chan error, 0, 1+len(p.processors)+len(p.sinks))\n\t// start pump\n\tout, errc := p.pump.run(p.cancel, p.ID(), p.provide, p.consume, p.sampleRate, p.metric)\n\terrcList = append(errcList, errc)\n\n\t// start chained processesing\n\tfor _, proc := range p.processors {\n\t\tout, errc = proc.run(p.cancel, p.ID(), out, p.sampleRate, p.metric)\n\t\terrcList = append(errcList, errc)\n\t}\n\n\tsinkErrcList := p.broadcastToSinks(out)\n\terrcList = append(errcList, sinkErrcList...)\n\tp.errc = mergeErrors(errcList...)\n}", "func (w *Walker) startProcessing() {\n\tdoStart := false\n\tw.pipe.RLock()\n\tif w.pipe.filters == nil { // no processing up to now => start with initial node\n\t\tw.pipe.pushSync(w.initial, 0) // input is buffered, will return immediately\n\t\tdoStart = true // yes, we will have to start the pipeline\n\t}\n\tw.pipe.RUnlock()\n\tif doStart { // ok to be outside mutex as other goroutines will check pipe.empty()\n\t\tw.pipe.startProcessing() // must be outside of mutex lock\n\t}\n}", "func (r *reducer) start() {\n\tfor _, m := range r.mappers {\n\t\tm.start()\n\t}\n\tgo r.run()\n}", "func (s *seriesValueGenerator) Start() error { return nil }", "func (m *mapper) start() {\n\tm.itr = m.executor.db.CreateIterator(m.seriesID, m.fieldID, m.typ,\n\t\tm.executor.min, m.executor.max, m.executor.interval)\n\tgo m.run()\n}", "func (graphMinion *graphMinion) start() {\n\tgo func() {\n\t\tdefer graphMinion.wg.Done()\n\t\tfor {\n\n\t\t\t// pull reads from queue until done\n\t\t\tmappingData, ok := <-graphMinion.inputChannel\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif mappingData == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// increment the nodes contained in the mapping window\n\t\t\tmisc.ErrorCheck(graphMinion.graph.IncrementSubPath(mappingData.ContainedNodes, mappingData.Freq))\n\t\t}\n\t}()\n}", "func main() {\n\tconst n = 100000\n\tstart := make(chan int)\n\tleft := start\n\tright := left\n\tfor i := 0; i < n; i++ {\n\t\tright = make(chan int)\n\t\tgo stage(left, right)\n\t\tleft = right\n\t}\n // make the last one a sink channel\n\tsink := left\n\t// inject the starting value into the daisy chain\n\tstart <- 0\n\tfmt.Println(<-sink)\n}", "func (s *Stream) initGraph() error {\n\ts.log.Print(\"Preparing stream operator graph\")\n\tif s.source == nil {\n\t\treturn fmt.Errorf(\"Operator graph failed, missing source\")\n\t}\n\n\t// if there are no ops, link source to sink\n\tif len(s.ops) == 0 && s.sink != nil {\n\t\ts.log.Print(\"No operator nodes found, binding source to sink directly\")\n\t\ts.sink.SetInput(s.source.GetOutput())\n\t\treturn nil\n\t}\n\n\t// link ops\n\ts.bindOps()\n\n\t// link last op to sink\n\tif s.sink != nil {\n\t\ts.sink.SetInput(s.ops[len(s.ops)-1].GetOutput())\n\t}\n\n\treturn nil\n}", "func (e *binaryExprEvaluator) run() {\n\tfor {\n\t\t// Read LHS value.\n\t\tlhs, ok := <-e.lhs.C()\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\n\t\t// Read RHS value.\n\t\trhs, ok := <-e.rhs.C()\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\n\t\t// Merge maps.\n\t\tm := make(map[string]interface{})\n\t\tfor k, v := range lhs {\n\t\t\tm[k] = e.eval(v, rhs[k])\n\t\t}\n\t\tfor k, v := range rhs {\n\t\t\t// Skip value if already processed in lhs loop.\n\t\t\tif _, ok := m[k]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tm[k] = e.eval(float64(0), v)\n\t\t}\n\n\t\t// Return value.\n\t\te.c <- m\n\t}\n\n\t// Mark the channel as complete.\n\tclose(e.c)\n}", "func (w 
*noAggregationStreamWorker) run() {\n\tlog.Debugf(\"Starting streaming routine for the no-aggregation pipeline\")\n\n\tticker := time.NewTicker(noAggWorkerStreamCheckFrequency)\n\tdefer ticker.Stop()\n\tlogPayloads := config.Datadog.GetBool(\"log_payloads\")\n\tw.seriesSink, w.sketchesSink = createIterableMetrics(w.flushConfig, w.serializer, logPayloads, false)\n\n\tstopped := false\n\tvar stopBlockChan chan struct{}\n\tvar lastStream time.Time\n\n\tfor !stopped {\n\t\tstart := time.Now()\n\t\tserializedSamples := 0\n\n\t\tmetrics.Serialize(\n\t\t\tw.seriesSink,\n\t\t\tw.sketchesSink,\n\t\t\tfunc(seriesSink metrics.SerieSink, sketchesSink metrics.SketchesSink) {\n\t\t\tmainloop:\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\n\t\t\t\t\t// stop signal\n\t\t\t\t\tcase trigger := <-w.stopChan:\n\t\t\t\t\t\tstopped = true\n\t\t\t\t\t\tstopBlockChan = trigger.blockChan\n\t\t\t\t\t\tbreak mainloop // end `Serialize` call and trigger a flush to the forwarder\n\n\t\t\t\t\tcase <-ticker.C:\n\t\t\t\t\t\tn := time.Now()\n\t\t\t\t\t\tif serializedSamples > 0 && lastStream.Before(n.Add(-time.Second*1)) {\n\t\t\t\t\t\t\tlog.Debug(\"noAggregationStreamWorker: triggering an automatic payloads flush to the forwarder (no traffic since 1s)\")\n\t\t\t\t\t\t\tbreak mainloop // end `Serialize` call and trigger a flush to the forwarder\n\t\t\t\t\t\t}\n\n\t\t\t\t\t// receiving samples\n\t\t\t\t\tcase samples := <-w.samplesChan:\n\t\t\t\t\t\tlog.Debugf(\"Streaming %d metrics from the no-aggregation pipeline\", len(samples))\n\t\t\t\t\t\tfor _, sample := range samples {\n\t\t\t\t\t\t\t// enrich metric sample tags\n\t\t\t\t\t\t\tsample.GetTags(w.taggerBuffer, w.metricBuffer)\n\t\t\t\t\t\t\tw.metricBuffer.AppendHashlessAccumulator(w.taggerBuffer)\n\n\t\t\t\t\t\t\t// turns this metric sample into a serie\n\t\t\t\t\t\t\tvar serie metrics.Serie\n\t\t\t\t\t\t\tserie.Name = sample.Name\n\t\t\t\t\t\t\tserie.Points = []metrics.Point{{Ts: sample.Timestamp, Value: sample.Value}}\n\t\t\t\t\t\t\tserie.Tags = tagset.CompositeTagsFromSlice(w.metricBuffer.Copy())\n\t\t\t\t\t\t\tserie.Host = sample.Host\n\t\t\t\t\t\t\t// ignored when late but mimic dogstatsd traffic here anyway\n\t\t\t\t\t\t\tserie.Interval = 10\n\t\t\t\t\t\t\tw.seriesSink.Append(&serie)\n\n\t\t\t\t\t\t\tw.taggerBuffer.Reset()\n\t\t\t\t\t\t\tw.metricBuffer.Reset()\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tlastStream = time.Now()\n\n\t\t\t\t\t\tserializedSamples += len(samples)\n\t\t\t\t\t\tif serializedSamples > w.maxMetricsPerPayload {\n\t\t\t\t\t\t\tbreak mainloop // end `Serialize` call and trigger a flush to the forwarder\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}, func(serieSource metrics.SerieSource) {\n\t\t\t\tsendIterableSeries(w.serializer, start, serieSource)\n\t\t\t}, func(sketches metrics.SketchesSource) {\n\t\t\t\t// noop: we do not support sketches in the no-agg pipeline.\n\t\t\t})\n\n\t\tif stopped {\n\t\t\tbreak\n\t\t}\n\n\t\tw.seriesSink, w.sketchesSink = createIterableMetrics(w.flushConfig, w.serializer, logPayloads, false)\n\t}\n\n\tif stopBlockChan != nil {\n\t\tclose(stopBlockChan)\n\t}\n}", "func (s *streamStrategy) Start() {\n\tgo func() {\n\t\tfor msg := range s.inputChan {\n\t\t\tif msg.Origin != nil {\n\t\t\t\tmsg.Origin.LogSource.LatencyStats.Add(msg.GetLatency())\n\t\t\t}\n\t\t\ts.outputChan <- &message.Payload{Messages: []*message.Message{msg}, Encoded: msg.Content, UnencodedSize: len(msg.Content)}\n\t\t}\n\t\ts.done <- struct{}{}\n\t}()\n}", "func (l *Learner) Start() {\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase lrn := 
<-l.learnIn:\n\t\t\t\tval, out := l.handleLearn(lrn)\n\t\t\t\tif out {\n\t\t\t\t\tl.valOut <- val\n\t\t\t\t\tl.learned = map[int]Learn{}\n\t\t\t\t}\n\t\t\tcase <-l.stop:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func (l *Learner) Start() {\n\tgo func() {\n\t\tfor {\n\t\t\t// TODO(student): distributed implementation\n\t\t\tselect {\n\t\t\tcase lrn := <-l.lrnValues:\n\t\t\t\tif val, slot, ok := l.handleLearn(lrn); ok == true {\n\t\t\t\t\tl.decidedOut <- DecidedValue{SlotID: slot, Value: val}\n\t\t\t\t}\n\t\t\tcase <-l.stop:\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n}", "func (px *Paxos) Start(seq int, v interface{}) {\n\tif seq >= px.Min() {\n\t\tpx.mu.Lock()\n\t\tpx.mu.Unlock()\n\t\tpx.proposer(seq, v) // THIS IS OUR CODE - changed from go\n\t}\n\n}", "func (p *Processor) Forward(xs ...ag.Node) []ag.Node {\n\tg := p.Graph\n\tys := make([]ag.Node, len(xs))\n\tfor i, x := range xs {\n\t\trms := g.Sqrt(g.ReduceMean(g.Square(x)))\n\t\tys[i] = g.Add(g.Prod(g.DivScalar(x, g.AddScalar(rms, p.eps)), p.w), p.b)\n\t}\n\treturn ys\n}", "func (e *ElkTimeseriesForwarder) start() {\n\n\tlog.L.Infof(\"Starting event forwarder for %v\", e.index())\n\tticker := time.NewTicker(e.interval)\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\t//send it off\n\t\t\tlog.L.Debugf(\"Sending bulk ELK update for %v\", e.index())\n\n\t\t\tgo forward(e.index(), e.url, e.buffer)\n\t\t\te.buffer = []ElkBulkUpdateItem{}\n\n\t\tcase event := <-e.incomingChannel:\n\t\t\te.bufferevent(event)\n\t\t}\n\t}\n}", "func (px *Paxos) Start(seq int, v interface{}) {\n\t// Your code here.\n\t// TBD: check seq < px.Min() in px.Propose? Start should return immidiately\n\tgo px.Propose(seq, v)\n}", "func (px *Paxos) Start(seq int, v interface{}) {\n // Your code here.\n if seq < px.Min() {\n return\n }\n go func() {\n instance := px.getInstance(seq)\n instance.mu.Lock()\n defer instance.mu.Unlock()\n for !px.dead {\n if instance.decidedValue != nil {\n break\n }\n instance.proposer.highestSeenProposedNumber++\n instance.proposer.proposedNumber = instance.proposer.highestSeenProposedNumber\n ok, value := px.propose(instance, seq)\n if !ok {\n continue\n }\n if value != nil {\n v = value\n }\n if !px.requestAccept(instance, seq, v) {\n continue\n }\n px.decide(seq, v)\n break\n }\n }()\n}", "func (px *Paxos) Start(seq int, v interface{}) {\n\t// Your code here.\n\n\tif seq < px.Min() {\n\t\treturn\n\t}\n\t//args := RequestArgs{1, nil, 3, \"12\"}\n\tgo func(v interface{}, seq int) {\n\t\tnode, ok := px.prepareStatus.Find(seq)\n\t\tfor !ok || !node.State.Done {\n\t\t\t//choose unique n\n\t\t\tn := (int(time.Now().Unix()) << 5) | px.me\n\t\t\taok := false\n\t\t\t//send prepare to all\n\t\t\tvalue, pok := prepare(n, px, v, seq)\n\t\t\t//send accept to all\n\t\t\tif pok {\n\t\t\t\taok = accept(n, px, value, seq)\n\t\t\t\t//log.Printf(\"the seq is %d,the number is %d and the aok is %t\", seq, n, aok)\n\t\t\t}\n\t\t\tif aok {\n\t\t\t\t//send decide to all\n\t\t\t\tdecided(seq, px, value)\n\t\t\t}\n\t\t\t//time.Sleep(20 * time.Millisecond)\n\t\t\tnode, ok = px.prepareStatus.Find(seq)\n\t\t}\n\t}(v, seq)\n\n}", "func (px *Paxos) Start(seq int, v interface{}) {\n\t//DPrintf(\"Start(%d, %v)\\n\", seq, v)\n\tgo px.propose(seq, v)\n}", "func (sp *StreamPool) Start() {\n\tfor {\n\t\tselect {\n\t\tcase <-sp.quitCh:\n\t\t\tsp.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}", "func (l *lexer) run() {\n\tfor state := lexStart; state != nil; {\n\t\tstate = state(l)\n\t}\n}", "func startPipelineFunction(numbers chan<- int) {\n\tfor i := 1; i <= 10; i++ {\n\t\tnumbers 
<- i\n\t}\n\tclose(numbers)\n}", "func (g *Gosmonaut) Start(\n\ttypes OSMTypeSet,\n\tfuncEntityNeeded func(OSMType, OSMTags) bool,\n) {\n\t// Block until previous run finished\n\tg.lock.Lock()\n\tg.stream = make(chan osmPair, entitiesPerPrimitiveBlock)\n\n\t// Init vars\n\tg.funcEntityNeeded = funcEntityNeeded\n\tg.types = types\n\n\tgo func() {\n\t\t// Decode\n\t\tg.decode()\n\n\t\t// Finish\n\t\tclose(g.stream)\n\t\tg.lock.Unlock()\n\t}()\n}", "func (f *filtererProcessor) Start(ctx context.Context) {\n\tctx = f.StartInternal(ctx, filtererProcName)\n\tf.input.Start(ctx)\n}", "func (a *Agent) startProcessors(\n\tdst chan<- telegraf.Metric,\n\tprocessors models.RunningProcessors,\n) (chan<- telegraf.Metric, []*processorUnit, error) {\n\tvar units []*processorUnit\n\n\t// Sort from last to first\n\tsort.SliceStable(processors, func(i, j int) bool {\n\t\treturn processors[i].Config.Order > processors[j].Config.Order\n\t})\n\n\tvar src chan telegraf.Metric\n\tfor _, processor := range processors {\n\t\tsrc = make(chan telegraf.Metric, 100)\n\t\tacc := NewAccumulator(processor, dst)\n\n\t\terr := processor.Start(acc)\n\t\tif err != nil {\n\t\t\tfor _, u := range units {\n\t\t\t\tu.processor.Stop()\n\t\t\t\tclose(u.dst)\n\t\t\t}\n\t\t\treturn nil, nil, fmt.Errorf(\"starting processor %s: %w\", processor.LogName(), err)\n\t\t}\n\n\t\tunits = append(units, &processorUnit{\n\t\t\tsrc: src,\n\t\t\tdst: dst,\n\t\t\tprocessor: processor,\n\t\t})\n\n\t\tdst = src\n\t}\n\n\treturn src, units, nil\n}", "func (rb *routerBase) Start(ctx context.Context, wg *sync.WaitGroup, ctxCancel context.CancelFunc) {\n\twg.Add(len(rb.outputs))\n\tfor i := range rb.outputs {\n\t\tgo func(ctx context.Context, rb *routerBase, ro *routerOutput, wg *sync.WaitGroup) {\n\t\t\tvar span *tracing.Span\n\t\t\tif rb.statsCollectionEnabled {\n\t\t\t\tctx, span = execinfra.ProcessorSpan(ctx, \"router output\")\n\t\t\t\tspan.SetTag(execinfrapb.StreamIDTagKey, ro.streamID)\n\t\t\t\tro.stats.Inputs = make([]execinfrapb.InputStats, 1)\n\t\t\t}\n\n\t\t\tdrain := false\n\t\t\tstreamStatus := execinfra.NeedMoreRows\n\t\t\tro.mu.Lock()\n\t\t\tfor {\n\t\t\t\t// Send any metadata that has been buffered. Note that we are not\n\t\t\t\t// maintaining the relative ordering between metadata items and rows\n\t\t\t\t// (but it doesn't matter).\n\t\t\t\tif len(ro.mu.metadataBuf) > 0 {\n\t\t\t\t\tm := ro.mu.metadataBuf[0]\n\t\t\t\t\t// Reset the value so any objects it refers to can be garbage\n\t\t\t\t\t// collected.\n\t\t\t\t\tro.mu.metadataBuf[0] = nil\n\t\t\t\t\tro.mu.metadataBuf = ro.mu.metadataBuf[1:]\n\n\t\t\t\t\tro.mu.Unlock()\n\n\t\t\t\t\trb.semaphore <- struct{}{}\n\t\t\t\t\tstatus := ro.stream.Push(nil /*row*/, m)\n\t\t\t\t\t<-rb.semaphore\n\n\t\t\t\t\trb.updateStreamState(&streamStatus, status)\n\t\t\t\t\tro.mu.Lock()\n\t\t\t\t\tro.mu.streamStatus = streamStatus\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif !drain {\n\t\t\t\t\t// Send any rows that have been buffered. 
We grab multiple rows at a\n\t\t\t\t\t// time to reduce contention.\n\t\t\t\t\tif rows, err := ro.popRowsLocked(ctx); err != nil {\n\t\t\t\t\t\tro.mu.Unlock()\n\t\t\t\t\t\trb.fwdMetadata(&execinfrapb.ProducerMetadata{Err: err})\n\t\t\t\t\t\tro.mu.Lock()\n\t\t\t\t\t\tatomic.StoreUint32(&rb.aggregatedStatus, uint32(execinfra.DrainRequested))\n\t\t\t\t\t\tdrain = true\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t} else if len(rows) > 0 {\n\t\t\t\t\t\tro.mu.Unlock()\n\t\t\t\t\t\trb.semaphore <- struct{}{}\n\t\t\t\t\t\tfor _, row := range rows {\n\t\t\t\t\t\t\tstatus := ro.stream.Push(row, nil)\n\t\t\t\t\t\t\trb.updateStreamState(&streamStatus, status)\n\t\t\t\t\t\t}\n\t\t\t\t\t\t<-rb.semaphore\n\t\t\t\t\t\tif rb.statsCollectionEnabled {\n\t\t\t\t\t\t\tro.stats.Inputs[0].NumTuples.Add(int64(len(rows)))\n\t\t\t\t\t\t}\n\t\t\t\t\t\tro.mu.Lock()\n\t\t\t\t\t\tro.mu.streamStatus = streamStatus\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// No rows or metadata buffered; see if the producer is done.\n\t\t\t\tif ro.mu.producerDone {\n\t\t\t\t\tif rb.statsCollectionEnabled {\n\t\t\t\t\t\tro.stats.Exec.MaxAllocatedMem.Set(uint64(ro.memoryMonitor.MaximumBytes()))\n\t\t\t\t\t\tro.stats.Exec.MaxAllocatedDisk.Set(uint64(ro.diskMonitor.MaximumBytes()))\n\t\t\t\t\t\tspan.RecordStructured(&ro.stats)\n\t\t\t\t\t\tspan.Finish()\n\t\t\t\t\t\tif trace := execinfra.GetTraceData(ctx); trace != nil {\n\t\t\t\t\t\t\tro.mu.Unlock()\n\t\t\t\t\t\t\trb.semaphore <- struct{}{}\n\t\t\t\t\t\t\tstatus := ro.stream.Push(nil, &execinfrapb.ProducerMetadata{TraceData: trace})\n\t\t\t\t\t\t\trb.updateStreamState(&streamStatus, status)\n\t\t\t\t\t\t\t<-rb.semaphore\n\t\t\t\t\t\t\tro.mu.Lock()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tro.stream.ProducerDone()\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\t// Nothing to do; wait.\n\t\t\t\tro.mu.cond.Wait()\n\t\t\t}\n\t\t\tro.mu.rowContainer.Close(ctx)\n\t\t\tro.mu.Unlock()\n\n\t\t\tro.rowBufToPushFromAcc.Close(ctx)\n\t\t\tro.memoryMonitor.Stop(ctx)\n\t\t\tro.diskMonitor.Stop(ctx)\n\t\t\tro.rowBufToPushFromMon.Stop(ctx)\n\n\t\t\twg.Done()\n\t\t}(ctx, rb, &rb.outputs[i], wg)\n\t}\n}", "func (px *Paxos) Start(seq int, v interface{}) {\n\tpx.mu.Lock()\n\tdefer px.mu.Unlock()\n\n\t// Your code here.\n\tlog.Println(\"Paxos.Start me:\", px.me, \"Seq:\", seq, \"px.min:\", px.min, \"px.max:\", px.max, \"Value:\", v)\n\tif seq < px.min {\n\t\treturn\n\t}\n\n\tinstance, ok := px.instances[seq]\n\tif !ok {\n\t\tinstance := new(Instance)\n\t\tinstance.seq = seq\n\t\tinstance.status = Pending\n\t\tinstance.pNum = -1\n\t\tinstance.aNum = -1\n\t\tinstance.aValue = nil\n\t\tpx.instances[seq] = instance\n\t} else {\n\t\tif instance.status == Decided {\n\t\t\tlog.Println(\"Paxos.Start me:\", px.me, \"Seq:\", seq, \"px.min:\", px.min, \"px.max:\", px.max, \"Value:\", v, \"SEQ HAS BEEN DECIDED\")\n\t\t\treturn\n\t\t}\n\t}\n\n\tgo px.propose(seq, v)\n}", "func (r *RecordStream) Start() {\n\tif r.state == idle {\n\t\tr.err = nil\n\t\tr.c.c.Request(&proto.FlushRecordStream{StreamIndex: r.index}, nil)\n\t\tr.c.c.Request(&proto.CorkRecordStream{StreamIndex: r.index, Corked: false}, nil)\n\t\tr.state = running\n\t}\n}", "func (p *literalProcessor) run() {\n\tfor {\n\t\tselect {\n\t\tcase ch := <-p.done:\n\t\t\tclose(ch)\n\t\t\treturn\n\t\tcase p.c <- map[string]interface{}{\"\": p.val}:\n\t\t}\n\t}\n}", "func (r *streamRangeVectorIterator) load(start, end int64) {\n\tfor lbs, sample, hasNext := r.iter.Peek(); hasNext; lbs, sample, hasNext = r.iter.Peek() {\n\t\tif sample.Timestamp > end {\n\t\t\t// not consuming the 
iterator as this belong to another range.\n\t\t\treturn\n\t\t}\n\t\t// the lower bound of the range is not inclusive\n\t\tif sample.Timestamp <= start {\n\t\t\t_ = r.iter.Next()\n\t\t\tcontinue\n\t\t}\n\t\t// adds the sample.\n\t\tvar rangeAgg RangeStreamingAgg\n\t\tvar ok bool\n\t\trangeAgg, ok = r.windowRangeAgg[lbs]\n\t\tif !ok {\n\t\t\tvar metric labels.Labels\n\t\t\tif _, ok = r.metrics[lbs]; !ok {\n\t\t\t\tvar err error\n\t\t\t\tmetric, err = promql_parser.ParseMetric(lbs)\n\t\t\t\tif err != nil {\n\t\t\t\t\t_ = r.iter.Next()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tr.metrics[lbs] = metric\n\t\t\t}\n\n\t\t\t// never err here ,we have check error at evaluator.go rangeAggEvaluator() func\n\t\t\trangeAgg, _ = streamingAggregator(r.r)\n\t\t\tr.windowRangeAgg[lbs] = rangeAgg\n\t\t}\n\t\tp := promql.Point{\n\t\t\tT: sample.Timestamp,\n\t\t\tV: sample.Value,\n\t\t}\n\t\trangeAgg.agg(p)\n\t\t_ = r.iter.Next()\n\t}\n}", "func (p *Pipeline) FeedForward(index int, seqNo int, data interface{}) {\n\tif index++; index < len(p.nodes) {\n\t\tp.nodes[index].Feed(p, index, seqNo, data)\n\t}\n}", "func (p *Pipeline) FeedForward(index int, seqNo int, data interface{}) {\n\tif index++; index < len(p.nodes) {\n\t\tp.nodes[index].Feed(p, index, seqNo, data)\n\t}\n}", "func (px *Paxos) Start(seq int, v interface{}) {\n\t// Your code here.\n\n\tgo func() {\n\t\t//forgotten\n\t\tif seq < px.Min() {\n\t\t\t//log.Printf(\"Peer%d submit instance%d, forgotten, return directly\\n\", px.me, seq)\n\t\t\treturn\n\t\t}\n\n\t\t//log.Printf(\"Peer%d submit instance%d\\n\", px.me, seq)\n\t\tfor {\n\t\t\tisDecided, isPreMajority, AnswerPrepare, acceptInfo := px.BroadcastPrepare(seq, v)\n\n\t\t\tif isDecided {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif !isPreMajority {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tisAccMajority := px.BroadcastAccept(seq, AnswerPrepare, acceptInfo)\n\n\t\t\tif !isAccMajority {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tpx.BroadcastDecide(seq, acceptInfo)\n\n\t\t\tbreak\n\t\t}\n\t}()\n}", "func (d *disp) Start() *disp {\n\n\t//pipelines_implementation_backup(d)\n\n\tl := len(d.Workers)\n\tfor i := 1; i <= l; i++ {\n\n\t\twrk := worker2.New(i, \"default\", make(worker2.PipelineChannel), d.PipelineQueue, make(worker2.JobChannel), d.Queue, make(chan struct{}))\n\t\twrk.Start()\n\t\td.Workers = append(d.Workers, wrk)\n\t}\n\tgo d.process()\n\n\treturn d\n}", "func (f *FakeOutput) Start(_ operator.Persister) error { return nil }", "func LRFilter(predicate Predicate, left Pipeline, right Pipeline) Stage {\n\treturn StageFnc(func(in <-chan interface{}) <-chan interface{} {\n\t\tif predicate == nil || (len(left) == 0 && len(right) == 0) || (hasNilStage(left) && hasNilStage(right)) {\n\t\t\treturn in\n\t\t}\n\n\t\tlchan := make(chan interface{}, (cap(in)/2)+1)\n\t\trchan := make(chan interface{}, (cap(in)/2)+1)\n\t\tout := make(chan interface{}, cap(in))\n\n\t\twg := &sync.WaitGroup{}\n\t\twg.Add(2)\n\t\tgo runPipeline(left, lchan, out, wg)\n\t\tgo runPipeline(right, rchan, out, wg)\n\n\t\tgo func() {\n\t\t\tdefer close(lchan)\n\t\t\tdefer close(rchan)\n\n\t\t\tfor value := range in {\n\t\t\t\tif predicate(value) {\n\t\t\t\t\tlchan <- value\n\t\t\t\t} else {\n\t\t\t\t\trchan <- value\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\tgo func() { wg.Wait(); close(out) }() // Close out only when all goroutine are stopped\n\t\treturn out\n\t})\n}", "func (*filterMetricProcessor) Start(ctx context.Context, host component.Host) error {\n\treturn nil\n}", "func (px *Paxos) Start(seq int, v interface{}) {\n\t// run Paxos algorithm in a new thread(run 
the Paxos protocol concurrently)\n\t// play the role of proposer\n\n\t// Your code here.\n\n\tpx.clog(DBG_PROPOSER, \"Start\", \"Start seq=%d v=%v\", seq, v)\n\n\t// I'm Proposer\n\tgo func() {\n\t\tn := 0\n\t\tmax_reject_pnum := -1\n\t\tfor {\n\t\t\tif px.dead {\n\t\t\t\t// I'm dead\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif px.Lslots[seq].Decided {\n\t\t\t\t// locally decided, wouldn't send prepare and accept anymore\n\t\t\t\t// just propagate the decision\n\t\t\t\tpx.send_decided(seq, px.Lslots[seq].V)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif px.APp[seq]+1 > n {\n\t\t\t\tn = px.APp[seq] + 1\n\t\t\t} else {\n\t\t\t\tn++\n\t\t\t}\n\n\t\t\tif n < max_reject_pnum {\n\t\t\t\tn = max_reject_pnum + 1\n\t\t\t}\n\n\t\t\tpx.clog(DBG_PROPOSER, \"Start\", \"send prepare, seq=%d n=%d\", seq, n)\n\n\t\t\tprepare_ok, p := px.send_prepare(seq, n)\n\t\t\tif !prepare_ok {\n\t\t\t\tmax_reject_pnum = p.PNum\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tnew_p := Proposal{}\n\n\t\t\t// no proposal yet, use v\n\t\t\tif p.PNum == 0 {\n\t\t\t\tnew_p.Value = v\n\t\t\t} else {\n\t\t\t\tnew_p.Value = p.Value\n\t\t\t}\n\n\t\t\tnew_p.PNum = n\n\n\t\t\tpx.clog(DBG_PROPOSER, \"Start\", \"prepare OK, proposal=%v\", new_p)\n\n\t\t\taccept_ok := px.send_accept(seq, new_p)\n\t\t\tif !accept_ok {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tpx.clog(DBG_PROPOSER, \"Start\", \"accept OK\")\n\n\t\t\tpx.send_decided(seq, new_p.Value)\n\n\t\t\tpx.clog(DBG_PROPOSER, \"Start\", \"decided\")\n\t\t\tbreak\n\t\t}\n\t}()\n}", "func (c *Collector) Start() {\n\tgo c.Source.Start()\n\tc.collect()\n}", "func (s *Solver) StreamThreaded() {\n\ts.barrierCount = 0\n\ts.barrierxSum = 0\n\ts.barrierySum = 0\n\ts.barrierFx = 0.0\n\ts.barrierFy = 0.0\n\tsem1 := make(chan streamSem, 4)\n\n\tgo func() {\n\t\tfor y := s.ydim - 2; y > 0; y-- { // first start in NW corner...\n\t\t\tfor x := 1; x < s.xdim-1; x++ {\n\t\t\t\ts.nN[x+y*s.xdim] = s.nN[x+(y-1)*s.xdim] // move the north-moving particles\n\t\t\t\ts.nNW[x+y*s.xdim] = s.nNW[x+1+(y-1)*s.xdim] // and the northwest-moving particles\n\t\t\t}\n\t\t}\n\t\tsem1 <- streamSem{}\n\t}()\n\tgo func() {\n\t\tfor y := s.ydim - 2; y > 0; y-- { // now start in NE corner...\n\t\t\tfor x := s.xdim - 2; x > 0; x-- {\n\t\t\t\ts.nE[x+y*s.xdim] = s.nE[x-1+y*s.xdim] // move the east-moving particles\n\t\t\t\ts.nNE[x+y*s.xdim] = s.nNE[x-1+(y-1)*s.xdim] // and the northeast-moving particles\n\t\t\t}\n\t\t}\n\t\tsem1 <- streamSem{}\n\t}()\n\tgo func() {\n\t\tfor y := 1; y < s.ydim-1; y++ { // now start in SE corner...\n\t\t\tfor x := s.xdim - 2; x > 0; x-- {\n\t\t\t\ts.nS[x+y*s.xdim] = s.nS[x+(y+1)*s.xdim] // move the south-moving particles\n\t\t\t\ts.nSE[x+y*s.xdim] = s.nSE[x-1+(y+1)*s.xdim] // and the southeast-moving particles\n\t\t\t}\n\t\t}\n\t\tsem1 <- streamSem{}\n\t}()\n\tgo func() {\n\t\tfor y := 1; y < s.ydim-1; y++ { // now start in the SW corner...\n\t\t\tfor x := 1; x < s.xdim-1; x++ {\n\t\t\t\ts.nW[x+y*s.xdim] = s.nW[x+1+y*s.xdim] // move the west-moving particles\n\t\t\t\ts.nSW[x+y*s.xdim] = s.nSW[x+1+(y+1)*s.xdim] // and the southwest-moving particles\n\t\t\t}\n\t\t}\n\t\tsem1 <- streamSem{}\n\t}()\n\n\t// Synchronize all threads\n\t<-sem1\n\t<-sem1\n\t<-sem1\n\t<-sem1\n\n\tsem2 := make(chan streamSem, s.ydim-1)\n\tfor yy := 1; yy < s.ydim-1; yy++ { // Now handle bounce-back from barriers\n\t\tgo func(y int) {\n\t\t\tfor x := 1; x < s.xdim-1; x++ {\n\t\t\t\tif s.barrier[x+y*s.xdim] {\n\t\t\t\t\tvar index = x + y*s.xdim\n\t\t\t\t\ts.nE[x+1+y*s.xdim] = s.nW[index]\n\t\t\t\t\ts.nW[x-1+y*s.xdim] = 
s.nE[index]\n\t\t\t\t\ts.nN[x+(y+1)*s.xdim] = s.nS[index]\n\t\t\t\t\ts.nS[x+(y-1)*s.xdim] = s.nN[index]\n\t\t\t\t\ts.nNE[x+1+(y+1)*s.xdim] = s.nSW[index]\n\t\t\t\t\ts.nNW[x-1+(y+1)*s.xdim] = s.nSE[index]\n\t\t\t\t\ts.nSE[x+1+(y-1)*s.xdim] = s.nNW[index]\n\t\t\t\t\ts.nSW[x-1+(y-1)*s.xdim] = s.nNE[index]\n\t\t\t\t\t// Keep track of stuff needed to plot force vector:\n\t\t\t\t\ts.barrierCount++\n\t\t\t\t\ts.barrierxSum += x\n\t\t\t\t\ts.barrierySum += y\n\t\t\t\t\ts.barrierFx += s.nE[index] + s.nNE[index] + s.nSE[index] - s.nW[index] - s.nNW[index] - s.nSW[index]\n\t\t\t\t\ts.barrierFy += s.nN[index] + s.nNE[index] + s.nNW[index] - s.nS[index] - s.nSE[index] - s.nSW[index]\n\t\t\t\t}\n\t\t\t}\n\t\t\tsem2 <- streamSem{}\n\t\t}(yy)\n\t}\n\n\t// Sync\n\tfor yy := 1; yy < s.ydim-1; yy++ {\n\t\t<-sem2\n\t}\n\n}", "func (a *Agent) start() {\n\ta.initAPI()\n\tnb := 0\n\tfor {\n\t\ta.updateStreams()\n\t\tnb++\n\t\tif nb == 10 {\n\t\t\tlog.Printf(\"Sent %d logs and %d metrics on the last %d seconds\\n\", a.nbLogs, a.nbMetrics, nb*conf.period)\n\t\t\tnb = 0\n\t\t\ta.nbLogs = 0\n\t\t\ta.nbMetrics = 0\n\t\t}\n\t\ttime.Sleep(time.Duration(conf.period) * time.Second)\n\t}\n}", "func (px *Paxos) Start(seq int, v interface{}) {\n\tpx.mu.Lock()\n\ts, ok := px.Stati[seq]\n\t// fmt.Printf(\"STILL GOING! %d %#v\\n\", seq, v)\n\tif px.printing && px.me == 2 && Debug {\n\t\tfmt.Printf(\"START %d %d %t --- %s \", px.me, seq, px.recovery, fateString(s))\n\t\tfmt.Println(v)\n\t}\n\n\tif px.DoneSeqs[px.me] <= seq && (!ok || s == Pending) {\n\t\tpx.mu.Unlock()\n\t\tgo px.propose(seq, v)\n\t} else {\n\t\tpx.mu.Unlock()\n\t}\n}", "func (lx *lexer) run() {\n\tfor state := lxBase; state != nil; {\n\t\tstate = state(lx)\n\t}\n\tclose(lx.tokStream)\n}", "func (p *Processor) Forward(xs ...ag.Node) []ag.Node {\n\tlength := len(xs)\n\tqs := p.query.Forward(xs...)\n\tks := make([]ag.Node, length)\n\tvs := p.value.Forward(xs...)\n\tmapk := make(map[int]*IndexedNodes)\n\tmapv := make(map[int]*IndexedNodes)\n\n\t// TODO: can it be implemented in a concurrent fashion?\n\tfor i, q := range qs {\n\t\tnorm := p.Graph.Sqrt(p.Graph.ReduceSum(p.Graph.Pow(q, 2.0)))\n\t\tks[i] = p.Graph.DivScalar(q, norm) // Euclidean norm\n\t\th := p.getHash(ks[i].Value().(*mat.Dense))\n\t\tinsertNode(mapk, ks[i], i, h)\n\t\tinsertNode(mapv, vs[i], i, h)\n\t}\n\n\tcontext := make([]ag.Node, length)\n\tprob := make([]mat.Matrix, length)\n\tfor i, q := range qs {\n\t\tj := p.getHash(q.Value().(*mat.Dense))\n\t\tc, p := p.lshScaledDotProductAttention(p.Graph, q, mapk[j], mapv[j], length, p.scaleFactor)\n\t\tcontext[i], prob[i] = c, p\n\t}\n\n\tp.Attention = &ContextProb{\n\t\tcontext: context,\n\t\tprob: prob,\n\t}\n\treturn context\n}", "func (w *SimpleMapReduce) Start() *SimpleMapReduce {\n if (w.hasStarted) {\n return w\n }\n\n w.hasStarted = true\n\n for i := 0; i < w.mappers; i++ {\n mapFn := w.mapFn\n mapperFinished := make(chan bool)\n w.mappersFinished[i] = mapperFinished\n\n // Parallel function which performs the map and adds the result to the reduction queue\n go func() {\n for item := range w.workQueue {\n res := mapFn(item)\n w.reduceQueue <- res\n }\n close(mapperFinished)\n }()\n }\n\n // If a reduction function is specified, start it. 
Otherwise, simply close the reducedFinish\n // channel.\n if (w.reduceFn != nil) {\n go func() {\n w.reduceFn(w.reduceQueue)\n close(w.reducedFinished)\n }()\n } else {\n close(w.reducedFinished)\n }\n\n return w\n}", "func (inNode *InputNode) Start() {\n}", "func (collector *Collector) Start() {\n\t// Begin our internal processing first\n\tgo collector.process()\n\n\t// Start the prospector to start collecting data\n\tcollector.prospector.Start()\n}", "func(oj *outerJoin)ProcessStreamFirst(msg content.IContent,fieldsFromStream1 []string){\n\t//for join using hash\n\tif oj.JoinStrategy == HASH{\n\t\tvar joinFieldsVal []interface{}\n\t\t//get all the field values of stream 1 like age =18 name=ram kumar etc\n\t\tfor _,field := range fieldsFromStream1{\n\t\t\tjoinFieldsVal= append(joinFieldsVal,msg.Values()[strings.TrimSpace(field)]) //eats any unwanted white spaces..Note:may be the code is reduandant and may require cleaning in later version\n\t\t}\n\t\tkey := concatKeys(joinFieldsVal) //concats the fields '18 ram kumar' is obtained\n\t\toj.hashTable.Set(msg,key)// inserts the concat values as key and msg as the value in the hash table\n\t}\n}", "func (source *ipv6IpamSource) start(sink addressConfigSink) error {\n\tsource.sink = sink\n\treturn nil\n}", "func (p *Processor) Forward(xs ...ag.Node) []ag.Node {\n\tys := make([]ag.Node, len(xs))\n\tfor i, x := range xs {\n\t\tys[i] = p.forward(x)\n\t}\n\treturn ys\n}", "func (p *Processor) Start() {\n\tp.setDefaults()\n\tdispatcher := p.dispatcher\n\tdispatcher.Run()\n\tstopChan := p.stopChan\n\nLOOP:\n\tfor {\n\t\tbuffer := p.byteArrayPool.Get()\n\t\trlen, remote, err := p.Conn.ReadFrom(buffer)\n\t\tif err != nil {\n\t\t\tif p.isCloseError(err) {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tpanic(err)\n\t\t}\n\t\t_, _ = rlen, remote\n\t\tdispatcher.SubmitJob(buffer)\n\t\tselect {\n\t\tcase <-stopChan:\n\t\t\tbreak LOOP\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tdispatcher.Stop()\n}", "func (engine *Engine) StreamBeginData(\n\tvbno uint16, vbuuid, seqno uint64, status byte,\n\tcode byte, opaque2 uint64, oso bool) interface{} {\n\n\treturn engine.evaluator.StreamBeginData(vbno, vbuuid, seqno,\n\t\tGetNodeUUID(), status, code, opaque2, oso)\n}", "func (e *binaryExprEvaluator) stop() {\n\te.lhs.stop()\n\te.rhs.stop()\n\tsyncClose(e.done)\n}", "func (a *Acceptor) Start() {\n\tgo func() {\n\t\tfor {\n\t\t\t//TODO(student): Task 3 - distributed implementation\n\t\t\tselect {\n\t\t\tcase prpMsg := <-a.prepareChan: //TODO: Create a channel to recvie pepare messages from other nodes\n\t\t\t\tif prmMsg, sendMsg := a.handlePrepare(prpMsg); sendMsg == true {\n\t\t\t\t\ta.PromiseOutChan <- prmMsg\n\t\t\t\t}\n\t\t\tcase accMsg := <-a.acceptChan: //TODO: Create a channel to receive accept messages from other nodes\n\t\t\t\tif lrnMsg, sendMsg := a.handleAccept(accMsg); sendMsg == true {\n\t\t\t\t\ta.LearnOutChan <- lrnMsg\n\t\t\t\t}\n\t\t\tcase <-a.stopChan:\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n}", "func stream(key string, fields map[string]bool, ring *[100]*NodeMembership) {\n fmt.Println(\"Streaming these fields from \" + key + \": \", fields)\n\n hashKey := hash(key)\n\n var request Request\n request.requestType = \"stream\"\n request.key = hashKey\n request.fields = fields\n\n pos := hashKey\n for {\n nodeMembership := ring[pos]\n if nodeMembership != nil {\n nodeMembership.requestReceiver <- request\n break\n } else {\n pos = (pos + 1) % len(ring)\n }\n }\n}", "func txs(cpu *CPU, step *runStep) {\n\tcpu.sp = cpu.x\n}", "func (q *CQPU) forward(predicate 
[]*pbUtils.AttributePredicate, streamRec *pbQPU.ResponseStreamRecord, streamOut pbQPU.QPU_QueryServer, seqID *int64, respond bool) error {\n\tif respond {\n\t\terr := streamOut.Send(\n\t\t\tprotoutils.ResponseStreamRecord(\n\t\t\t\t*seqID,\n\t\t\t\tstreamRec.GetType(),\n\t\t\t\tstreamRec.GetLogOp(),\n\t\t\t))\n\t\t(*seqID)++\n\t\treturn err\n\t}\n\treturn nil\n}", "func (pr *PeriodicReader) start(ctx context.Context) {\n\tdefer pr.wait.Done()\n\tticker := time.NewTicker(pr.interval)\n\tfor {\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tif err := pr.collectWithTimeout(ctx, pr.exporter.ExportMetrics); err != nil {\n\t\t\t\totel.Handle(err)\n\t\t\t}\n\t\t}\n\t}\n}", "func (l *lexer) run() {\n\tfor state := lexSchema; state != nil; {\n\t\tstate = state(l)\n\t}\n\tclose(l.items) // No more tokens will be delivered.\n}", "func (p *Pipeline) Start() {\n\tfmt.Printf(\"\\nIn start\")\n\tC.gstreamer_receive_start_pipeline(p.Pipeline)\n}", "func (op *compose) run(s stream) stream {\n\tif err := op.validate(op.streams); err != nil {\n\t\ts.err = err\n\t\treturn s\n\t}\n\tif s.streams == nil {\n\t\ts.streams = make([]stream, 0)\n\t}\n\tfor _, str := range op.streams {\n\t\ts.streams = append(s.streams, str.(stream))\n\t}\n\treturn s\n}", "func (r *InMemorySourceReader) Begin() {\n\tr.iStack.PushBack(r.i)\n}", "func (px *Paxos) Start(seq int, v interface{}) {\n\t// Your code here.\n\n\tmin := px.Min()\n\tpx.mu.Lock()\n\tif px.instances[seq] == nil && seq >= min {\n\t\tpx.CreateInstance(seq)\n\t}\n\tpx.mu.Unlock()\n\n\tstatus, _ := px.Status(seq)\n\tif status == Decided || status == Forgotten {\n\t\treturn\n\t}\n\tgo px.Proposer(seq, v)\n\t//execute goroutine to start agreement process so this can return immediately\n\t//goroutine needs to propose value from this peer, also run acceptor and learner?\n\t//actual paxos agreement for single instance should be run by this\n\t//goroutine also needs to transmit done value for each peer in the messages and update min and delete from map\n\t//set na to 0\n}", "func (s *Solver) Stream() {\n\ts.barrierCount = 0\n\ts.barrierxSum = 0\n\ts.barrierySum = 0\n\ts.barrierFx = 0.0\n\ts.barrierFy = 0.0\n\tfor y := s.ydim - 2; y > 0; y-- { // first start in NW corner...\n\t\tfor x := 1; x < s.xdim-1; x++ {\n\t\t\ts.nN[x+y*s.xdim] = s.nN[x+(y-1)*s.xdim] // move the north-moving particles\n\t\t\ts.nNW[x+y*s.xdim] = s.nNW[x+1+(y-1)*s.xdim] // and the northwest-moving particles\n\t\t}\n\t}\n\tfor y := s.ydim - 2; y > 0; y-- { // now start in NE corner...\n\t\tfor x := s.xdim - 2; x > 0; x-- {\n\t\t\ts.nE[x+y*s.xdim] = s.nE[x-1+y*s.xdim] // move the east-moving particles\n\t\t\ts.nNE[x+y*s.xdim] = s.nNE[x-1+(y-1)*s.xdim] // and the northeast-moving particles\n\t\t}\n\t}\n\tfor y := 1; y < s.ydim-1; y++ { // now start in SE corner...\n\t\tfor x := s.xdim - 2; x > 0; x-- {\n\t\t\ts.nS[x+y*s.xdim] = s.nS[x+(y+1)*s.xdim] // move the south-moving particles\n\t\t\ts.nSE[x+y*s.xdim] = s.nSE[x-1+(y+1)*s.xdim] // and the southeast-moving particles\n\t\t}\n\t}\n\tfor y := 1; y < s.ydim-1; y++ { // now start in the SW corner...\n\t\tfor x := 1; x < s.xdim-1; x++ {\n\t\t\ts.nW[x+y*s.xdim] = s.nW[x+1+y*s.xdim] // move the west-moving particles\n\t\t\ts.nSW[x+y*s.xdim] = s.nSW[x+1+(y+1)*s.xdim] // and the southwest-moving particles\n\t\t}\n\t}\n\tfor y := 1; y < s.ydim-1; y++ { // Now handle bounce-back from barriers\n\t\tfor x := 1; x < s.xdim-1; x++ {\n\t\t\tif s.barrier[x+y*s.xdim] {\n\t\t\t\tvar index = x + y*s.xdim\n\t\t\t\ts.nE[x+1+y*s.xdim] = 
s.nW[index]\n\t\t\t\ts.nW[x-1+y*s.xdim] = s.nE[index]\n\t\t\t\ts.nN[x+(y+1)*s.xdim] = s.nS[index]\n\t\t\t\ts.nS[x+(y-1)*s.xdim] = s.nN[index]\n\t\t\t\ts.nNE[x+1+(y+1)*s.xdim] = s.nSW[index]\n\t\t\t\ts.nNW[x-1+(y+1)*s.xdim] = s.nSE[index]\n\t\t\t\ts.nSE[x+1+(y-1)*s.xdim] = s.nNW[index]\n\t\t\t\ts.nSW[x-1+(y-1)*s.xdim] = s.nNE[index]\n\t\t\t\t// Keep track of stuff needed to plot force vector:\n\t\t\t\ts.barrierCount++\n\t\t\t\ts.barrierxSum += x\n\t\t\t\ts.barrierySum += y\n\t\t\t\ts.barrierFx += s.nE[index] + s.nNE[index] + s.nSE[index] - s.nW[index] - s.nNW[index] - s.nSW[index]\n\t\t\t\ts.barrierFy += s.nN[index] + s.nNE[index] + s.nNW[index] - s.nS[index] - s.nSE[index] - s.nSW[index]\n\t\t\t}\n\t\t}\n\t}\n}", "func (c *Chunker) Start() {\n\t// No entries in the hash table.\n\tfor i := 0; i < len(c.ht); i++ {\n\t\tc.ht[i] = -c.htlen\n\t}\n\n\t// Nothing in the queue waiting to be added to the table, either.\n\tfor i := 0; i < len(c.b); i++ {\n\t\tc.b[i] = c.p\n\t}\n\n\t// No bytes input yet.\n\tc.akr = (-c.p) % c.p\n\tc.yka = 0\n\tc.buf = c.buf[:0]\n\tc.r = 0\n\tc.rs = 1 + c.mu\n}", "func (a *Accumulator)Start(){\n\tgo func() {\n\t\tfor stats := range a.StatsChan {\n\t\t\ta.mu.Lock()\n\t\t\ta.Stats = append(a.Stats, stats)\n\t\t\ta.mu.Unlock()\n\t\t\tif ( len(a.Stats) >= a.MaxResponses) {\n\t\t\t\tLog(\"top\", \"All requests received\")\n\t\t\t\ta.Done <- true\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tfor stats := range a.OverallStatsChan {\n\t\t\ta.mu.Lock()\n\t\t\ta.OverallStats = append(a.OverallStats, stats)\n\t\t\ta.mu.Unlock()\n\t\t}\n\t}()\n}", "func (w *Worker) startReader() {\n\tdump, err := os.Open(w.InputFile)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdecoder := xml.NewDecoder(dump)\n\n\tfor {\n\t\tt, _ := decoder.Token()\n\t\tif t == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t// Inspect the type of the token just read.\n\t\tswitch se := t.(type) {\n\t\tcase xml.StartElement:\n\t\t\tif se.Name.Local == \"page\" {\n\t\t\t\tvar p Page\n\t\t\t\tdecoder.DecodeElement(&p, &se)\n\n\t\t\t\tfound := find(seen, p.Title)\n\t\t\t\tif found {\n\t\t\t\t\tlog.Printf(\"Duplicate title: %s. 
Skipping...\", p.Title)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tw.InPage <- &p\n\t\t\t}\n\t\t}\n\t}\n\n\t// Close the channels associated with reading/writing\n\tclose(w.InPage)\n\tlog.Println(\"Reader done\")\n}", "func (s *BasePlSqlParserListener) EnterStreaming_clause(ctx *Streaming_clauseContext) {}", "func (s *BasevhdlListener) EnterMultiplying_operator(ctx *Multiplying_operatorContext) {}", "func exprStream(node *node32) <-chan *node32 {\n\tout := make(chan *node32)\n\tgo func() {\n\t\tfor ; node != nil; node = node.next {\n\t\t\tswitch node.pegRule {\n\t\t\tcase ruleSPACE:\n\t\t\tcase ruleBOOLLITER:\n\t\t\t\tout <- node.up\n\t\t\tcase ruleEXPR:\n\t\t\t\tfor inode := range exprStream(node.up) {\n\t\t\t\t\tout <- inode\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tout <- node\n\t\t\t}\n\t\t}\n\t\tclose(out)\n\t}()\n\treturn out\n}", "func (s *BasecluListener) EnterIterator(ctx *IteratorContext) {}", "func sourceWorker(ctx context.Context, source Source, outCh chan<- Payload, errCh chan<- error) {\n\tfor source.Next(ctx) {\n\t\tpayload := source.Payload()\n\t\tselect {\n\t\tcase outCh <- payload:\n\t\tcase <-ctx.Done():\n\t\t\t// Asked to shutdown\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Check for errors\n\tif err := source.Error(); err != nil {\n\t\twrappedErr := xerrors.Errorf(\"pipeline source: %w\", err)\n\t\tmaybeEmitError(wrappedErr, errCh)\n\t}\n}", "func (p *Processor) Forward(xs ...ag.Node) []ag.Node {\n\tif p.RequiresFullSeq() {\n\t\treturn p.fullSeqForward(xs)\n\t}\n\treturn p.incrementalForward(xs)\n}", "func (l *lexer) run() {\n\tfor l.state = lexRule; l.state != nil; {\n\t\tl.state = l.state(l)\n\t}\n\tclose(l.items)\n}", "func (job *Job) startPipeline(w http.ResponseWriter, r *http.Request, jobstore *JobStore) {\n\tstartTime := time.Now()\n\tfmt.Fprintf(w, \"Executing Job: %s\\n\", job.name)\n\ttime.Sleep(1 * time.Second)\n\tfmt.Fprintf(w, \"Finished executing Job: %s in duration: %s \\n\", job.name, time.Since(startTime))\n\tjobstore.addJobs(Job{job.name, job.scm, time.Since(startTime), time.Now().UTC()})\n}", "func (a *LogAgent) Start(persister operator.Persister) (err error) {\n\ta.startOnce.Do(func() {\n\t\terr = a.pipeline.Start(persister)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t})\n\treturn\n}", "func (s *Stream) bindOps() {\n\ts.log.Print(\"binding operators\")\n\tif s.ops == nil {\n\t\treturn\n\t}\n\tfor i, op := range s.ops {\n\t\tif i == 0 { // link 1st to source\n\t\t\top.SetInput(s.source.GetOutput())\n\t\t} else {\n\t\t\top.SetInput(s.ops[i-1].GetOutput())\n\t\t}\n\t}\n}", "func (l *lexer) run() {\r\n\tfor l.state = lexAny(l); l.state != nil; {\r\n\t\tl.state = l.state(l)\r\n\t}\r\n\tclose(l.tokens)\r\n}", "func (o *Operator) Start(ctx context.Context) error {\n\treturn o.manager.Start(ctx)\n}", "func (l *lexer) run() {\n\tfor l.state = lexAll; l.state != nil; {\n\t\tl.state = l.state(l)\n\t}\n\tclose(l.Items)\n}", "func (root *mTreap) start(mask, match treapIterType) treapIter {\n\tf := treapFilter(mask, match)\n\treturn treapIter{f, root.treap.findMinimal(f)}\n}", "func (b *Basic) start() {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tfmt.Fprintln(os.Stderr, \"Basic.start -- \", r)\n\t\t\tgo b.start()\n\t\t}\n\t}()\n\n\tfor rec := range b.in {\n\t\tif rec.flush != nil {\n\t\t\tb.flush(rec.flush)\n\t\t} else {\n\t\t\terr := b.w.Write(rec)\n\t\t\tif err != nil {\n\t\t\t\tb.incErrorCounter()\n\t\t\t\trec.Logger().Logr().ReportError(err)\n\t\t\t} else {\n\t\t\t\tb.incLoggedCounter()\n\t\t\t}\n\t\t}\n\t}\n\tclose(b.done)\n}", "func (c *RDContext) Next(data 
[]byte, endStream bool) error {\n\tfilters := c.Stream.dataReceivers\n\n\t// Finished processing the filters - call connection\n\tif c.currentFilter >= len(filters) {\n\t\treturn c.Stream.Connection.SendData(c.Stream, data, endStream)\n\t}\n\n\tnext := filters[c.currentFilter]\n\tc.currentFilter++\n\tc.Stream.setMiddlewareName(next.Name())\n\treturn next.ReceiveData(c, data, endStream)\n}", "func (w *Worker) StartConsuming(pendings chan Tree, done chan bool) {\n\tfor {\n\t\tselect {\n\t\tcase <- done:\n\t\t\treturn\n\t\tcase job := <- pendings:\n\n\t\t\t// Scheduler ensures that whenever job is received\n\t\t\t// left and right are already assigned\n\t\t\tjob.payloadChan <- w.boo.AggregateTrees(\n\t\t\t\tjob.left.payload,\n\t\t\t\tjob.right.payload,\n\t\t\t)\t\n\t\t}\n\t}\n}", "func (phStats *passwordHasherStats) startAccumulating() {\n\tgo phStats.accumulateStats()\n}", "func (p *Pipeline) Start() {\n\tC.gstreamer_start_pipeline(p.Pipeline, C.CString(p.id))\n}", "func (i *Ingester) Start(ctx context.Context) error {\n\tconcurrentProc := make(chan bool, nConcurrentProcessors)\n\tresultChan, err := i.getInputChannel(ctx)\n\tif err != nil {\n\t\treturn sklog.FmtErrorf(\"Error retrieving input channel: %s\", err)\n\t}\n\n\t// Continuously catch events from all input sources and push the data to the processor.\n\tgo func(doneCh <-chan bool) {\n\t\tvar resultFile ResultFileLocation = nil\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase resultFile = <-resultChan:\n\t\t\tcase <-doneCh:\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// get a slot in line to call Process\n\t\t\tconcurrentProc <- true\n\t\t\tgo func(resultFile ResultFileLocation) {\n\t\t\t\tdefer func() { <-concurrentProc }()\n\t\t\t\ti.processResult(ctx, resultFile)\n\t\t\t}(resultFile)\n\t\t}\n\t}(i.doneCh)\n\treturn nil\n}", "func oetRun(value *int, procid int, steps int,\n toPred, fromPred, toSucc, fromSucc chan int) {\n for t := 0; t < steps; t += 1 {\n if (procid + t) % 2 == 0 && toSucc != nil {\n exchangeWith(mini, value, fromSucc, toSucc)\n }\n if (procid + t) % 2 == 1 && toPred != nil {\n exchangeWith(maxi, value, fromPred, toPred)\n }\n }\n}", "func (n *network) Start() {\n\n\tfor _, l := range n.learners {\n\t\tgo l.Run()\n\t}\n\n\tfor _, a := range n.acceptors {\n\t\tgo a.Run()\n\t}\n\n\tfor _, p := range n.proposers {\n\t\tgo p.Run()\n\t}\n\t\n}", "func (t *Transport) start(msg Message, stream *Stream, out []byte) (n int) {\n\tatomic.AddUint64(&t.nTxstart, 1)\n\tn = tag2cbor(tagCborPrefix, out) // prefix\n\tn += arrayStart(out[n:]) // 0x9f (start stream as cbor array)\n\tn += t.framepkt(msg, stream, out[n:]) // packet\n\treturn n\n}", "func (o *Operator) Start() error {\n\tstopCh := signals.SetupSignalHandler()\n\n\treturn o.manager.Start(stopCh)\n}", "func (ss *StreamerServer) handleStartStreams(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"POST\" {\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\tb, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tglog.Infof(\"Got request: '%s'\", string(b))\n\tssr := &model.StartStreamsReq{}\n\terr = json.Unmarshal(b, ssr)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\t}\n\tglog.Infof(\"Start streams request %+v\", *ssr)\n\tif ssr.Host == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(\"Should specify 'host' field\"))\n\t\treturn\n\t}\n\tif ssr.Repeat <= 0 {\n\t\tssr.Repeat = 1\n\t}\n\tif ssr.Simultaneous <= 0 
{\n\t\tssr.Simultaneous = 1\n\t}\n\tif ssr.FileName == \"\" {\n\t\tssr.FileName = \"BigBuckBunny.mp4\"\n\n\t}\n\tif ssr.RTMP == 0 {\n\t\tssr.RTMP = 1935\n\t}\n\tif ssr.Media == 0 {\n\t\tssr.Media = 8935\n\t}\n\tif ssr.ProfilesNum != 0 {\n\t\tmodel.ProfilesNum = ssr.ProfilesNum\n\t}\n\tif _, err := os.Stat(ssr.FileName); os.IsNotExist(err) {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(`File ` + ssr.FileName + ` does not exists`))\n\t\treturn\n\t}\n\tglog.Infof(\"Get request: %+v\", ssr)\n\tif !ssr.DoNotClearStats {\n\t\tss.streamer = testers.NewStreamer(ss.wowzaMode)\n\t}\n\tvar streamDuration time.Duration\n\tif ssr.Time != \"\" {\n\t\tif streamDuration, err = ParseStreamDurationArgument(ssr.Time); err != nil {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tw.Write([]byte(err.Error()))\n\t\t\treturn\n\t\t}\n\t}\n\n\tbaseManifestID, err := ss.streamer.StartStreams(ssr.FileName, ssr.Host, strconv.Itoa(ssr.RTMP), strconv.Itoa(ssr.Media), ssr.Simultaneous,\n\t\tssr.Repeat, streamDuration, true, ssr.MeasureLatency, true, 3, 5*time.Second, 0)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tres, err := json.Marshal(\n\t\t&model.StartStreamsRes{\n\t\t\tSuccess: true,\n\t\t\tBaseManifestID: baseManifestID,\n\t\t},\n\t)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\t}\n\tw.Write(res)\n}", "func (s *quicHandler) receiveDataFromSources() {\n\tfor {\n\t\tselect {\n\t\tcase item, ok := <-s.source:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// one stream for each flows/sinks.\n\t\t\tflows, sinks := Build(s.serverlessConfig, &s.connMap)\n\t\t\tstream := DispatcherWithFunc(flows, item)\n\n\t\t\tgo func() {\n\t\t\t\tfor customer := range stream.Observe(rxgo.WithErrorStrategy(rxgo.ContinueOnError)) {\n\t\t\t\t\tif customer.Error() {\n\t\t\t\t\t\tfmt.Println(customer.E.Error())\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tvalue := customer.V.([]byte)\n\n\t\t\t\t\t// sinks\n\t\t\t\t\tfor _, sink := range sinks {\n\t\t\t\t\t\tgo sendDataToSink(sink, value, \"Zipper sent frame to sink\", \"❌ Zipper sent frame to sink failed.\")\n\t\t\t\t\t}\n\n\t\t\t\t\t// Zipper-Senders\n\t\t\t\t\tfor _, sender := range s.zipperSenders {\n\t\t\t\t\t\tif sender == nil {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tgo sendDataToSink(sender, value, \"[Zipper Sender] sent frame to downstream zipper.\", \"❌ [Zipper Sender] sent frame to downstream zipper failed.\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}\n}", "func (px *Paxos) Start(seq int, v interface{}) {\n\tstatus, _ := px.Status(seq)\n\tif status == Decided {\n\t\treturn\n\t}\n\tif status == Forgotten {\n\t\treturn\n\t}\n\n\tpx.proposerMgr.mu.Lock()\n\tdefer px.proposerMgr.mu.Unlock()\n\tif _, ok := px.proposerMgr.proposers[seq]; ok {\n\t\treturn\n\t}\n\n\tnewProp := Proposer{}\n\tnewProp.me = px.me\n\tnewProp.nP = 0\n\tnewProp.vP = v\n\tnewProp.parent = px\n\tpx.proposerMgr.proposers[seq] = &newProp\n\tgo newProp.ProposerLoop(seq)\n}", "func (s *BasevhdlListener) EnterLogical_operator(ctx *Logical_operatorContext) {}", "func (t *transmitLooper) next() bool {\n\tif t.state == tsEnd { // are we done done?\n\t\tlog.Fatalf(\"bad state, transmitLooper should know its done!\")\n\t}\n\tif t.state == tsParams {\n\t\tt.state = tsEnd\n\t\treturn true\n\t}\n\tif t.emitterIndex == len(t.emitters) { //sections done?\n\t\tt.state = tsParams\n\t\treturn 
false\n\t}\n\tt.current = t.emitters[t.emitterIndex]\n\tt.current.next()\n\tt.emitterIndex++\n\treturn true\n}", "func inputSourceRunner(ctx context.Context, src InputSource, outCh chan<- Data, errQueue *queue.Queue) {\n\tfor src.Next(ctx) {\n\t\tdata := src.Data()\n\n\t\tselect {\n\t\tcase outCh <- data:\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n\t// Check for errors\n\tif err := src.Error(); err != nil {\n\t\terrQueue.Append(fmt.Errorf(\"pipeline input source: %v\", err))\n\t}\n}", "func (m *Module) Stream(s bar.Sink) {\n\toutputFunc := m.outputFunc.Get().(func(State) bar.Output)\n\tnextOutputFunc, done := m.outputFunc.Subscribe()\n\tdefer done()\n\n\tlinkSub := netlink.ByName(m.intf)\n\tdefer linkSub.Unsubscribe()\n\n\tstate := getState(linkSub.Get().State)\n\tfor {\n\t\ts.Output(outputFunc(state))\n\t\tselect {\n\t\tcase <-linkSub.C:\n\t\t\tstate = getState(linkSub.Get().State)\n\t\tcase <-nextOutputFunc:\n\t\t\toutputFunc = m.outputFunc.Get().(func(State) bar.Output)\n\t\t}\n\t}\n}" ]
[ "0.6233072", "0.6058875", "0.59076184", "0.58346933", "0.5613373", "0.55800617", "0.5572317", "0.5537268", "0.531643", "0.5313318", "0.5272919", "0.52699506", "0.52696264", "0.5269485", "0.5239846", "0.5237453", "0.5198518", "0.5177997", "0.5172955", "0.51625824", "0.5154037", "0.5120726", "0.5094484", "0.50772905", "0.5060705", "0.5050686", "0.5035174", "0.5033191", "0.50283873", "0.5003943", "0.5000734", "0.4979043", "0.49717328", "0.49717328", "0.49664336", "0.4956822", "0.49505496", "0.49468073", "0.49374342", "0.4921857", "0.4913864", "0.4913218", "0.48989594", "0.48978823", "0.4896992", "0.48822027", "0.48820612", "0.4845738", "0.48443452", "0.48371473", "0.48369715", "0.48267952", "0.4800991", "0.4797748", "0.47973388", "0.47955325", "0.47806266", "0.47568056", "0.475604", "0.47556266", "0.47548", "0.4751091", "0.47374862", "0.47297883", "0.47240373", "0.47185484", "0.4708165", "0.47030634", "0.46968916", "0.46909055", "0.4684159", "0.46790254", "0.46695116", "0.46612787", "0.46559393", "0.46550068", "0.4650793", "0.4647186", "0.464639", "0.464315", "0.46430263", "0.46304777", "0.46165684", "0.4604914", "0.45994598", "0.45929384", "0.45922476", "0.45905918", "0.45882937", "0.4580854", "0.45800278", "0.4575359", "0.4573169", "0.45714262", "0.45592794", "0.45564434", "0.45542017", "0.45517302", "0.45503217", "0.45500088" ]
0.6706518
0
stop stops the processor.
func (e *binaryExprEvaluator) stop() { e.lhs.stop() e.rhs.stop() syncClose(e.done) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (p *literalProcessor) stop() { syncClose(p.done) }", "func (r *reaper) stop() {\n\tr.stopCh <- struct{}{}\n}", "func (w *Processor) Stop() {\n\tclose(w.stop)\n}", "func (c *Controller) stop(name types.NamespacedName) {\n\tproc, ok := c.procs[name]\n\tif !ok {\n\t\treturn\n\t}\n\n\tif proc.cancelFunc == nil {\n\t\treturn\n\t}\n\tproc.cancelFunc()\n\t<-proc.doneCh\n\tproc.probeWorker = nil\n\tproc.cancelFunc = nil\n\tproc.doneCh = nil\n}", "func (d *D) stop() {\n\tclose(d.stopCh)\n}", "func (c *Processor) Stop() (err error) {\n\tc.runState = RunStateStopped\n\treturn\n}", "func stop(c *cli.Context) error {\n\n\tif !isSystemRunning() {\n\t\treturn nil\n\t}\n\t//readers, writers, _, controllers := getIPAddresses()\n\treaders, writers, _, _ := getIPAddresses()\n\n\tfor _, ipaddr := range readers {\n\t\tfmt.Println(\"reader\", ipaddr, \"StopProcess\")\n\t\tipaddrs := make([]string, 1)\n\t\tipaddrs[0] = ipaddr\n\t\tsendCommandToControllers(ipaddrs, \"StopProcess\", \"\")\n\n\t}\n\tfor _, ipaddr := range writers {\n\t\tfmt.Println(\"writer\", ipaddr, \"StopProcess\")\n\t\tipaddrs := make([]string, 1)\n\t\tipaddrs[0] = ipaddr\n\t\tsendCommandToControllers(ipaddrs, \"StopProcess\", \"\")\n\n\t}\n\n\t//sendCommandToControllers(controllers, \"StopReaders\", \"\")\n\t//sendCommandToControllers(controllers, \"StopWriters\", \"\")\n\t//sendCommandToControllers(controllers, \"StopServers\", \"\")\n\treturn nil\n}", "func (w *worker) stop() {\n\tatomic.StoreInt32(&w.running, 0)\n}", "func (s *schedule) stop() {\n\tif !s.running {\n\t\treturn\n\t}\n\ts.running = false\n\ts.stopCh <- struct{}{}\n}", "func (r *RunCommand) stop() {\n\tr.logTail.Stop()\n\tr.pw.Stop()\n}", "func (sl *ReceiverLoop) stop() {\n\tsl.cancel()\n\t<-sl.stopped\n}", "func (w *worker) stop() {\n\tselect {\n\tcase w.stopCh <- struct{}{}:\n\tdefault: // Non-blocking.\n\t}\n}", "func (np *nodeProcess) stop() error {\n\terr := np.node.Stop()\n\tnp.rawClient.Kill()\n\treturn err\n}", "func (bc *BotCommand) stop() {\n\tbc.Lock()\n\tdefer bc.Unlock()\n\tbc.running = false\n}", "func (w *worker) stop() {\n\tw.quitChan <- true\n}", "func (v *vtStopCrawler) stop() {\n\tfor _, worker := range v.workers {\n\t\tworker.stop()\n\t}\n\tclose(v.done)\n}", "func (m *Module) stop() {\n\tm.mux.Lock()\n\tdefer m.mux.Unlock()\n\tif m.started && !m.isFinished() {\n\t\tclose(m.done)\n\t}\n}", "func (s *server) stop() {\n\ts.stopMu.Lock()\n\tdefer s.stopMu.Unlock()\n\n\tclose(s.stopCh)\n\ts.stopCh = make(chan struct{})\n}", "func stop() {\n\trobot.RLock()\n\tpr := robot.pluginsRunning\n\tstop := robot.stop\n\trobot.RUnlock()\n\tLog(Debug, fmt.Sprintf(\"stop called with %d plugins running\", pr))\n\trobot.Wait()\n\tbrainQuit()\n\tclose(stop)\n}", "func stop() {\n\tlog.Info(\"Maison is stopping...\")\n\n\tclosePlugins()\n\n\t// TODO: close stores\n\n\tbus.Stop()\n\n\tlog.Info(\"Maison is stopped\")\n}", "func (f *Processor) Stop() {\n\tclose(f.quit)\n\tf.eventsSemaphore.Terminate()\n\tf.wg.Wait()\n\tf.buffer.Clear()\n}", "func (w *Watch) stop() {\n\tw.done <- struct{}{}\n}", "func (e *exec) stop(ctx context.Context) {\n\t// Lock the mutex to prevent race conditions with Start\n\te.execMutex.Lock()\n\tdefer e.execMutex.Unlock()\n\n\t// Do the shutdown sequence once until the startup sequence resets\n\te.stopOnce.Do(func() {\n\t\tdefer func() {\n\t\t\t// reset startOnce so the startup sequence can happen again\n\t\t\te.startOnce = sync.Once{}\n\t\t}()\n\t\te.stopFn(ctx)\n\t})\n}", "func Stop() {\n\tstopRunning <- true\n\n}", "func stop(process *os.Process) 
{\n\tfmt.Printf(\"Stopping %s...\", AppName)\n\tif err := Stop(process); err != nil {\n\t\tfailed(err)\n\t} else {\n\t\tfmt.Println(\"OK\")\n\t}\n}", "func (hb *heartbeat) stop() {\n\tselect {\n\tcase hb.stopChan <- struct{}{}:\n\tdefault:\n\t}\n}", "func (p *Prober) Stop() {\n\tclose(p.stop)\n}", "func (f *framework) stop() {\n\tclose(f.epochChan)\n}", "func Stop() {\n\ts.Stop()\n}", "func (s *Scanner) Stop() {\n\ts.stop <- struct{}{}\n}", "func (s *ContinuousScanner) Stop() {\n\ts.stop <- struct{}{}\n}", "func (a *appsec) stop() {\n\ta.unregisterWAF()\n\ta.limiter.Stop()\n}", "func (t *channelTransport) stop() {\n\tt.stopChan <- struct{}{}\n}", "func (p *Processor) Stop() {\n\tc := p.stopChan\n\tp.stopChan = nil\n\tc <- struct{}{}\n\tp.Conn.Close()\n}", "func (i2c I2C) stop() {\n\t// Send stop condition.\n\tavr.TWCR.Set(avr.TWCR_TWEN | avr.TWCR_TWINT | avr.TWCR_TWSTO)\n\n\t// Wait for stop condition to be executed on bus.\n\tfor !avr.TWCR.HasBits(avr.TWCR_TWSTO) {\n\t}\n}", "func (master *ProcMaster) stop(proc ProcContainer) error {\n\tif proc.IsAlive() {\n\t\twaitStop := master.Watcher.StopWatcher(proc.Identifier())\n\t\terr := proc.Stop()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif waitStop != nil {\n\t\t\t<-waitStop\n\t\t\tproc.SetStatus(\"stopped\")\n\t\t}\n\t\terr = proc.Delete()\n\t\tif err != nil {\n\t\t\tlogger.Warning(\"Error while deleting %s (%d) process files: %s\", proc.Identifier(), proc.GetPid(), err)\n\t\t\t// We don't return after this error. It's not super critical.\n\t\t}\n\t} else {\n\t\t// If the process is not longer active, we can remove any files it has sitting around.\n\t\terr := proc.Delete()\n\t\tif err != nil {\n\t\t\tlogger.Warning(\"Error while deleting %s (%d) process files: %s\", proc.Identifier(), proc.GetPid(), err)\n\t\t}\n\t}\n\tlogger.Infof(\"Proc %s successfully stopped.\", proc.Identifier())\n\treturn nil\n}", "func Stop() {\n\tinstance.stop()\n}", "func (t *Tracer) Stop() {}", "func (r *reducer) stop() {\n\tfor _, m := range r.mappers {\n\t\tm.stop()\n\t}\n\tsyncClose(r.done)\n}", "func (s *Streamer) Stop() {\n\tclose(s.stopc)\n}", "func (sp *scrapePool) stop() {\n\tsp.mtx.Lock()\n\tdefer sp.mtx.Unlock()\n\tsp.cancel()\n\tsp.targetMtx.Lock()\n\tvar wg sync.WaitGroup\n\twg.Add(len(sp.loops))\n\tfor fp, l := range sp.loops {\n\t\tgo func(l *scrapeLoop) {\n\t\t\tl.stop()\n\t\t\twg.Done()\n\t\t}(l)\n\t\tdelete(sp.loops, fp)\n\t\tdelete(sp.activeTargets, fp)\n\t}\n\tsp.targetMtx.Unlock()\n\twg.Wait()\n\tsp.client.CloseIdleConnections()\n}", "func (i *I2C) stop() {\n\t// Page 9, section 3.1.4 START and STOP conditions\n\ti.scl.Out(gpio.Low)\n\ti.sleepHalfCycle()\n\ti.scl.Out(gpio.High)\n\ti.sleepHalfCycle()\n\ti.sda.Out(gpio.High)\n\t// TODO(maruel): This sleep could be skipped, assuming we wait for the next\n\t// transfer if too quick to happen.\n\ti.sleepHalfCycle()\n}", "func stop() error {\n\tif spammerInstance == nil {\n\t\treturn ErrSpammerDisabled\n\t}\n\n\tspammerLock.Lock()\n\tdefer spammerLock.Unlock()\n\n\tstopWithoutLocking()\n\n\tisRunning = false\n\n\treturn nil\n}", "func (hsp HistoryServicePrecacher) Stop() { hsp.pc.Stop() }", "func (f *feeService) stop() {\n\tif err := f.srv.Shutdown(context.Background()); err != nil {\n\t\tfmt.Printf(\"error: cannot stop fee api: %v\", err)\n\t}\n\n\tf.wg.Wait()\n}", "func (margelet *Margelet) Stop() {\n\tmargelet.running = false\n}", "func (ms *MarvinServer) Stop() {\n\n}", "func (p *Proposer) Stop() {\n\tp.stop <- struct{}{}\n}", "func (x *x509Handler) stop() {\n\tclose(x.stopChan)\n}", 
"func (g *DarwinGrabber) Stop() {\n\tclose(g.stop)\n}", "func (b *Blinker) Stop() {\n\tclose(b.stop)\n}", "func (p *Peer) stop() {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\tp.heartbeatTimer.Stop()\n}", "func (p *Peer) stop() {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\tp.heartbeatTimer.Stop()\n}", "func (e *Engine) stop() error {\n\te.booted = false\n\n\t// instruct engine to shutdown\n\tshutdown := \"shutdown\"\n\tcommunication.Publish(\n\t\tnaming.Topic(e.Index, naming.Command),\n\t\tnaming.Publisher(e.Index, naming.Command),\n\t\tshutdown)\n\n\t// stop subscribing to engine's commands and events\n\te.Communication.Teardown()\n\n\t// TODO create graphic for MQTT hierarchy, whos's publishing what to whom and why\n\t// TODO explain MQTT hierarchy\n\treturn nil\n}", "func DeviceProcessStop() {\n\tBus.Broadcast(&bus.Message{Type: bus.Exit})\n}", "func (s *status) stopping() error { return s.set(\"stopping\", \"STOPPING=1\") }", "func stop() {\n\t// Close the channel to stop the mail daemon.\n\tclose(emailCh)\n}", "func (l *Loader) Stop() {\n\tl.spn.Stop()\n}", "func (e *Engine) Stop() {\n\te.running = false\n}", "func (c *LP) Stop() error {\n\treturn base.Stop(c.mdc)\n}", "func (s *RepService) Stop() {\n\ts.running = false\n}", "func (sp *StreamPool) Stop() {\n\t//sw.quitCh <- true\n}", "func (pool *WebSocketPool)stop() {\n\tclose(pool.input)\n}", "func (m *Machine) Stop() {\n\tm.stopSign <- struct{}{}\n}", "func (rtspService *RTSPService) Stop(msg *wssapi.Msg) (err error) {\n\treturn\n}", "func (n *Node) stop() (err error) {\n\tif err = n.stateCheck(nodeRunning, nodeHealthChecking); err != nil {\n\t\treturn\n\t}\n\tn.setState(nodeShuttingDown)\n\tn.stopChan <- true\n\tn.expireTicker.Stop()\n\tclose(n.stopChan)\n\tlogDebug(\"[Node]\", \"(%v) shutting down.\", n)\n\tn.shutdown()\n\treturn\n}", "func (a API) Stop(cmd *None) (e error) {\n\tRPCHandlers[\"stop\"].Call <-API{a.Ch, cmd, nil}\n\treturn\n}", "func (p *BoxPeer) Stop() {\n\tp.proc.Close()\n}", "func (j Jibi) Stop() {\n\tj.RunCommand(CmdStop, nil)\n}", "func (eis *eventSocket) stop() error {\n\teis.log.Info(\"closing Chain IPC\")\n\terrs := wrappers.Errs{}\n\terrs.Add(eis.unregisterFn(), eis.socket.Close())\n\treturn errs.Err\n}", "func (p *GaugeCollectionProcess) Stop() {\n\tclose(p.stop)\n}", "func (sc *controller) stopScraping() {\n\tclose(sc.done)\n}", "func (m *Master) Stop(procSign string, out *StopRsp) error {\n\trtnCode, err := m.StopInstance(procSign, syscall.SIGINT)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*out = StopRsp{\n\t\tReturnCode: rtnCode,\n\t\tProcSign: procSign,\n\t}\n\treturn nil\n}", "func Stop(pid int) {\n\t_ = unix.Kill(pid, unix.SIGSTOP)\n}", "func (n *Notary) Stop() {\n\tclose(n.stopCh)\n}", "func (nm *nodeManager) stop(path string) error {\n\tnodeProcess, exists := nm.nodes[path]\n\tif !exists {\n\t\treturn nil\n\t}\n\tdelete(nm.nodes, nodeProcess.path)\n\treturn nodeProcess.stop()\n}", "func (e *Engine) Stop() {\n\tif atomic.CompareAndSwapInt32(&e.stopping, 0, 1) {\n\t\te.wg.Wait()\n\t\te.running = 0\n\t\te.stopping = 0\n\t}\n}", "func (o *influxDBLogger) stop() error {\n\treturn nil\n}", "func (h *ProxyHealth) stop() {\n\tif h.cancel != nil {\n\t\th.cancel <- struct{}{}\n\t\tclose(h.cancel)\n\t\th.cancel = nil\n\t}\n}", "func (b *BTCVanity) Stop() {\n\tb.stop <- true\n}", "func (b *BTCVanity) Stop() {\n\tb.stop <- true\n}", "func op_STOP(pc *uint64, in *interpreter, ctx *callCtx) uint64 {\n\treturn 0\n}", "func (m *mapper) stop() { syncClose(m.done) }", "func Stop(args ...string) {\n switch {\n 
case cfg.Kill:\n Kill(args...)\n default:\n runInstances(\"Stopping\", func(i int, id string) error {\n defer os.Remove(pidFileName(i))\n return run(\"stop\", id)\n })\n }\n}", "func (g *Gopher) Stop() {\n\tif g.state == running {\n\t\tg.done <- struct{}{}\n\t\tg.state = stopped\n\t\tg.finalize()\n\t}\n}", "func (s *Envoy) Stop() error {\n\tlog.Printf(\"Kill Envoy ...\\n\")\n\terr := s.cmd.Process.Kill()\n\tlog.Printf(\"Kill Envoy ... Done\\n\")\n\treturn err\n}", "func (q *testQueue) stop() {\n\tif atomic.LoadInt32(&q.active) == 0 {\n\t\treturn\n\t}\n\n\tatomic.StoreInt32(&q.active, 0)\n\n\tclose(q.wait)\n\tq.muw.Lock()\n\tq.wg.Wait()\n\tq.muw.Unlock()\n}", "func (ldp *loopbackDataPlane) stop() bool {\n\t//This function is empty because packet capture in loopback is done as part of p4rt packet-ins but stop() still has to be defined as part of the interface definition\n\treturn true\n}", "func (_m *MockCompactionPlanContext) stop() {\n\t_m.Called()\n}", "func (w *Worker) Stop() {\n\tclose(w.stopCh)\n}", "func (v *VpxEncoder) Stop() {\n\tv.release()\n}", "func (app *frame) Stop() {\n\tapp.isStopped = true\n}", "func (f *flushDaemon) stop() {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\n\tif f.stopC == nil { // daemon not running\n\t\treturn\n\t}\n\n\tf.stopC <- struct{}{}\n\t<-f.stopDone\n\n\tf.stopC = nil\n\tf.stopDone = nil\n}", "func Stop() {\n\t// /bin/dbus-send --system --dest=org.ganesha.nfsd --type=method_call /org/ganesha/nfsd/admin org.ganesha.nfsd.admin.shutdown\n}", "func (service *Service) Stop() {\n\tclose(service.jobQueue)\n}", "func (p *Pipeline) Stop() {\n\tC.gstreamer_receive_stop_pipeline(p.Pipeline)\n}", "func (s *samplerBackendRateCounter) Stop() {\n\tclose(s.exit)\n\t<-s.stopped\n}", "func execStop(_ int, p *gop.Context) {\n\targs := p.GetArgs(1)\n\tsignal.Stop(args[0].(chan<- os.Signal))\n}", "func (_e *MockCompactionPlanContext_Expecter) stop() *MockCompactionPlanContext_stop_Call {\n\treturn &MockCompactionPlanContext_stop_Call{Call: _e.mock.On(\"stop\")}\n}", "func (c *haTracker) stop() {\n\tif c.cfg.EnableHATracker {\n\t\tc.cancel()\n\t\t<-c.done\n\t}\n}", "func (jbobject *ShuffleShuffleBlockResolver) Stop() {\n\t_, err := jbobject.CallMethod(javabind.GetEnv(), \"stop\", javabind.Void)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n}" ]
[ "0.76916784", "0.76539624", "0.75390005", "0.7419075", "0.7382383", "0.734479", "0.7291145", "0.72867364", "0.71419865", "0.7018204", "0.6978908", "0.69516915", "0.69140995", "0.68218535", "0.67954063", "0.67853224", "0.677519", "0.6761666", "0.67545587", "0.67410165", "0.6731457", "0.67268294", "0.6701503", "0.6699718", "0.668738", "0.66784805", "0.66776145", "0.6671549", "0.6653921", "0.6647926", "0.6628805", "0.6605749", "0.65882796", "0.65835404", "0.656615", "0.6564948", "0.654966", "0.65268016", "0.6518796", "0.65160716", "0.6513758", "0.6503734", "0.6494055", "0.6481644", "0.64635026", "0.64507544", "0.6440992", "0.6437566", "0.643231", "0.64270043", "0.6425185", "0.64238876", "0.64238876", "0.6421931", "0.6406666", "0.64017534", "0.64012754", "0.6398002", "0.6392595", "0.6386989", "0.6384363", "0.63808835", "0.6375928", "0.63737214", "0.63727415", "0.63712984", "0.6370961", "0.6344408", "0.63350654", "0.63184446", "0.6317175", "0.6314615", "0.6307359", "0.6302393", "0.62964183", "0.6294968", "0.62939095", "0.6292612", "0.62845874", "0.6282491", "0.6282491", "0.6282074", "0.62819016", "0.62765014", "0.6264128", "0.62547445", "0.6250187", "0.6246337", "0.62462145", "0.62456036", "0.6229506", "0.62279564", "0.6227576", "0.62145615", "0.6210619", "0.6205481", "0.6202048", "0.61991125", "0.6197283", "0.6194643", "0.61818457" ]
0.0
-1
C returns the streaming data channel.
func (e *binaryExprEvaluator) C() <-chan map[string]interface{} { return e.c }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *Subscription) C() <-chan interface{} {\n\treturn s.channel\n}", "func (s *subscription) C() <-chan interface{} {\n\treturn s.c\n}", "func (c *dataReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func (uc *UnboundedChannel) Get() <-chan interface{} {\n\treturn uc.channel\n}", "func (l *Logger) C() chan<- interface{} {\n\treturn l.src\n}", "func (s *Scanner) C() <-chan []Measurement {\n\treturn s.ch\n}", "func (p *HostedProgramInfo) Channel() io.ReadWriteCloser {\n\treturn p.TaoChannel\n}", "func (conn *Connection) Channel() chan []byte {\n\treturn conn.channel\n}", "func (p *literalProcessor) C() <-chan map[string]interface{} { return p.c }", "func (remote *SerialRemote) Channel() chan []byte {\n\treturn remote.channel\n}", "func (ticker *PausableTicker) GetChannel() <-chan time.Time {\n\treturn ticker.channel\n}", "func (o *Output) Read(channel int) *Buffer {\n\treturn o.channels[channel].Copy()\n}", "func (f *FFS) Get(ctx context.Context, c cid.Cid) (io.Reader, error) {\n\tstream, err := f.client.Get(ctx, &rpc.GetRequest{\n\t\tCid: util.CidToString(c),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treader, writer := io.Pipe()\n\tgo func() {\n\t\tfor {\n\t\t\treply, err := stream.Recv()\n\t\t\tif err == io.EOF {\n\t\t\t\t_ = writer.Close()\n\t\t\t\tbreak\n\t\t\t} else if err != nil {\n\t\t\t\t_ = writer.CloseWithError(err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t_, err = writer.Write(reply.GetChunk())\n\t\t\tif err != nil {\n\t\t\t\t_ = writer.CloseWithError(err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn reader, nil\n}", "func (s *p4RuntimeServer) StreamChannel(stream p4.P4Runtime_StreamChannelServer) error {\n\tfmt.Println(\"Starting bi-directional channel\")\n\tfor {\n\t\tinData, err := stream.Recv()\n\t\tif err == io.EOF {\n\t\t\treturn nil\n\t\t}\n\t\tfmt.Printf(\"%v\", inData)\n\t}\n\n\treturn nil\n}", "func (wp *Pool) C() <-chan Processor {\n\treturn wp.resultChan\n}", "func (p *pipeline) Channel() Channel {\n\treturn p.channel\n}", "func (c *webSocketClosedClient) GetStream() rpcc.Stream { return c.Stream }", "func (r *reducer) C() <-chan map[string]interface{} { return r.c }", "func (c *Computation) Data() <-chan *messages.DataMessage {\n\treturn c.dataCh\n}", "func (p *Player) Channel() *api.Channel {\n\tretCh := make(chan *api.Channel)\n\tp.chGetChannel <- retCh\n\tc := <-retCh\n\treturn c\n}", "func (me *T) Data() <-chan float64 {\n\n\t// Create channel.\n\t//\n\t// We will return this to the caller.\n\t//\n\t// We will also spawn a goroutine and output the data from this datasack has onto it.\n\t//\n\t\tout := make(chan float64)\n\n\t// Spawn a goroutine that will output the data from this datasack onto the channel\n\t// we previously created.\n\t//\n\t// Note that this goroutine will probably block. 
But that's OK, since it is in\n\t// its own goroutine (and shouldn't take anything else down with it).\n\t//\n\t\tgo func() {\n\t\t\tfor _,value := range me.slice {\n\t\t\t\tout <- value\n\t\t\t}\n\n\t\t\tclose(out)\n\t\t}()\n\n\t// Return.\n\t\treturn out\n}", "func ReadData(c <-chan string) {\n\tfmt.Printf(\"Read Data: %s\\n\", <-c) // 只能收\n}", "func (wet *WETReader) Channel() (<-chan struct { Entry *WETEntry; Err error }) {\n channel := make(chan struct { Entry *WETEntry; Err error })\n go func() {\n defer func() {\n wet.Close()\n close(channel)\n }()\n for {\n entry, err := wet.extractEntry()\n channel <- struct { Entry *WETEntry; Err error }{ entry, err }\n if err != nil {\n return\n }\n }\n }()\n return channel\n}", "func (s *GameSocket) ReadChannel() <-chan *packet.Packet {\n\treturn s.readChan\n}", "func GetChannel(protocol, host string, port int, secureConfig *tls.Config) (ReaderWriterCloser, error) {\n\tvar conn net.Conn\n\tvar err error\n\tconn, err = net.Dial(protocol, host+\":\"+strconv.Itoa(port))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif protocol == \"tcp\" {\n\t\tconn.(*net.TCPConn).SetKeepAlive(true)\n\t\tconn.(*net.TCPConn).SetKeepAlivePeriod(30 * time.Second)\n\t}\n\tif secureConfig != nil {\n\t\tconn = tls.Client(conn, secureConfig)\n\t}\n\tvar readerWriter ReaderWriterCloser = &Channel{\n\t\tprotocol: protocol,\n\t\thost: host,\n\t\tport: port,\n\t\tconn: conn,\n\t\tmaxRead: 8 * 1024,\n\t\treadBuffer: make([]byte, 0),\n\t\twriteBuffer: make([]byte, 0),\n\t\twriteChannel: make(chan writeComplete, 100),\n\t\treadTimeout: 60 * time.Second,\n\t\twriteTimeout: 60 * time.Second,\n\t}\n\tgo readerWriter.(*Channel).writeRoutine()\n\treturn readerWriter, nil\n}", "func (f *feedback) Channel() (<-chan *FeedbackMessage, error) {\n\tif f.conn != nil {\n\t\treturn f.chanel, nil\n\t}\n\n\tif err := f.createConnection(); err != nil {\n\t\tlogerr(\"Unable to start feedback connection: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tf.stopWait.Add(1)\n\tgo f.monitorService()\n\n\treturn f.chanel, nil\n}", "func (c *webSocketFrameReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func (res channelBase) Channel() *types.Channel {\n\treturn res.channel\n}", "func (o *KinesisOutput) GetOutputChannel() chan []byte {\n\treturn o.outputChannel\n}", "func (s VectOp) Stream() <-chan float64 {\n\tch := make(chan float64)\n\tgo feed(ch, s)\n\treturn ch\n}", "func (s *f64) Channel(c int) Floating {\n\treturn floatingChannel{\n\t\tbuffer: s,\n\t\tchannel: c,\n\t}\n}", "func (m *Manager) InputChannel() chan []byte {\n\treturn m.byteStream\n}", "func (m *MetricsExtracor) Channel() chan<- interface{} {\n\treturn m.channel\n}", "func (m *Module) Stream() <-chan bar.Output {\n\tch := base.NewChannel()\n\tgo m.worker(ch)\n\treturn ch\n}", "func (c *webSocketFrameSentClient) GetStream() rpcc.Stream { return c.Stream }", "func (meta *MetaAI) GetChannel(c chan string) {\n\tmeta.l.Lock()\n\tdefer meta.l.Unlock()\n\n\tmeta.i = c\n}", "func getData(client pb.DataClient, filter *pb.DataFilter) {\r\n\t// calling the streaming API\r\n\tstream, err := client.GetData(context.Background(), filter)\r\n\tif err != nil {\r\n\t\tlog.Fatalf(\"Error on get data: %v\", err)\r\n\t}\r\n\tfor {\r\n\t\tdata, err := stream.Recv()\r\n\t\tif err == io.EOF {\r\n\t\t\tbreak\r\n\t\t}\r\n\t\tif err != nil {\r\n\t\t\tlog.Fatalf(\"%v.GetData(_) = _, %v\", client, err)\r\n\t\t}\r\n\t\tlog.Printf(\"Data: %v\", data)\r\n\t}\r\n}", "func (m *mapper) C() <-chan map[string]interface{} { return m.c }", "func (c 
*requestServedFromCacheClient) GetStream() rpcc.Stream { return c.Stream }", "func (e *EventNotif) Channel() (res <-chan Event) {\n\treturn e.eventsCh\n}", "func (c *webSocketCreatedClient) GetStream() rpcc.Stream { return c.Stream }", "func (stream *MAMWriteStream) Open() (trinary.Trytes, error) {\n\tchannelID, err := stream.m.ChannelCreate(5)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tstream.currentChannelID = channelID\n\treturn channelID, nil\n}", "func (gi *Invoker) StreamRecv(param *common.Params) error {\n\t//gloryPkg := newGloryRequestPackage(\"\", param.MethodName, uint64(common.StreamSendPkg), param.Seq)\n\t//gloryPkg.Params = append(gloryPkg.Params, param.Value)\n\t//gloryPkg.Header.ChanOffset = param.ChanOffset\n\t//gloryPkg.Header.Seq = param.Seq\n\t//if err := gloryPkg.sendToConn(gi.gloryConnClient, gi.handler); err != nil {\n\t//\tlog.Error(\"StreamRecv: gloryPkg.sendToConn(gi.conn, gi.handler) err =\", err)\n\t//\treturn GloryErrorConnErr\n\t//}\n\treturn nil\n}", "func WrapDataChannel(rtcDataChannel RTCDataChannel) (*DataChannel, error) {\n\trr, rw, err := Pipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdc := &DataChannel{\n\t\tdc: rtcDataChannel,\n\t\trr: rr,\n\t}\n\tdc.dc.OnMessage(func(data []byte) {\n\t\tlog.WithField(\"data\", data).\n\t\t\tDebug(\"datachannel message\")\n\n\t\tif rw != nil {\n\t\t\t_, err := rw.Write(data)\n\t\t\tif err != nil {\n\t\t\t\trw.Close()\n\t\t\t\trw = nil\n\t\t\t}\n\t\t}\n\t})\n\treturn dc, nil\n}", "func (c *responseReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *CounterChannel) Get() uint64 {\n\tc.check()\n\treturn <-c.readCh\n}", "func (c *webTransportClosedClient) GetStream() rpcc.Stream { return c.Stream }", "func (m *MetricsHolder) Channel() chan<- interface{} {\n\treturn m.channel\n}", "func (c *ChanReader) Read(out []byte) (int, error) {\n\tif c.buffer == nil {\n\t\treturn 0, io.EOF\n\t}\n\tn := copy(out, c.buffer)\n\tc.buffer = c.buffer[n:]\n\tif len(out) <= len(c.buffer) {\n\t\treturn n, nil\n\t} else if n > 0 {\n\t\t// We have some data to return, so make the channel read optional\n\t\tselect {\n\t\tcase p := <-c.input:\n\t\t\tif p == nil { // Stream was closed\n\t\t\t\tc.buffer = nil\n\t\t\t\tif n > 0 {\n\t\t\t\t\treturn n, nil\n\t\t\t\t}\n\t\t\t\treturn 0, io.EOF\n\t\t\t}\n\t\t\tn2 := copy(out[n:], p.Data)\n\t\t\tc.buffer = p.Data[n2:]\n\t\t\treturn n + n2, nil\n\t\tdefault:\n\t\t\treturn n, nil\n\t\t}\n\t}\n\tvar p *StreamChunk\n\tselect {\n\tcase p = <-c.input:\n\tcase <-c.interrupt:\n\t\tc.buffer = c.buffer[:0]\n\t\treturn n, ErrInterrupted\n\t}\n\tif p == nil { // Stream was closed\n\t\tc.buffer = nil\n\t\treturn 0, io.EOF\n\t}\n\tn2 := copy(out[n:], p.Data)\n\tc.buffer = p.Data[n2:]\n\treturn n + n2, nil\n}", "func (handle *Handle) GetStream() (Stream, error) {\n\tvar s Stream\n\tvar some *C.cudaStream_t\n\t//x := C.cudnnHandle_t(handle.Pointer())\n\n\ty := C.cudnnGetStream(handle.x, some)\n\ts.stream = *some\n\treturn s, Status(y).error(\"(*Handle).GetStream\")\n}", "func (c *loadingFinishedClient) GetStream() rpcc.Stream { return c.Stream }", "func (std *ReaderService) Read() (<-chan []byte, error) {\n\tmc := make(chan []byte, 0)\n\n\tstd.pub.Subscribe(mc)\n\n\treturn mc, nil\n}", "func (c ConnectionAdapter) Channel() (Channel, error) {\n\treturn c.Connection.Channel()\n}", "func SourceData(data ...int) <-chan int {\n\tfmt.Println(\"num:\", len(data))\n\tch := make(chan int, 80000000)\n\tgo func() {\n\t\tfor _, v := range data {\n\t\t\tch <- v\n\t\t}\n\t\tclose(ch)\n\t}()\n\treturn 
ch\n}", "func (c *webSocketHandshakeResponseReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *baseChannels) GetS3Channel() chan *S3Object {\n\treturn c.s3Channel\n}", "func (r *Readiness) GetChannel() chan ReadinessMessage {\n\treturn r.channel\n}", "func (nc *NetClient) readChannel() chan struct {\n\t*arbor.ProtocolMessage\n\terror\n} {\n\tout := make(chan struct {\n\t\t*arbor.ProtocolMessage\n\t\terror\n\t})\n\t// read messages continuously and send results back on a channel\n\tgo func() {\n\t\tdefer func() {\n\t\t\t// ensure send on closed channel doesn't cause panic\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tif _, ok := err.(runtime.Error); !ok {\n\t\t\t\t\t// silently cancel runtime errors, but allow other errors\n\t\t\t\t\t// to propagate.\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\tfor {\n\t\t\tm := new(arbor.ProtocolMessage)\n\t\t\terr := nc.ReadWriteCloser.Read(m)\n\t\t\tout <- struct {\n\t\t\t\t*arbor.ProtocolMessage\n\t\t\t\terror\n\t\t\t}{m, err}\n\t\t}\n\t}()\n\treturn out\n}", "func (c *eventSourceMessageReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func UnbufferedChannel() {\n\t/*\n\tbufferred channel would be c := make(chan int, 50)\n\tunbufferred channel\n\t */\n\tc := make(chan int)\n\n\tgo func() {\n\t\tfor i := 0; i < 10; i++ {\n\t\t\t// put number onto channel\n\t\t\t// code stops until the value is taken from the channel\n\t\t\t// like a relay race\n\t\t\tc <- i\n\t\t}\n\t}() // self executing anonymous function\n\n\tgo func() {\n\t\tfor i := 0; i < 10; i++ {\n\t\t\t// take the number off the channel\n\t\t\t// receive the value from the channel and print it\n\t\t\tv := <-c\n\t\t\tfmt.Println(v)\n\n\t\t}\n\t}()\n\n\ttime.Sleep(time.Second)\n}", "func stream_copy(src io.Reader, dst io.Writer) <-chan int {\n\tbuf := make([]byte, 1024)\n\tsync_channel := make(chan int)\n\tgo func() {\n\t\tdefer func() {\n\t\t\tif con, ok := dst.(net.Conn); ok {\n\t\t\t\tcon.Close()\n\t\t\t\tlog.Printf(\"Connection from %v is closed\\n\", con.RemoteAddr())\n\t\t\t}\n\t\t\tsync_channel <- 0 // Notify that processing is finished\n\t\t}()\n\t\tfor {\n\t\t\tvar nBytes int\n\t\t\tvar err error\n\t\t\tnBytes, err = src.Read(buf)\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\tlog.Printf(\"Read error: %s\\n\", err)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t_, err = dst.Write(buf[0:nBytes])\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Write error: %s\\n\", err)\n\t\t\t}\n\t\t}\n\t}()\n\treturn sync_channel\n}", "func (c *CryptoStreamConn) GetDataForWriting() []byte {\n\tdefer c.writeBuf.Reset()\n\tdata := make([]byte, c.writeBuf.Len())\n\tcopy(data, c.writeBuf.Bytes())\n\treturn data\n}", "func Stream(out chan<- Value) error {\n for {\n v, err := DoSomething() // HL\n if err != nil {\n return err\n }\n out <- v // HL\n }\n }", "func streamCopy(src io.Reader, dst io.Writer) <-chan int {\n\tbuf := make([]byte, 1024)\n\tsyncChannel := make(chan int)\n\tgo func() {\n\t\tdefer func() {\n\t\t\tif con, ok := dst.(net.Conn); ok {\n\t\t\t\tcon.Close()\n\t\t\t\t//log.Printf(\"Connection from %v is closed\\n\", con.RemoteAddr())\n\t\t\t}\n\t\t\tsyncChannel <- 0 // Notify that processing is finished\n\t\t}()\n\t\tfor {\n\n\t\t\tvar nBytes int\n\t\t\tvar err error\n\t\t\tnBytes, err = src.Read(buf)\n\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\t//log.Printf(\"Read error: %s\\n\", err)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t_, err = dst.Write(buf[0:nBytes])\n\t\t\tif err != nil {\n\t\t\t\t//log.Fatalf(\"Write error: 
%s\\n\", err)\n\t\t\t}\n\t\t}\n\t}()\n\treturn syncChannel\n}", "func (v Vehicle) Stream() (chan *StreamEvent, chan error, error) {\n\turl := StreamURL + \"/stream/\" + strconv.Itoa(v.VehicleID) + \"/?values=\" + StreamParams\n\treq, _ := http.NewRequest(\"GET\", url, nil)\n\treq.SetBasicAuth(ActiveClient.Auth.Email, v.Tokens[0])\n\tresp, err := ActiveClient.HTTP.Do(req)\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\teventChan := make(chan *StreamEvent)\n\terrChan := make(chan error)\n\tgo readStream(resp, eventChan, errChan)\n\n\treturn eventChan, errChan, nil\n}", "func NewChannel() (chan *fluent.FluentRecordSet, chan Stat) {\n\tmessageCh := make(chan *fluent.FluentRecordSet, MessageChannelBufferLen)\n\tmonitorCh := make(chan Stat, MonitorChannelBufferLen)\n\treturn messageCh, monitorCh\n}", "func NewChannel() (chan *fluent.FluentRecordSet, chan Stat) {\n\tmessageCh := make(chan *fluent.FluentRecordSet, MessageChannelBufferLen)\n\tmonitorCh := make(chan Stat, MonitorChannelBufferLen)\n\treturn messageCh, monitorCh\n}", "func (c *requestInterceptedClient) GetStream() rpcc.Stream { return c.Stream }", "func (r *Receiver) Read() interface{} {\n\tutils.Debugln(\"Reading\")\n\tb := <-r.C // wait for a broadast channel\n\tv := b.v // retrieve value from received broadcastchannel\n\tr.C <- b // write same broadcastchannel to broadcastchannel\n\tr.C = b.c // broadcastchannel now becomes bc from broadcast\n\treturn v // return received value\n}", "func Stream(ctx context.Context, wC etcd.WatchChan) <-chan *etcd.Event {\n\teC := make(chan *etcd.Event, 1024)\n\n\tgo func(ctx context.Context, ec chan *etcd.Event) {\n\t\t// this unblocks any callers ranging on ec\n\t\tdefer close(ec)\n\n\t\t// etcd client will close this channel if error occurs\n\t\tfor wResp := range wC {\n\t\t\tif ok, err := chkctx.Check(ctx); ok {\n\t\t\t\tlog.Info().Str(\"component\", \"Stream\").Msgf(\"stream ctx canceled. returning: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif wResp.Canceled {\n\t\t\t\tlog.Info().Str(\"component\", \"Stream\").Msgf(\"watch channel error encountered. 
returning: %v\", wResp.Err())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, event := range wResp.Events {\n\t\t\t\teC <- event\n\t\t\t}\n\t\t}\n\t}(ctx, eC)\n\n\treturn eC\n}", "func (c *ChangeWatcher) outC() chan *RoomChange {\n if len(c.buffer) <= 0 {\n return nil\n }\n return c.out\n}", "func (closer *Closer) CloseChannel() chan struct{} {\n\treturn closer.channel\n}", "func (r *ChannelReader) Read(b []byte) (sz int, err error) {\n\tif len(b) == 0 {\n\t\treturn 0, io.ErrShortBuffer\n\t}\n\n\tfor {\n\t\tif len(r.buf) > 0 {\n\t\t\tif len(r.buf) <= len(b) {\n\t\t\t\tsz = len(r.buf)\n\t\t\t\tcopy(b, r.buf)\n\t\t\t\tr.buf = nil\n\t\t\t} else {\n\t\t\t\tcopy(b, r.buf)\n\t\t\t\tr.buf = r.buf[len(b):]\n\t\t\t\tsz = len(b)\n\t\t\t}\n\t\t\treturn sz, nil\n\t\t}\n\n\t\tvar ok bool\n\t\tif r.deadline.IsZero() {\n\t\t\tr.buf, ok = <-r.c\n\t\t} else {\n\t\t\ttimer := time.NewTimer(r.deadline.Sub(time.Now()))\n\t\t\tdefer timer.Stop()\n\n\t\t\tselect {\n\t\t\tcase r.buf, ok = <-r.c:\n\t\t\tcase <-timer.C:\n\t\t\t\treturn 0, context.DeadlineExceeded\n\t\t\t}\n\t\t}\n\t\tif len(r.buf) == 0 && !ok {\n\t\t\treturn 0, io.EOF\n\t\t}\n\t}\n}", "func (swp *SourceWorkerPool) GetOutputChannel() (chan map[string]interface{}, error) {\n\treturn swp.outputChannel, nil\n}", "func (p *Publisher) GetChannel() *amqp.Channel {\n\tp.publicMethodsLock.Lock()\n\tdefer p.publicMethodsLock.Unlock()\n\treturn p.getChannelWithoutLock()\n}", "func StreamCreateFile(data interface{}, offset int, flags Flags) (Channel, error) {\n\tvar ch C.DWORD\n\tswitch data := data.(type) {\n\tcase CBytes:\n\t\tch = C.BASS_StreamCreateFile(1, data.Data, culong(offset), culong(data.Length), cuint(flags))\n\tcase string:\n\t\tcstring := unsafe.Pointer(C.CString(data))\n\t\tdefer C.free(cstring)\n\t\tch = C.BASS_StreamCreateFile(0, cstring, culong(offset), 0, cuint(flags))\n\tcase []byte:\n\t\tcbytes := C.CBytes(data)\n\t\tch = C.BASS_StreamCreateFile(1, cbytes, culong(offset), culong(len(data)), cuint(flags))\n\t\t// unlike BASS_SampleLoad, BASS won't make a copy of the sample data internally, which means we can't just pass a pointer to the Go bytes. 
Instead we need to set a sync to free the bytes when the stream it's associated with is freed\n\t\tif ch != 0 {\n\t\t\tchannel := Channel(ch)\n\t\t\t_, err := channel.SetSync(SYNC_FREE, SYNC_ONETIME, 0, SyncprocFree, cbytes)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\t}\n\treturn channelToError(ch)\n}", "func (sc *SoundCloud) Stream(track string) (io.ReadCloser, error) {\n\t// Get the HTTP Stream\n\trsp, err := http.Get(sc.streamUrl(track).String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Createa http stream buffer\n\tbuff := buffer.HTTPBuffer(rsp)\n\tgo buff.Buffer() // Start buffering\n\tscs := &SoundCloudStream{\n\t\tbuffer: buff,\n\t\tdecoder: &mpa.Reader{Decoder: &mpa.Decoder{Input: buff}},\n\t}\n\treturn scs, nil\n}", "func (cc *CounterControl) StreamValues() (chan *CounterData, error) {\n\tentity := cc.counter.ReadWildcardRequest()\n\tentityList := []*p4V1.Entity{entity}\n\n\tcounterEntityCh, err := cc.control.Client.ReadEntities(entityList)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcdataChannel := make(chan *CounterData, cc.counter.Size)\n\tgo func() {\n\t\tdefer close(cdataChannel)\n\t\tfor e := range counterEntityCh {\n\t\t\tcounterData := getCounterData(e)\n\t\t\tcdataChannel <- &counterData\n\t\t}\n\t}()\n\n\treturn cdataChannel, nil\n}", "func (p *Pool) Consume() <-chan interface{} {\n\treturn p.c\n}", "func BufferedChannels(){\n\tc := make(chan int, 2)\n\tc <- 1\n\tc <- 2\n\tfmt.Println(<-c)\n\tfmt.Println(<-c)\n}", "func (std *LineReaderService) Read() (<-chan []byte, error) {\n\tmc := make(chan []byte, 0)\n\n\tstd.pub.Subscribe(mc)\n\n\treturn mc, nil\n}", "func (s *Chan) Pipe(rwc io.ReadWriteCloser) {\n\ts.connection = rwc\n\tgo s.readFromReader(rwc)\n\tgo s.writeToWriter(rwc)\n}", "func outputData(outputChannel chan string) {\n\n\tfor {\n\t\tdata := <-outputChannel\n\t\tfmt.Println(data)\n\t}\n}", "func (l *ChannelList) Get(key string) *Channel {\n\t// get a conn bucket\n\tb := l.Bucket(key)\n\tb.Lock()\n\tif c, ok := b.data[key]; ok {\n\t\tb.Unlock()\n\t\tChStat.IncrAccess()\n\t\treturn c\n\t}\n\tb.Unlock()\n\treturn nil\n}", "func (c *requestWillBeSentClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *webTransportConnectionEstablishedClient) GetStream() rpcc.Stream { return c.Stream }", "func (p *pool) get() (*channel, error) {\n\tif p.closed {\n\t\treturn nil, ErrPoolClosed\n\t}\n\n\tactiveChannel, ok := <-p.readyChannel\n\tif !ok {\n\t\treturn nil, ErrPoolClosed\n\t}\n\n\treturn activeChannel, nil\n}", "func (c *webSocketFrameErrorClient) GetStream() rpcc.Stream { return c.Stream }", "func (k *ChannelKeeper) Channel() *amqp.Channel {\n\treturn k.msgCh\n}", "func (this *FtpsClient) OpenFtpDataChannel(_FtpCommand_S string, _ExpectedReplyCode_i int) (rReplyCode_i int, rReplyMessage_S string, rRts error) {\n\trRts = this.sendRequestToFtpServerDataConn(_FtpCommand_S, _ExpectedReplyCode_i)\n\treturn\n}", "func (c *Client) StreamingDirect(ctx context.Context) (chan Event, error) {\n\treturn c.streaming(ctx, \"direct\", nil)\n}", "func (c *webSocketWillSendHandshakeRequestClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *webTransportCreatedClient) GetStream() rpcc.Stream { return c.Stream }", "func (ch *RingChannel) Out() <-chan interface{} {\n\treturn ch.output\n}", "func (c *remoteConn) OpenChannel(name string, data []byte) (ssh.Channel, error) {\n\tchannel, _, err := c.sconn.OpenChannel(name, data)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\treturn channel, nil\n}", "func (r *realTimer) 
C() <-chan time.Time {\n\treturn r.timer.C\n}", "func (c *cdcClient) recv() {\n\tc.debug(\"recv call\")\n\tdefer c.debug(\"recv return\")\n\n\tvar err error\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tc.shutdown(err)\n\t\t}\n\t\tclose(c.events)\n\t}()\n\n\tvar now time.Time\n\tfor {\n\t\t_, bytes, rerr := c.wsConn.ReadMessage()\n\t\tnow = time.Now()\n\t\tif err != nil {\n\t\t\tif websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) {\n\t\t\t\terr = rerr\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\t// CDC events should be the bulk of data we recv, so presume it's that.\n\t\tvar e CDCEvent\n\t\tif err = json.Unmarshal(bytes, &e); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t// If event ID is set (not empty), then it's a CDC event as expected\n\t\tif e.Id != \"\" {\n\t\t\tc.debug(\"cdc event: %#v\", e)\n\t\t\tselect {\n\t\t\tcase c.events <- e: // send CDC event to caller\n\t\t\tdefault:\n\t\t\t\tc.debug(\"caller blocked\")\n\t\t\t\tc.shutdown(ErrCallerBlocked)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\t// It's not a CDC event, so it should be a control message\n\t\t\tvar msg map[string]interface{}\n\t\t\tif err = json.Unmarshal(bytes, &msg); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif _, ok := msg[\"control\"]; !ok {\n\t\t\t\t// This shouldn't happen: data is not a CDC event or a control message\n\t\t\t\tc.shutdown(ErrBadData)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err = c.control(msg, now); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func (c *Channel) Channels() Channels {\n\treturn c.children\n}", "func (r *chanReader) Read(data []byte) (int, error) {\n\tvar ok bool\n\tfor {\n\t\tif len(r.buf) > 0 {\n\t\t\tn := copy(data, r.buf)\n\t\t\tr.buf = r.buf[n:]\n\t\t\tmsg := windowAdjustMsg{\n\t\t\t\tPeersId: r.clientChan.peersId,\n\t\t\t\tAdditionalBytes: uint32(n),\n\t\t\t}\n\t\t\treturn n, r.clientChan.writePacket(marshal(msgChannelWindowAdjust, msg))\n\t\t}\n\t\tr.buf, ok = <-r.data\n\t\tif !ok {\n\t\t\treturn 0, io.EOF\n\t\t}\n\t}\n\tpanic(\"unreachable\")\n}", "func bufferedChannelTest() {\n\tch := make(chan int, 2)\n\tch <- 1\n\tch <- 2\n\t// ch <- 3 \n\tfmt.Println(<-ch)\n\tfmt.Println(<-ch)\n}" ]
[ "0.6759161", "0.6602864", "0.61977434", "0.6079438", "0.6015766", "0.59815305", "0.5894958", "0.5827573", "0.5811892", "0.5789283", "0.5787107", "0.5769193", "0.5764194", "0.57620907", "0.5744001", "0.57320946", "0.5710497", "0.5659543", "0.56507605", "0.5619693", "0.56017905", "0.55412376", "0.5511171", "0.55090964", "0.5497537", "0.5495607", "0.5485725", "0.54844636", "0.5469122", "0.54626715", "0.544634", "0.5438606", "0.5433448", "0.5426227", "0.5426032", "0.541403", "0.5402727", "0.53927493", "0.5391055", "0.5363698", "0.53503805", "0.53499436", "0.5327847", "0.5324007", "0.53198254", "0.5317453", "0.53115785", "0.5304445", "0.5300533", "0.528647", "0.5275732", "0.5242419", "0.5225599", "0.5221367", "0.5218112", "0.52100986", "0.5196145", "0.5187939", "0.51850754", "0.51704204", "0.516902", "0.51667434", "0.516405", "0.5132859", "0.512949", "0.51228505", "0.51228505", "0.51204574", "0.5118935", "0.5117812", "0.510126", "0.5093387", "0.5090184", "0.5083214", "0.50773835", "0.50705665", "0.5065232", "0.5064193", "0.5061628", "0.5051974", "0.50492626", "0.5042226", "0.5035097", "0.5022692", "0.501978", "0.5019312", "0.5008553", "0.49940854", "0.4989732", "0.4983017", "0.49754548", "0.4958028", "0.49492064", "0.49402693", "0.4940178", "0.49344555", "0.49289474", "0.49203157", "0.49100748", "0.4905373" ]
0.56509525
18
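The one-line accessor above pairs with the run loop that appears later in this set: run() produces onto e.c and closes it when finished, while C() is how a consumer reaches that channel. A minimal sketch of the consuming side, assuming only that contract (the drain helper is hypothetical, not part of the source):

// drain consumes every result map the evaluator emits and returns them
// as a slice. It relies only on the contract shown above: C() exposes
// the output channel, and the producer closes it when evaluation ends.
func drain(e *binaryExprEvaluator) []map[string]interface{} {
	var results []map[string]interface{}
	for m := range e.C() { // loop ends once run() closes the channel
		results = append(results, m)
	}
	return results
}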
name returns the source name.
func (e *binaryExprEvaluator) name() string { return "" }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *Source) Name() string {\n\treturn s.SourceName\n}", "func (s *Source) Name() string {\n\treturn \"spyse\"\n}", "func (r Source) GetName() string {\n\treturn r.Name\n}", "func (s *Source) Name() string {\n\treturn \"crtsh\"\n}", "func (e *Event) SourceName() collection.Name {\n\tif e.Source != nil {\n\t\treturn e.Source.Name()\n\t}\n\treturn \"\"\n}", "func (s Source) Name() string { return \"rdt\" }", "func (d *DataPacket) SourceName() string {\n\ti := 44 //the ending index for the string, because it is 0 terminated\n\tfor i < 108 && d.data[i] != 0 {\n\t\ti++\n\t}\n\treturn string(d.data[44:i])\n}", "func (s *Source) GetName() string {\n\treturn s.Name\n}", "func (s *Source) Name() string {\n\treturn \"github\"\n}", "func (s *YAMLFileSource) Name() (name string) {\n\treturn fmt.Sprintf(\"yaml file(%s)\", s.path)\n}", "func (o ResourceMetricSourceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ResourceMetricSource) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o ResourceMetricSourceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ResourceMetricSource) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o CloudHealthcareSourceOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v CloudHealthcareSource) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o *ActionDTO) GetSourceName() string {\n\tif o == nil || o.SourceName == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.SourceName\n}", "func (o GetEventSourcesSourceOutput) EventSourceName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetEventSourcesSource) string { return v.EventSourceName }).(pulumi.StringOutput)\n}", "func (o ContainerResourceMetricSourceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ContainerResourceMetricSource) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o ContainerResourceMetricSourceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ContainerResourceMetricSource) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o DataSourceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *DataSource) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (o *BulletinDTO) GetSourceName() string {\n\tif o == nil || o.SourceName == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.SourceName\n}", "func (o *TransactionSplit) GetSourceName() string {\n\tif o == nil || o.SourceName.Get() == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.SourceName.Get()\n}", "func (s *scraper) Source() string {\n\treturn s.name\n}", "func (g componentSourceGenerator) GetName() string {\n\treturn g.Name\n}", "func (o CloudHealthcareSourcePtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *CloudHealthcareSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (s *Structured) GetName() string {\n\treturn s.cloudEvent.Source\n}", "func (s *CommandLineSource) Name() (name string) {\n\treturn \"command-line\"\n}", "func (o ResourceMetricSourcePtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ResourceMetricSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ResourceMetricSourcePtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ResourceMetricSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Name\n\t}).(pulumi.StringPtrOutput)\n}", 
"func (o IopingSpecVolumePersistentVolumeClaimSpecDataSourceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumePersistentVolumeClaimSpecDataSource) string { return v.Name }).(pulumi.StringOutput)\n}", "func (r *reducer) name() string { return r.stmt.Source.(*Measurement).Name }", "func (o ResourceMetricSourcePatchOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ResourceMetricSourcePatch) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o ResourceMetricSourcePatchOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ResourceMetricSourcePatch) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (j *Jsonnet) Name(wantsNameSpaced bool) string {\n\tbase := filepath.Base(j.source)\n\tname := strings.TrimSuffix(base, filepath.Ext(base))\n\tif !wantsNameSpaced {\n\t\treturn name\n\t}\n\n\tif j.module == \"/\" {\n\t\treturn name\n\t}\n\n\treturn path.Join(j.module, name)\n}", "func (o ArgoCDExportSpecStoragePvcDataSourceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ArgoCDExportSpecStoragePvcDataSource) string { return v.Name }).(pulumi.StringOutput)\n}", "func (src *Tracer) Name() string {\n\treturn src.name\n}", "func (o IopingSpecVolumeVolumeSourceProjectedSourcesSecretOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceProjectedSourcesSecret) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (s *Data) Source() string {\n\treturn fmt.Sprintf(\"data:%v\", path.Clean(s.Location))\n}", "func (o FioSpecVolumeVolumeSourceProjectedSourcesSecretOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceProjectedSourcesSecret) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (s *MapSource) Name() (name string) {\n\treturn \"map\"\n}", "func (*Plugin) SourceFileName() string {\n\treturn sourceFileName\n}", "func (*Plugin) SourceFileName() string {\n\treturn sourceFileName\n}", "func (o ApplicationStatusOperationStateSyncResultSourceHelmFileParametersOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusOperationStateSyncResultSourceHelmFileParameters) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o ContainerResourceMetricSourcePatchOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ContainerResourceMetricSourcePatch) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o ContainerResourceMetricSourcePatchOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ContainerResourceMetricSourcePatch) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o IopingSpecVolumeVolumeSourceProjectedSourcesSecretPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceProjectedSourcesSecret) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ContainerResourceMetricSourcePtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ContainerResourceMetricSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ContainerResourceMetricSourcePtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ContainerResourceMetricSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o FioSpecVolumePersistentVolumeClaimSpecDataSourceOutput) Name() pulumi.StringOutput {\n\treturn 
o.ApplyT(func(v FioSpecVolumePersistentVolumeClaimSpecDataSource) string { return v.Name }).(pulumi.StringOutput)\n}", "func (mySource *Source) Source() (param string) {\n\treturn mySource.Sourcevar\n}", "func (o RegistryTaskSourceTriggerOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v RegistryTaskSourceTrigger) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o BuildSpecSourceCredentialsOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BuildSpecSourceCredentials) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o BuildRunStatusBuildSpecSourceCredentialsOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BuildRunStatusBuildSpecSourceCredentials) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o ApplicationSpecSourcePluginEnvOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApplicationSpecSourcePluginEnv) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o ApplicationSpecSourceHelmFileParametersOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationSpecSourceHelmFileParameters) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o FioSpecVolumeVolumeSourceProjectedSourcesSecretPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceProjectedSourcesSecret) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (s *EnvironmentSource) Name() (name string) {\n\treturn \"environment\"\n}", "func (o SourceOutput) DisplayName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Source) pulumi.StringOutput { return v.DisplayName }).(pulumi.StringOutput)\n}", "func (c *auditLog) getName() string {\n\treturn c.name\n}", "func (o CloudHealthcareSourceResponseOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v CloudHealthcareSourceResponse) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o IopingSpecVolumePersistentVolumeClaimSpecDataSourcePtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumePersistentVolumeClaimSpecDataSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (s *SecretsSource) Name() (name string) {\n\treturn \"secrets\"\n}", "func (a Asset) source() string {\n\tsource := fileNameWithoutExt(a.PublicID)\n\n\tif !isURL(source) {\n\t\tvar err error\n\t\tsource, err = url.QueryUnescape(strings.Replace(source, \"%20\", \"+\", -1))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tsource = smartEscape(source)\n\n\tif a.Suffix != \"\" {\n\t\tsource += fmt.Sprintf(\"/%s\", a.Suffix)\n\t}\n\n\tif filepath.Ext(a.PublicID) != \"\" {\n\t\tsource += filepath.Ext(a.PublicID)\n\t}\n\n\treturn source\n}", "func (s *SourceImportAuthor) GetName() string {\n\tif s == nil || s.Name == nil {\n\t\treturn \"\"\n\t}\n\treturn *s.Name\n}", "func (s *Stream) Name() string { return s.file.Name() }", "func (p ProjectInit) Name() string {\n\treturn string(p)\n}", "func (o ApplicationStatusOperationStateSyncResultSourceHelmParametersOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusOperationStateSyncResultSourceHelmParameters) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (ci converterInfo) Source() string {\n\treturn ci.source\n}", "func (o ResourceMetricSourcePatchPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ResourceMetricSourcePatch) *string {\n\t\tif v == nil {\n\t\t\treturn 
nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ResourceMetricSourcePatchPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ResourceMetricSourcePatch) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ApplicationStatusHistorySourceHelmFileParametersOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusHistorySourceHelmFileParameters) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o ApplicationStatusSyncComparedToSourcePluginEnvOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApplicationStatusSyncComparedToSourcePluginEnv) string { return v.Name }).(pulumi.StringOutput)\n}", "func (fi *fileInfo) Name() string { return fi.name }", "func (o BuildRunStatusBuildSpecSourceCredentialsPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *BuildRunStatusBuildSpecSourceCredentials) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o TargetProjectOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *TargetProject) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (this Intro) name() estring {\n\treturn this.n\n}", "func (dt *Targeter) name() string {\n\tvar id string\n\tif dt.IDs != nil {\n\t\tid = \"{id}\"\n\t}\n\treturn fmt.Sprintf(\"%s %s/%s/%s\", dt.Method, dt.BaseURL, dt.Endpoint, id)\n}", "func (fe *fileEntry) Name() string { return fe.name }", "func (o ApplicationSpecSourceHelmParametersOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationSpecSourceHelmParameters) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (e *Event) Source() string {\n\treturn e.conn\n}", "func (o ApplicationOperationSyncSourceHelmFileParametersOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationOperationSyncSourceHelmFileParameters) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o FioSpecVolumeVolumeSourceProjectedSourcesConfigMapOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceProjectedSourcesConfigMap) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o LookupServiceIntegrationResultOutput) SourceServiceName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupServiceIntegrationResult) string { return v.SourceServiceName }).(pulumi.StringOutput)\n}", "func (o ApplicationStatusOperationStateSyncResultSourcePluginEnvOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApplicationStatusOperationStateSyncResultSourcePluginEnv) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o IopingSpecVolumeVolumeSourceProjectedSourcesConfigMapOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceProjectedSourcesConfigMap) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (t Type) Source() string {\n\treturn t.source\n}", "func (o *ActionDTO) SetSourceName(v string) {\n\to.SourceName = &v\n}", "func (g GitHub) Name() string {\n\tif g.local != \"\" {\n\t\treturn g.local\n\t}\n\treturn g.binary\n}", "func (p Packet) Name() (name string) {\n\t// todo: think of ways to make this not a compiled in hack\n\t// todo: collectd 4 uses different patterns for some plugins\n\t// https://collectd.org/wiki/index.php/V4_to_v5_migration_guide\n\tswitch p.Plugin {\n\tcase \"df\":\n\t\tname = fmt.Sprintf(\"df_%s_%s\", p.PluginInstance, p.TypeInstance)\n\tcase 
\"interface\":\n\t\tname = fmt.Sprintf(\"%s_%s\", p.Type, p.PluginInstance)\n\tcase \"load\":\n\t\tname = \"load\"\n\tcase \"memory\":\n\t\tname = fmt.Sprintf(\"memory_%s\", p.TypeInstance)\n\tdefault:\n\t\tname = fmt.Sprintf(\"%s_%s_%s_%s\", p.Plugin, p.PluginInstance, p.Type, p.TypeInstance)\n\t}\n\treturn name\n}", "func (o ApplicationStatusHistorySourcePluginEnvOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApplicationStatusHistorySourcePluginEnv) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o SiaFileInfo) Name() string {\n\treturn o.FileName\n}", "func (o ApplicationStatusSyncComparedToSourceHelmFileParametersOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusSyncComparedToSourceHelmFileParameters) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func source() string {\n\treturn \"I am an evil gopher\"\n}", "func (c *withNameAndCode) Name() string {\n\treturn c.name\n}", "func (o BuildSpecSourceCredentialsPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *BuildSpecSourceCredentials) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ContainerResourceMetricSourcePatchPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ContainerResourceMetricSourcePatch) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ContainerResourceMetricSourcePatchPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ContainerResourceMetricSourcePatch) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o *BulletinDTO) SetSourceName(v string) {\n\to.SourceName = &v\n}", "func GenVolumeSourceName(source string, index int64) string {\n\treturn source + common.NameSeparator + strconv.FormatInt(index, 10)\n}", "func (e *EDNS) Name() string { return name }", "func fileSource(filename string, i int) string {\n\treturn fmt.Sprintf(\"%s:%d\", filename, i)\n}", "func (ds *Datasource) GetName() string {\n\treturn ds.name\n}", "func (e *Entry) Name() string {\n\tif len(e.path) == 0 {\n\t\treturn \"\"\n\t}\n\treturn e.path[len(e.path)-1]\n}" ]
[ "0.78794163", "0.75167346", "0.7478469", "0.7449441", "0.7437304", "0.7387519", "0.73252875", "0.72747105", "0.7236276", "0.7090298", "0.7044449", "0.7044449", "0.6945361", "0.6810892", "0.68048626", "0.6776871", "0.6776871", "0.6724642", "0.6661173", "0.6651805", "0.6625454", "0.6564692", "0.65482056", "0.6521428", "0.6444199", "0.6427358", "0.6427358", "0.64273053", "0.64019334", "0.63985956", "0.63985956", "0.63970643", "0.63568366", "0.63545203", "0.6345645", "0.6311637", "0.6308424", "0.6303546", "0.6260226", "0.6260226", "0.6247084", "0.6245244", "0.6245244", "0.62372345", "0.62371534", "0.62371534", "0.6222376", "0.6209819", "0.62011534", "0.6200014", "0.61755013", "0.6159335", "0.61291414", "0.6119233", "0.6113662", "0.61117154", "0.61012226", "0.6096734", "0.60674095", "0.6056915", "0.60292953", "0.6028448", "0.60220057", "0.6015895", "0.6010574", "0.5983533", "0.5958791", "0.5958791", "0.5958361", "0.59547806", "0.59520304", "0.59423393", "0.5939665", "0.59377843", "0.59375775", "0.59307474", "0.5929412", "0.5926923", "0.5923438", "0.59232825", "0.59212494", "0.5920124", "0.59168303", "0.59166396", "0.5913426", "0.5906614", "0.5902419", "0.58879215", "0.5887569", "0.58861053", "0.5886017", "0.5874572", "0.58686024", "0.58668345", "0.58668345", "0.5858391", "0.5853577", "0.5842946", "0.5840868", "0.58404166", "0.58368796" ]
0.0
-1
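The empty string above reads naturally once the method is seen as part of a shared contract: a binary expression spans two sources, so it has no single source name to report. Below is a plausible interface grouping the methods that recur across these records; the interface name and member set are assumptions, not taken from the source.

// processor is a hypothetical interface tying together the methods that
// recur across these records; the actual interface in the source may
// differ in name and membership.
type processor interface {
	start()                           // begin evaluation in a goroutine
	name() string                     // source name; "" when none applies
	C() <-chan map[string]interface{} // output channel, closed when done
}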
run runs the processor loop to read subprocessor output and combine it.
func (e *binaryExprEvaluator) run() {
	for {
		// Read LHS value.
		lhs, ok := <-e.lhs.C()
		if !ok {
			break
		}

		// Read RHS value.
		rhs, ok := <-e.rhs.C()
		if !ok {
			break
		}

		// Merge maps.
		m := make(map[string]interface{})
		for k, v := range lhs {
			m[k] = e.eval(v, rhs[k])
		}
		for k, v := range rhs {
			// Skip value if already processed in lhs loop.
			if _, ok := m[k]; ok {
				continue
			}
			m[k] = e.eval(float64(0), v)
		}

		// Return value.
		e.c <- m
	}

	// Mark the channel as complete.
	close(e.c)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (a *aggregator) Run() {\n\tgo a.submitter()\n\n\tfor m := range a.in {\n\t\tfor _, out_m := range a.process(m) {\n\t\t\ta.out <- out_m\n\t\t}\n\t}\n}", "func (p *SingleLineParser) run() {\n\tfor input := range p.inputChan {\n\t\tp.process(input)\n\t}\n\tp.lineHandler.Stop()\n}", "func (p *literalProcessor) run() {\n\tfor {\n\t\tselect {\n\t\tcase ch := <-p.done:\n\t\t\tclose(ch)\n\t\t\treturn\n\t\tcase p.c <- map[string]interface{}{\"\": p.val}:\n\t\t}\n\t}\n}", "func (r *ride) run(ctx context.Context, outc chan<- pipeline.Event) error {\n\tpositions, errc := pipeline.Generate(ctx, r.positions)\n\tsegments, errc1 := pipeline.Reduce(ctx, positions, r.segments)\n\ttotal, err := r.fare(ctx, segments)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terrm := pipeline.MergeErrors(ctx, errc, errc1)\n\tfor err := range errm {\n\t\tswitch {\n\t\tcase err == ErrLinesEmpty:\n\t\tcase err != nil:\n\t\t\treturn err\n\t\t}\n\t}\n\n\tselect {\n\tcase <-ctx.Done():\n\tcase outc <- total:\n\t}\n\n\treturn nil\n}", "func run(arg0 string, args ...string) error {\n\tcmd := exec.Command(arg0, args...)\n\tpipe, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcmd.Stderr = cmd.Stdout\n\n\tfmt.Println(\"Running command:\", arg0, strings.Join(args, \" \"))\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Stream the output from r10k as it is generated\n\tscanner := bufio.NewScanner(pipe)\n\tscanner.Split(bufio.ScanLines)\n\tfor scanner.Scan() {\n\t\tm := scanner.Text()\n\t\tfmt.Println(m)\n\t}\n\n\terr = cmd.Wait()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (r *reducer) run() {\nloop:\n\tfor {\n\t\t// Combine all data from the mappers.\n\t\tdata := make(map[string][]interface{})\n\t\tfor _, m := range r.mappers {\n\t\t\tkv, ok := <-m.C()\n\t\t\tif !ok {\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t\tfor k, v := range kv {\n\t\t\t\tdata[k] = append(data[k], v)\n\t\t\t}\n\t\t}\n\n\t\t// Reduce each key.\n\t\tfor k, v := range data {\n\t\t\tr.fn(k, v, r)\n\t\t}\n\t}\n\n\t// Mark the channel as complete.\n\tclose(r.c)\n}", "func (s *scanner) run() {\n\tfor state := scanMain; state != nil; {\n\t\tstate = state(s)\n\t}\n\tclose(s.items)\n}", "func (r *Runner) run() {\n\tfor {\n\t\ttask := r.rq.Pop()\n\t\tr.process(task)\n\t}\n}", "func (r *processRunner) run(pipeID, componentID string, cancel chan struct{}, in <-chan message, meter *meter) (<-chan message, <-chan error) {\n\terrc := make(chan error, 1)\n\tr.in = in\n\tr.out = make(chan message)\n\tgo func() {\n\t\tdefer close(r.out)\n\t\tdefer close(errc)\n\t\tcall(r.reset, pipeID, errc) // reset hook\n\t\tvar err error\n\t\tvar m message\n\t\tvar ok bool\n\t\tfor {\n\t\t\t// retrieve new message\n\t\t\tselect {\n\t\t\tcase m, ok = <-in:\n\t\t\t\tif !ok {\n\t\t\t\t\tcall(r.flush, pipeID, errc) // flush hook\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase <-cancel:\n\t\t\t\tcall(r.interrupt, pipeID, errc) // interrupt hook\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tm.applyTo(componentID) // apply params\n\t\t\tm.Buffer, err = r.fn(m.Buffer) // process new buffer\n\t\t\tif err != nil {\n\t\t\t\terrc <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tmeter = meter.sample(int64(m.Buffer.Size())).message()\n\n\t\t\tm.feedback.applyTo(componentID) // apply feedback\n\n\t\t\t// send message further\n\t\t\tselect {\n\t\t\tcase r.out <- m:\n\t\t\tcase <-cancel:\n\t\t\t\tcall(r.interrupt, pipeID, errc) // interrupt hook\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn r.out, errc\n}", "func (p *StreamToSubStream) Run() {\n\tdefer 
p.OutSubStream.Close()\n\n\tscipipe.Debug.Println(\"Creating new information packet for the substream...\")\n\tsubStreamIP := scipipe.NewIP(\"\")\n\tscipipe.Debug.Printf(\"Setting in-port of process %s to IP substream field\\n\", p.Name())\n\tsubStreamIP.SubStream = p.In\n\n\tscipipe.Debug.Printf(\"Sending sub-stream IP in process %s...\\n\", p.Name())\n\tp.OutSubStream.Send(subStreamIP)\n\tscipipe.Debug.Printf(\"Done sending sub-stream IP in process %s.\\n\", p.Name())\n}", "func (runner *McRunner) processOutput() {\n\trunner.WaitGroup.Add(1)\n\tdefer runner.WaitGroup.Done()\n\tfor {\n\t\tselect {\n\t\tcase <-runner.killChannel:\n\t\t\treturn\n\t\tdefault:\n\t\t\tbuf := make([]byte, 256)\n\t\t\tn, err := runner.outPipe.Read(buf)\n\t\t\tstr := string(buf[:n])\n\n\t\t\tif (err == nil) && (n > 1) {\n\t\t\t\tif runner.Settings.PassthroughStdOut {\n\t\t\t\t\tfmt.Print(str)\n\t\t\t\t}\n\t\t\t\tmsgExp, _ := regexp.Compile(\"\\\\[.*\\\\] \\\\[.*INFO\\\\] \\\\[.*DedicatedServer\\\\]: <.*>\")\n\t\t\t\ttpsExp, _ := regexp.Compile(\"\\\\[.*\\\\] \\\\[.*INFO\\\\] \\\\[.*DedicatedServer\\\\]: Dim\")\n\t\t\t\tplayerExp, _ := regexp.Compile(\"\\\\[.*\\\\] \\\\[.*INFO\\\\] \\\\[.*DedicatedServer\\\\]: There are\")\n\t\t\t\tdoneExp, _ := regexp.Compile(\"\\\\[.*\\\\] \\\\[.*INFO\\\\] \\\\[.*DedicatedServer\\\\]: Done\")\n\n\t\t\t\tif runner.State == Starting {\n\t\t\t\t\tif doneExp.Match(buf) {\n\t\t\t\t\t\trunner.State = Running\n\t\t\t\t\t\tfmt.Println(\"Minecraft server done loading.\")\n\t\t\t\t\t}\n\t\t\t\t} else if runner.State == Running {\n\t\t\t\t\tif msgExp.Match(buf) {\n\t\t\t\t\t\trunner.MessageChannel <- str[strings.Index(str, \"<\"):]\n\t\t\t\t\t} else if tpsExp.Match(buf) {\n\t\t\t\t\t\tcontent := str[strings.Index(str, \"Dim\"):]\n\n\t\t\t\t\t\tnumExp, _ := regexp.Compile(\"[+-]?([0-9]*[.])?[0-9]+\")\n\t\t\t\t\t\tnums := numExp.FindAllString(content, -1)\n\t\t\t\t\t\tdim, _ := strconv.Atoi(nums[0])\n\t\t\t\t\t\ttps, _ := strconv.ParseFloat(nums[len(nums)-1], 32)\n\n\t\t\t\t\t\tm := make(map[int]float32)\n\t\t\t\t\t\tm[dim] = float32(tps)\n\n\t\t\t\t\t\trunner.tpsChannel <- m\n\t\t\t\t\t} else if playerExp.Match(buf) {\n\t\t\t\t\t\tcontent := str[strings.Index(str, \"There\"):]\n\n\t\t\t\t\t\tnumExp, _ := regexp.Compile(\"[+-]?([0-9]*[.])?[0-9]+\")\n\t\t\t\t\t\tplayers, _ := strconv.Atoi(numExp.FindString(content))\n\n\t\t\t\t\t\trunner.playerChannel <- players\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (m *mapper) run() {\n\tfor m.itr.NextIterval() {\n\t\tm.fn(m.itr, m)\n\t}\n\tclose(m.c)\n}", "func (this *service) processor() {\n\tthis.logger.Debugf(\"(%s) Starting processor\", this.cid())\n\n\tthis.wgStarted.Done()\n\tdefer this.wgStopped.Done()\n\n\tfor {\n\t\t// 1. Find out what message is next and the size of the message\n\t\tmtype, total, err := this.peekMessageSize()\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tthis.logger.Errorf(\"(%s) Error peeking next message size: %v\", this.cid(), err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tmsg, n, err := this.peekMessage(mtype, total)\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tthis.logger.Errorf(\"(%s) Error peeking next message: %v\", this.cid(), err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\t//this.logger.Debugf(\"(%s) Received: %s\", this.cid(), msg)\n\n\t\tthis.inStat.increment(int64(n))\n\n\t\t// 5. 
Process the read message\n\t\terr = this.processIncoming(msg)\n\t\tif err != nil {\n\t\t\tif err != errDisconnect {\n\t\t\t\tthis.logger.Errorf(\"(%s) Error processing %s: %v\", this.cid(), msg.Name(), err)\n\t\t\t} else {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// 7. We should commit the bytes in the buffer so we can move on\n\t\t_, err = this.in.ReadCommit(total)\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tthis.logger.Errorf(\"(%s) Error committing %d read bytes: %v\", this.cid(), total, err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\t// 7. Check to see if done is closed, if so, exit\n\t\tif this.isDone() && this.in.Len() == 0 {\n\t\t\treturn\n\t\t}\n\n\t\t//if this.inStat.msgs%1000 == 0 {\n\t\t//\tthis.logger.Debugf(\"(%s) Going to process message %d\", this.cid(), this.inStat.msgs)\n\t\t//}\n\t}\n}", "func runProcessor() {\n\t// process callback is invoked for each message delivered from\n\t// \"example-stream\" topic.\n\tcb := func(ctx goka.Context, msg interface{}) {\n\n\t\t// during the second run, this should break (as value should already be in context)\n\t\tif val := ctx.Value(); val != nil {\n\t\t\tpanic(fmt.Sprintf(\"dealing with a value already in context %v\", ctx.Value()))\n\t\t}\n\n\t\t// store received value in context (first run)\n\t\tctx.SetValue(msg.(string))\n\t\tlog.Printf(\"stored to ctx key = %s, msg = %v\", ctx.Key(), msg)\n\t}\n\n\t// Define a new processor group. The group defines all inputs, outputs, and\n\t// serialization formats. The group-table topic is \"example-group-table\".\n\tg := goka.DefineGroup(group,\n\t\tgoka.Input(topic, new(codec.String), cb),\n\t\tgoka.Persist(new(codec.String)),\n\t)\n\n\tp, err := goka.NewProcessor(brokers, g)\n\tif err != nil {\n\t\tlog.Fatalf(\"error creating processor: %v\", err)\n\t}\n\tif err = p.Run(context.Background()); err != nil {\n\t\tlog.Fatalf(\"error running processor: %v\", err)\n\t}\n}", "func (bf *brainfog) run() {\n\tfor bf.ip < len(bf.program) {\n\t\terr := bf.doInstruction()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tclose(bf.outCh)\n}", "func (c *Pump) run() error {\n\t// FIXME aconway 2015-03-17: error handling\n\tc.waiter.Add(2)\n\tvar readError, writeError error\n\n\tgo func() { // Read\n\t\trbuf, rbuf2 := make([]byte, bufferSize), make([]byte, bufferSize)\n\t\tfor {\n\t\t\trbuf = rbuf[:cap(rbuf)]\n\t\t\tn, err := c.conn.Read(rbuf)\n\t\t\tif n > 0 {\n\t\t\t\tc.read <- rbuf[:n]\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treadError = err\n\t\t\t\tbreak\n\t\t\t}\n\t\t\trbuf, rbuf2 = rbuf2, rbuf // Swap the buffers, fill the one not in use.\n\t\t}\n\t\tclose(c.read)\n\t\tc.waiter.Done()\n\t}()\n\n\tgo func() { // Write\n\t\tfor wbuf := range c.write {\n\t\t\t_, err := c.conn.Write(wbuf)\n\t\t\tif err != nil {\n\t\t\t\twriteError = err\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tc.waiter.Done()\n\t}()\n\n\t// Proton driver loop\n\twbuf, wbuf2 := make([]byte, bufferSize), make([]byte, bufferSize)\n\twbuf = c.pop(wbuf) // First write buffer\n\tfor { // handle pn_transport_t\n\t\tselect {\n\t\tcase buf, ok := <-c.read: // Read a buffer\n\t\t\tif !ok { // Read channel closed\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tc.push(buf)\n\n\t\tcase c.write <- wbuf: // Write a buffer\n\t\t\twbuf, wbuf2 = wbuf2, wbuf // Swap the buffers, fill the unused one.\n\t\t\twbuf = c.pop(wbuf) // Next buffer to write\n\n\t\tcase f := <-c.inject: // Function injected from another goroutine\n\t\t\tf()\n\t\t}\n\t\tc.process() // FIXME aconway 2015-03-17: error handling\n\t}\n\n\tclose(c.write)\n\tc.waiter.Wait() // Wait for read/write 
goroutines to finish\n\tswitch {\n\tcase readError != nil:\n\t\treturn readError\n\tcase writeError != nil:\n\t\treturn writeError\n\t}\n\treturn nil\n}", "func (r *sinkRunner) run(pipeID, componentID string, cancel chan struct{}, in <-chan message, meter *meter) <-chan error {\n\terrc := make(chan error, 1)\n\tgo func() {\n\t\tdefer close(errc)\n\t\tcall(r.reset, pipeID, errc) // reset hook\n\t\tvar m message\n\t\tvar ok bool\n\t\tfor {\n\t\t\t// receive new message\n\t\t\tselect {\n\t\t\tcase m, ok = <-in:\n\t\t\t\tif !ok {\n\t\t\t\t\tcall(r.flush, pipeID, errc) // flush hook\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase <-cancel:\n\t\t\t\tcall(r.interrupt, pipeID, errc) // interrupt hook\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tm.params.applyTo(componentID) // apply params\n\t\t\terr := r.fn(m.Buffer) // sink a buffer\n\t\t\tif err != nil {\n\t\t\t\terrc <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tmeter = meter.sample(int64(m.Buffer.Size())).message()\n\n\t\t\tm.feedback.applyTo(componentID) // apply feedback\n\t\t}\n\t}()\n\n\treturn errc\n}", "func (l *Clogger) run() {\n\tvar m string\n\tfor m = range l.in {\n\t\tfmt.Fprint(l.w, m)\n\t}\n\treturn\n}", "func (sO *ScreenOutput) Run() {\n\tfor _, channel := range sO.DataInput {\n\t\tgo sO.runChannelInput(channel)\n\t}\n}", "func (p *AsmParser) run() {\n\tdefer close(p.Output)\n\n\tvar errs errorList\n\n\tif p.Error != nil {\n\t\treturn\n\t}\n\n\tvar i asm // instruction, reset to 0 after every write\n\tvar err error\n\tvar d, c, j asm // dest, comp, jump, OR together for final instruction\n\n\twriteResult := func() {\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\n\t\tif errs == nil {\n\t\t\tp.Output <- fmt.Sprintf(\"%.16b\", i)\n\t\t}\n\n\t\ti = 0\n\t}\n\n\tfor index, lex := range p.lexemes {\n\n\t\tswitch lex.instruction {\n\n\t\t// possible edge case, hitting EOF before an EOL\n\t\tcase asmEOF:\n\t\t\tfallthrough\n\n\t\tcase asmEOL:\n\t\t\tprev := p.previousInstruction(index)\n\n\t\t\tif prev.instruction != asmLABEL {\n\t\t\t\twriteResult()\n\t\t\t}\n\n\t\tcase asmAINSTRUCT:\n\t\t\tprev := p.previousInstruction(index)\n\n\t\t\tif prev.instruction == asmAINSTRUCT {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"WARNING - redundant loading of A-Register on line %d\\n\", prev.lineNum)\n\t\t\t}\n\n\t\t\ti, err = p.mapToA(lex)\n\n\t\tcase asmLABEL:\n\t\t\tindex += 2 // skip label and EOL\n\t\t\tcontinue\n\n\t\tcase asmJUMP:\n\t\t\tj, err = mapJmp(lex.value)\n\t\t\ti = i | j\n\n\t\tcase asmCOMP:\n\t\t\tc, err = mapCmp(lex.value)\n\t\t\ti = i | c\n\n\t\tcase asmDEST:\n\t\t\td, err = mapDest(lex.value)\n\t\t\ti = i | d\n\t\t}\n\n\t\tindex++\n\t}\n\n\tp.Error = errs.asError()\n}", "func (o *KinesisOutput) RunOutputLoop() {\n\tdt := &Dnstap{}\n\tfor frame := range o.outputChannel {\n\t\tif err := proto.Unmarshal(frame, dt); err != nil {\n\t\t\tlog.Fatalf(\"dnstap.TextOutput: proto.Unmarshal() failed: %s\\n\", err)\n\t\t\tcontinue\n\t\t}\n\t\tbuf, ok := o.format(dt)\n\t\tif !ok {\n\t\t\tlog.Fatalf(\"dnstap.TextOutput: text format function failed\\n\")\n\t\t\tcontinue\n\t\t}\n\t\t//Send buf to kinesis\n\t\t_, err := o.client.PutRecord(&kinesis.PutRecordInput{\n\t\t\tData: buf,\n\t\t\tStreamName: aws.String(o.streamname),\n\t\t\tPartitionKey: aws.String(o.PartitionKey),\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"aws client PutRecord() failed: %s\\n\", err)\n\t\t\tbreak\n\t\t}\n\t}\n\n\tclose(o.wait)\n}", "func (p *spanParser) run() {\n\tfor p.state = parseSpan; p.state != nil; {\n\t\tp.state = p.state(p)\n\t}\n\tclose(p.spanChan)\n}", "func (p *parser) 
run() {\n\tfor parserState := parseStart; parserState != nil; {\n\t\tparserState = parserState(p)\n\t}\n\tclose(p.records)\n}", "func runProcessor() {\n\t// process callback is invoked for each message delivered from\n\t// \"example-stream\" topic.\n\tcb := func(ctx goka.Context, msg interface{}) {\n\t\tvar counter int64\n\t\t// ctx.Value() gets from the group table the value that is stored for\n\t\t// the message's key.\n\t\tif val := ctx.Value(); val != nil {\n\t\t\tcounter = val.(int64)\n\t\t}\n\t\tcounter++\n\t\t// SetValue stores the incremented counter in the group table for in\n\t\t// the message's key.\n\t\tctx.SetValue(counter)\n\t\tlog.Printf(\"key = %s, counter = %v, msg = %v\", ctx.Key(), counter, msg)\n\t}\n\n\t// Define a new processor group. The group defines all inputs, outputs, and\n\t// serialization formats. The group-table topic is \"example-group-table\".\n\tg := goka.DefineGroup(group,\n\t\tgoka.Input(topic, new(codec.String), cb),\n\t\tgoka.Persist(new(codec.Int64)),\n\t)\n\n\tp, err := goka.NewProcessor(brokers,\n\t\tg,\n\t\tgoka.WithTopicManagerBuilder(goka.TopicManagerBuilderWithTopicManagerConfig(tmc)),\n\t\tgoka.WithConsumerGroupBuilder(goka.DefaultConsumerGroupBuilder),\n\t)\n\tif err != nil {\n\t\tlog.Fatalf(\"error creating processor: %v\", err)\n\t}\n\tctx, cancel := context.WithCancel(context.Background())\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tdefer close(done)\n\t\tif err = p.Run(ctx); err != nil {\n\t\t\tlog.Printf(\"error running processor: %v\", err)\n\t\t}\n\t}()\n\n\tsigs := make(chan os.Signal)\n\tgo func() {\n\t\tsignal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGKILL)\n\t}()\n\n\tselect {\n\tcase <-sigs:\n\tcase <-done:\n\t}\n\tcancel()\n\t<-done\n}", "func (c *Cyclone) run() {\n\nrunloop:\n\tfor {\n\t\tselect {\n\t\tcase <-c.Shutdown:\n\t\t\t// received shutdown, drain input channel which will be\n\t\t\t// closed by main\n\t\t\tgoto drainloop\n\t\tcase msg := <-c.Input:\n\t\t\tif msg == nil {\n\t\t\t\t// this can happen if we read the closed Input channel\n\t\t\t\t// before the closed Shutdown channel\n\t\t\t\tcontinue runloop\n\t\t\t}\n\t\t\tif err := c.process(msg); err != nil {\n\t\t\t\tc.Death <- err\n\t\t\t\t<-c.Shutdown\n\t\t\t\tbreak runloop\n\t\t\t}\n\t\t}\n\t}\n\ndrainloop:\n\tfor {\n\t\tselect {\n\t\tcase msg := <-c.Input:\n\t\t\tif msg == nil {\n\t\t\t\t// channel is closed\n\t\t\t\tbreak drainloop\n\t\t\t}\n\t\t\tc.process(msg)\n\t\t}\n\t}\n}", "func (pb *Pubsub) run() {\n\tfor {\n\t\tselect {\n\t\tcase t := <-pb.updateCh.Get():\n\t\t\tpb.updateCh.Load()\n\t\t\tif pb.done.HasFired() {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpb.callCallback(t.(*watcherInfoWithUpdate))\n\t\tcase <-pb.done.Done():\n\t\t\treturn\n\t\t}\n\t}\n}", "func (runner *CpuTestRunner) run(cmd string, args ...string) ([]byte, error) {\n\trunner.executions++\n\n\tif runner.executions%2 == 0 {\n\t\treturn []byte(runner.stat2), runner.err1\n\t} else {\n\t\treturn []byte(runner.stat1), runner.err2\n\t}\n\n}", "func (s *Service) run() {\n\n\t// Create a communicator for sending and receiving packets.\n\tcommunicator := comm.NewCommunicator(s.config.PollInterval, s.config.Port)\n\tdefer communicator.Stop()\n\n\t// Create a ticker for sending pings.\n\tpingTicker := time.NewTicker(s.config.PingInterval)\n\tdefer pingTicker.Stop()\n\n\t// Create a ticker for timeout checks.\n\tpeerTicker := time.NewTicker(s.config.PeerTimeout)\n\tdefer peerTicker.Stop()\n\n\t// Create the packet that will be sent to all peers.\n\tpkt := &comm.Packet{\n\t\tID: 
s.config.ID,\n\t\tUserData: s.config.UserData,\n\t}\n\n\t// Continue processing events until explicitly stopped.\n\tfor {\n\t\tselect {\n\t\tcase p := <-communicator.PacketChan:\n\t\t\ts.processPacket(p)\n\t\tcase <-pingTicker.C:\n\t\t\tcommunicator.Send(pkt)\n\t\tcase <-peerTicker.C:\n\t\t\ts.processPeers()\n\t\tcase <-s.stopChan:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (transmuxer *Transmuxer) Run() {\n\tif transmuxer.closed {\n\t\treturn\n\t}\n\n\tif transmuxer.running {\n\t\treturn\n\t}\n\n\ttransmuxer.running = true\n\n\tfor {\n\t\tvar sample float64\n\n\t\tfor _, streamer := range transmuxer.Streamers {\n\t\t\tnewSample, err := streamer.ReadSample()\n\t\t\tif err != nil {\n\t\t\t\tstreamer.setError(err)\n\t\t\t\tstreamer.Close()\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tsample += newSample * streamer.Volume\n\t\t}\n\n\t\tsample = sample * transmuxer.MasterVolume\n\n\t\tif transmuxer.FinalStream != nil {\n\t\t\terr := transmuxer.FinalStream.WriteSample(sample)\n\t\t\tif err != nil {\n\t\t\t\ttransmuxer.setError(err)\n\t\t\t\ttransmuxer.Close()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tif transmuxer.buffer != nil {\n\t\t\ttransmuxer.buffer = append(transmuxer.buffer, sample)\n\t\t}\n\t}\n}", "func (a *Agent) runProcessors(\n\tunits []*processorUnit,\n) error {\n\tvar wg sync.WaitGroup\n\tfor _, unit := range units {\n\t\twg.Add(1)\n\t\tgo func(unit *processorUnit) {\n\t\t\tdefer wg.Done()\n\n\t\t\tacc := NewAccumulator(unit.processor, unit.dst)\n\t\t\tfor m := range unit.src {\n\t\t\t\tif err := unit.processor.Add(m, acc); err != nil {\n\t\t\t\t\tacc.AddError(err)\n\t\t\t\t\tm.Drop()\n\t\t\t\t}\n\t\t\t}\n\t\t\tunit.processor.Stop()\n\t\t\tclose(unit.dst)\n\t\t\tlog.Printf(\"D! [agent] Processor channel closed\")\n\t\t}(unit)\n\t}\n\twg.Wait()\n\n\treturn nil\n}", "func (mc *MonitorCore) run(runtimeConf RuntimeConfig, stdin io.Reader, stdout io.Writer) error {\n\tmc.logger.Info(\"Starting Python runner child process\")\n\n\tcmd := exec.CommandContext(mc.ctx, runtimeConf.PythonBinary, runtimeConf.PythonArgs...)\n\tcmd.SysProcAttr = procAttrs()\n\tcmd.Stdin = stdin\n\tcmd.Stdout = stdout\n\tcmd.Env = runtimeConf.PythonEnv\n\n\t// Stderr is just the normal output from the Python code that isn't\n\t// specially encoded\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := cmd.Start(); err != nil {\n\t\treturn err\n\t}\n\n\tmc.logger = mc.logger.WithFields(log.Fields{\n\t\t\"runnerPID\": cmd.Process.Pid,\n\t})\n\n\tgo func() {\n\t\tscanner := utils.ChunkScanner(stderr)\n\t\tfor scanner.Scan() {\n\t\t\tmc.logger.Error(scanner.Text())\n\t\t}\n\t}()\n\n\tif err := cmd.Wait(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func Run() {\n\tloading.Prefix = loadingMsgProcess\n\tloading.Start()\n\n\tvar review []*Review\n\n\tlineCh := make(chan string, 100)\n\n\tgo readFile(inputFile, lineCh)\n\n\tfor line := range lineCh {\n\t\tindex := getIndexPosition(line, delimiter)\n\t\tif index != -1 {\n\t\t\tr, err := unmarshal(line[index:])\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"%s => %s\", errorMsgUnmarshal, err)\n\t\t\t}\n\t\t\treview = append(review, r)\n\t\t}\n\t}\n\tloading.Stop()\n\tloading.Prefix = loadingMsgWrite\n\tloading.FinalMSG = loadingMsgComplete\n\tloading.Start()\n\n\tif err := writeOut(review, outputFile); err != nil {\n\t\tlog.Fatalf(\"%s => %s\", errorMsgWriteOut, err)\n\t}\n\tloading.Stop()\n}", "func (k *PluginRunner) run(cmd *cobra.Command, _ []string) error {\n\tm, err := k.generate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := 
k.transform(m); err != nil {\n\t\treturn err\n\t}\n\n\tif err := k.print(cmd.OutOrStdout(), m); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (nm *NodeMonitor) run(sockPath, bpfRoot string) error {\n\tos.Remove(sockPath)\n\tif err := syscall.Mkfifo(sockPath, 0600); err != nil {\n\t\treturn fmt.Errorf(\"Unable to create named pipe %s: %s\", sockPath, err)\n\t}\n\n\tdefer os.Remove(sockPath)\n\n\tpipe, err := os.OpenFile(sockPath, os.O_RDWR, 0600)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to open named pipe for writing: %s\", err)\n\t}\n\n\tdefer pipe.Close()\n\n\tnm.pipeLock.Lock()\n\tnm.pipe = pipe\n\tnm.pipeLock.Unlock()\n\n\tnm.Launcher.SetArgs([]string{\"--bpf-root\", bpfRoot})\n\tif err := nm.Launcher.Run(); err != nil {\n\t\treturn err\n\t}\n\tmetrics.SubprocessStart.WithLabelValues(targetName).Inc()\n\n\tr := bufio.NewReader(nm.GetStdout())\n\tfor nm.GetProcess() != nil {\n\t\tl, err := r.ReadBytes('\\n') // this is a blocking read\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to read stdout from monitor: %s\", err)\n\t\t}\n\n\t\tvar tmp *models.MonitorStatus\n\t\tif err := json.Unmarshal(l, &tmp); err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to unmarshal stdout from monitor: %s\", err)\n\t\t}\n\n\t\tnm.setState(tmp)\n\t}\n\n\treturn fmt.Errorf(\"Monitor process quit unexpectedly\")\n}", "func (p *blockParser) run() {\n\tfor p.state = parseBegin; p.state != nil; {\n\t\tp.state = p.state(p)\n\t}\n\tclose(p.blockChan)\n}", "func run(input string) (part1 string, part2 string) {\n\t// Parse input and return output\n\tmemory := pkg.Parse(input, \",\")\n\t_, result := findMaxPhase(memory)\n\n\tpart1 = fmt.Sprintf(\"%v\", result)\n\t// Parse input and return output\n\t_, result = findFeedbackMaxPhase(memory)\n\tpart2 = fmt.Sprintf(\"%v\", result)\n\treturn\n}", "func (r *Reader) Run(ctx context.Context, outChan chan cortex_chunk.Chunk) {\n\terrChan := make(chan error)\n\tdefer close(outChan)\n\n\treadCtx, cancel := context.WithCancel(ctx)\n\n\t// starting workers\n\tfor i := 0; i < r.cfg.NumWorkers; i++ {\n\t\tr.workerGroup.Add(1)\n\t\tgo r.readLoop(readCtx, outChan, errChan)\n\t}\n\n\tgo func() {\n\t\t// cancel context when an error occurs or errChan is closed\n\t\tdefer cancel()\n\n\t\terr := <-errChan\n\t\tif err != nil {\n\t\t\tr.err = err\n\t\t\tlogrus.WithError(err).Errorln(\"error scanning chunks, stopping read operation\")\n\t\t\tclose(r.quit)\n\t\t}\n\t}()\n\n\tscanRequests := r.planner.Plan()\n\tlogrus.Infof(\"built %d plans for reading\", len(scanRequests))\n\n\tdefer func() {\n\t\t// let's wait for all workers to finish before we return.\n\t\t// An error in errChan would cause all workers to stop because we cancel the context.\n\t\t// Otherwise closure of scanRequestsChan (which is done after sending all the scanRequests) should make all workers stop.\n\t\tr.workerGroup.Wait()\n\t\tclose(errChan)\n\t}()\n\n\t// feeding scan requests to workers\n\tfor _, req := range scanRequests {\n\t\tselect {\n\t\tcase r.scanRequestsChan <- req:\n\t\t\tcontinue\n\t\tcase <-r.quit:\n\t\t\treturn\n\t\t}\n\t}\n\n\t// all scan requests are fed, close the channel\n\tclose(r.scanRequestsChan)\n}", "func (recorder *recorderSerialPort) run() {\n\tlog.Print(\"Serial Recorder running\")\n\tfor {\n\t\tselect {\n\n\t\t// Data received to record\n\t\tcase m := <-recorder.write:\n\t\t\t//log.Print(\"Got a recorder write\")\n\t\t\tif recorder.isClosing {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Check if a new file should be created\n\t\t\t// 18mb max size\n\t\t\tif 
recorder.fileSize+len(m) >= 1048576*18 {\n\t\t\t\tloadNewFile(recorder)\n\t\t\t}\n\n\t\t\t// Record the data\n\t\t\t//log.Print(\"Record serial data\")\n\t\t\tn, err := recorder.writer.Write(m)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error writing serial data to file: %s\", err.Error())\n\t\t\t}\n\t\t\trecorder.fileSize += n\n\t\t\t//log.Printf(\"Recorded %d bytes to the file\", n)\n\t\t}\n\t}\n}", "func (jr *joinReader) Run(wg *sync.WaitGroup) {\n\terr := jr.mainLoop()\n\tjr.output.Close(err)\n\tif wg != nil {\n\t\twg.Done()\n\t}\n}", "func (p *MultiLineParser) run() {\n\tflushTimer := time.NewTimer(p.flushTimeout)\n\tdefer func() {\n\t\tflushTimer.Stop()\n\t\t// make sure the content stored in the buffer gets sent,\n\t\t// this can happen when the stop is called in between two timer ticks.\n\t\tp.sendLine()\n\t\tp.lineHandler.Stop()\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase message, isOpen := <-p.inputChan:\n\t\t\tif !isOpen {\n\t\t\t\t// inputChan has been closed, no more lines are expected\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// process the new line and restart the timeout\n\t\t\tif !flushTimer.Stop() {\n\t\t\t\t// flushTimer.stop() doesn't prevent the timer to tick,\n\t\t\t\t// makes sure the event is consumed to avoid sending\n\t\t\t\t// just one piece of the content.\n\t\t\t\tselect {\n\t\t\t\tcase <-flushTimer.C:\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t}\n\t\t\tp.process(message)\n\t\t\tflushTimer.Reset(p.flushTimeout)\n\t\tcase <-flushTimer.C:\n\t\t\t// no chunk has been collected since a while,\n\t\t\t// the content is supposed to be complete.\n\t\t\tp.sendLine()\n\t\t}\n\t}\n}", "func run(input string) (part1 string, part2 string) {\n\tmoons := Parse(input)\n\tprintln(\"After 0 steps\")\n\tPrintMoons(moons)\n\tfor i := 0; i < 1000; i++ {\n\t\tmoons = Step(moons)\n\t}\n\tprintln(\"After 1000 steps\")\n\tPrintMoons(moons)\n\tprintln(\"Total energy: \", TotalEnergy(moons))\n\t// Parse input and return output\n\tpart1 = fmt.Sprintf(\"%d\", TotalEnergy(moons))\n\t// Parse input and return output\n\tmoons = Parse(input)\n\tpart2 = fmt.Sprintf(\"%d\", findPeriod(moons)*2)\n\treturn\n}", "func (pm *PipelineManager) runWorker() {\n\tfor pm.processNextWorkItem() {\n\t}\n}", "func (g *Gossiper) Run(ctx context.Context) {\n\tsths := make(chan sthInfo, g.bufferSize)\n\n\tvar wg sync.WaitGroup\n\twg.Add(1 + len(g.srcs))\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tglog.Info(\"starting Submitter\")\n\t\tg.Submitter(ctx, sths)\n\t\tglog.Info(\"finished Submitter\")\n\t}()\n\tfor _, src := range g.srcs {\n\t\tgo func(src *sourceLog) {\n\t\t\tdefer wg.Done()\n\t\t\tglog.Infof(\"starting Retriever(%s)\", src.Name)\n\t\t\tsrc.Retriever(ctx, g, sths)\n\t\t\tglog.Infof(\"finished Retriever(%s)\", src.Name)\n\t\t}(src)\n\t}\n\twg.Wait()\n}", "func (ip *ImageProcessor) Run() {\n\tfor {\n\t\tpc := <-ip.Chan\n\t\t// Set R, G, and B\n\t\tip.Image.Pix[(uint32(pc.Y)*800*4)+(uint32(pc.X)*4)] = pc.Red\n\t\tip.Image.Pix[(uint32(pc.Y)*800*4)+(uint32(pc.X)*4)+1] = pc.Green\n\t\tip.Image.Pix[(uint32(pc.Y)*800*4)+(uint32(pc.X)*4)+2] = pc.Blue\n\t\twriteImage(ip.OutFile, &ip.Image)\n\t}\n}", "func (op *compose) run(s stream) stream {\n\tif err := op.validate(op.streams); err != nil {\n\t\ts.err = err\n\t\treturn s\n\t}\n\tif s.streams == nil {\n\t\ts.streams = make([]stream, 0)\n\t}\n\tfor _, str := range op.streams {\n\t\ts.streams = append(s.streams, str.(stream))\n\t}\n\treturn s\n}", "func (m *Manager) run() {\n\tfor i := 0; i < m.workerPool.MaxWorker; i++ {\n\t\twID := i + 1\n\t\t//log.Printf(\"[workerPool] worker %d 
spawned\", wID)\n\t\tgo func(workerID int) {\n\t\t\tfor task := range m.workerPool.queuedTaskC {\n\t\t\t\tlog.Printf(\"[workerPool] worker %d is processing task\", wID)\n\t\t\t\ttask()\n\t\t\t\tlog.Printf(\"[workerPool] worker %d has finished processing task\", wID)\n\t\t\t}\n\t\t}(wID)\n\t}\n}", "func (a *actorManager) run() error {\n\t// Continually receive messages\n\tfor {\n\t\t// Get next message\n\t\tvar msg actorMessage\n\t\tif err := a.firefox.remote.recv(&msg); err != nil {\n\t\t\tif a.firefox.runCtx.Err() != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\ta.actorsLock.RLock()\n\t\tactor := a.actors[msg.From]\n\t\ta.actorsLock.RUnlock()\n\t\tif actor != nil {\n\t\t\tactor.onMessage(&msg)\n\t\t}\n\t}\n}", "func (lx *lexer) run() {\n\tfor state := lxBase; state != nil; {\n\t\tstate = state(lx)\n\t}\n\tclose(lx.tokStream)\n}", "func (proc *Sink) Run() {\n\tok := true\n\tvar ft *InformationPacket\n\tfor len(proc.inPorts) > 0 {\n\t\tfor i, inp := range proc.inPorts {\n\t\t\tselect {\n\t\t\tcase ft, ok = <-inp.Chan:\n\t\t\t\tif !ok {\n\t\t\t\t\tproc.deleteInPortAtKey(i)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tDebug.Println(\"Received file in sink: \", ft.GetPath())\n\t\t\tdefault:\n\t\t\t\tDebug.Printf(\"No receive on inport %d, so continuing ...\\n\", i)\n\t\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\t}\n\t\t}\n\t}\n}", "func (c *stream) process(msg streaminterface.Message) {\n\tc.subsMu.RLock()\n\n\t// Stats\n\t//atomic.AddUint64(&c.InMsgs, 1)\n\n\tq := c.subs[msg.Subject().Domain()]\n\tif q == nil {\n\t\tc.subsMu.RUnlock()\n\t\treturn\n\t}\n\n\tif c.opts.Log {\n\t\tq.logMu.Lock()\n\t\tq.log = append(q.log, msg)\n\t\tq.logMu.Unlock()\n\t}\n\n\tfor _, sub := range q.subs {\n\t\tn := &node{m: msg}\n\t\tsub.mu.Lock()\n\t\tsub.pMsgs++\n\n\t\t// Push onto the async pList for a given subscription\n\t\tif sub.pHead == nil {\n\t\t\tsub.pHead = n\n\t\t\tsub.pTail = n\n\t\t\tsub.pCond.Signal()\n\t\t} else {\n\t\t\tsub.pTail.next = n\n\t\t\tsub.pTail = n\n\t\t}\n\t\tsub.mu.Unlock()\n\t}\n\n\tc.subsMu.RUnlock()\n}", "func (c *Command) run() {\n\tdefer c.done()\n\tlog.Println(\"Executing \", c.Command)\n\tvar oscmd *exec.Cmd\n\n\tif len(c.parsed) > 1 {\n\t\toscmd = exec.Command(c.parsed[0], c.parsed[1:]...)\n\t} else {\n\t\toscmd = exec.Command(c.parsed[0])\n\t}\n\tif c.session.cwd != \"\" {\n\t\toscmd.Dir = c.session.cwd\n\t}\n\n\tstdout, err := oscmd.StdoutPipe()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tc.push(c.Id, \"console\", err.Error())\n\t\treturn\n\t}\n\tstderr, err := oscmd.StderrPipe()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tc.push(c.Id, \"console\", err.Error())\n\t\treturn\n\t}\n\n\terr = oscmd.Start()\n\tif err != nil {\n\t\tc.push(c.Id, \"console\", err.Error())\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tc.session.processes[c.Id] = oscmd.Process.Pid\n\n\treader := bufio.NewReader(stdout)\n\treaderErr := bufio.NewReader(stderr)\n\tgo c.readAndPush(readerErr)\n\tc.readAndPush(reader)\n\n\toscmd.Wait()\n}", "func (b *bufferedChan) Run() {\n\tdefer close(b.OutChannel)\n\tfor value := range b.inChannel {\n\t\tselect {\n\t\tcase <-b.ctx.Done():\n\t\t\tfmt.Println(\"Run: Time to return\")\n\t\t\treturn\n\t\tcase b.OutChannel <- value:\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n\n}", "func main() {\n\n\tconfig := model.NewConfig()\n\n\tworkElem := make(chan model.SubM)\n\n\tgo produce(workElem, config)\n\n\ttemplItem := consume(workElem)\n\n\ttemplItem.SubModules(config)\n\n}", "func (r *mutationStreamReader) run() {\n\n\t//panic handler\n\tdefer 
r.panicHandler()\n\n\tfor {\n\t\tselect {\n\n\t\tcase msg, ok := <-r.streamMutch:\n\n\t\t\tif ok {\n\t\t\t\tswitch msg.(type) {\n\t\t\t\tcase []*protobuf.VbKeyVersions:\n\t\t\t\t\tvbKeyVer := msg.([]*protobuf.VbKeyVersions)\n\t\t\t\t\tr.handleVbKeyVersions(vbKeyVer)\n\n\t\t\t\tdefault:\n\t\t\t\t\tr.handleStreamInfoMsg(msg)\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\t//stream library has closed this channel indicating\n\t\t\t\t//unexpected stream closure send the message to supervisor\n\t\t\t\tlogging.Fatalf(\"MutationStreamReader::run Unexpected Mutation \"+\n\t\t\t\t\t\"Channel Close for Stream %v\", r.streamId)\n\t\t\t\tmsgErr := &MsgError{\n\t\t\t\t\terr: Error{code: ERROR_STREAM_READER_STREAM_SHUTDOWN,\n\t\t\t\t\t\tseverity: FATAL,\n\t\t\t\t\t\tcategory: STREAM_READER}}\n\t\t\t\tr.supvRespch <- msgErr\n\t\t\t}\n\n\t\tcase <-r.killch:\n\t\t\treturn\n\t\t}\n\t}\n\n}", "func (v *VCS) runOutput(dir string, cmdline string, kv ...string) ([]byte, error) {\n\treturn v.run1(dir, cmdline, kv, true)\n}", "func (b *QuerySnipBroadcaster) Run() {\n\tfor {\n\t\ts := <-b.in\n\t\tfor _, recipient := range b.recipients {\n\t\t\trecipient <- s\n\t\t}\n\t}\n}", "func (c *MetricsController) runWorker() {\n\tfor c.processNextWorkItem() {\n\t}\n}", "func (w *noAggregationStreamWorker) run() {\n\tlog.Debugf(\"Starting streaming routine for the no-aggregation pipeline\")\n\n\tticker := time.NewTicker(noAggWorkerStreamCheckFrequency)\n\tdefer ticker.Stop()\n\tlogPayloads := config.Datadog.GetBool(\"log_payloads\")\n\tw.seriesSink, w.sketchesSink = createIterableMetrics(w.flushConfig, w.serializer, logPayloads, false)\n\n\tstopped := false\n\tvar stopBlockChan chan struct{}\n\tvar lastStream time.Time\n\n\tfor !stopped {\n\t\tstart := time.Now()\n\t\tserializedSamples := 0\n\n\t\tmetrics.Serialize(\n\t\t\tw.seriesSink,\n\t\t\tw.sketchesSink,\n\t\t\tfunc(seriesSink metrics.SerieSink, sketchesSink metrics.SketchesSink) {\n\t\t\tmainloop:\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\n\t\t\t\t\t// stop signal\n\t\t\t\t\tcase trigger := <-w.stopChan:\n\t\t\t\t\t\tstopped = true\n\t\t\t\t\t\tstopBlockChan = trigger.blockChan\n\t\t\t\t\t\tbreak mainloop // end `Serialize` call and trigger a flush to the forwarder\n\n\t\t\t\t\tcase <-ticker.C:\n\t\t\t\t\t\tn := time.Now()\n\t\t\t\t\t\tif serializedSamples > 0 && lastStream.Before(n.Add(-time.Second*1)) {\n\t\t\t\t\t\t\tlog.Debug(\"noAggregationStreamWorker: triggering an automatic payloads flush to the forwarder (no traffic since 1s)\")\n\t\t\t\t\t\t\tbreak mainloop // end `Serialize` call and trigger a flush to the forwarder\n\t\t\t\t\t\t}\n\n\t\t\t\t\t// receiving samples\n\t\t\t\t\tcase samples := <-w.samplesChan:\n\t\t\t\t\t\tlog.Debugf(\"Streaming %d metrics from the no-aggregation pipeline\", len(samples))\n\t\t\t\t\t\tfor _, sample := range samples {\n\t\t\t\t\t\t\t// enrich metric sample tags\n\t\t\t\t\t\t\tsample.GetTags(w.taggerBuffer, w.metricBuffer)\n\t\t\t\t\t\t\tw.metricBuffer.AppendHashlessAccumulator(w.taggerBuffer)\n\n\t\t\t\t\t\t\t// turns this metric sample into a serie\n\t\t\t\t\t\t\tvar serie metrics.Serie\n\t\t\t\t\t\t\tserie.Name = sample.Name\n\t\t\t\t\t\t\tserie.Points = []metrics.Point{{Ts: sample.Timestamp, Value: sample.Value}}\n\t\t\t\t\t\t\tserie.Tags = tagset.CompositeTagsFromSlice(w.metricBuffer.Copy())\n\t\t\t\t\t\t\tserie.Host = sample.Host\n\t\t\t\t\t\t\t// ignored when late but mimic dogstatsd traffic here anyway\n\t\t\t\t\t\t\tserie.Interval = 
10\n\t\t\t\t\t\t\tw.seriesSink.Append(&serie)\n\n\t\t\t\t\t\t\tw.taggerBuffer.Reset()\n\t\t\t\t\t\t\tw.metricBuffer.Reset()\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tlastStream = time.Now()\n\n\t\t\t\t\t\tserializedSamples += len(samples)\n\t\t\t\t\t\tif serializedSamples > w.maxMetricsPerPayload {\n\t\t\t\t\t\t\tbreak mainloop // end `Serialize` call and trigger a flush to the forwarder\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}, func(serieSource metrics.SerieSource) {\n\t\t\t\tsendIterableSeries(w.serializer, start, serieSource)\n\t\t\t}, func(sketches metrics.SketchesSource) {\n\t\t\t\t// noop: we do not support sketches in the no-agg pipeline.\n\t\t\t})\n\n\t\tif stopped {\n\t\t\tbreak\n\t\t}\n\n\t\tw.seriesSink, w.sketchesSink = createIterableMetrics(w.flushConfig, w.serializer, logPayloads, false)\n\t}\n\n\tif stopBlockChan != nil {\n\t\tclose(stopBlockChan)\n\t}\n}", "func (b *Builder) run() {\n\tfor {\n\t\ttask := b.bq.Pop()\n\t\tb.process(task)\n\t}\n}", "func processRun(nRequests int, concurrency int, ch chan time.Duration, fun func()) []float64 {\n\tresults := make([]float64, 0, nRequests)\n\n\tn := nRequests\n\tfor n > 0 {\n\t\tfor i := 0; i < concurrency; i++ {\n\t\t\tif n > 0 {\n\t\t\t\tgo fun()\n\t\t\t\tn--\n\t\t\t}\n\t\t}\n\n\t\tfor i := 0; i < concurrency; i++ {\n\t\t\tif len(results) < nRequests {\n\t\t\t\tresults = append(results, float64(<-ch)/float64(time.Millisecond))\n\t\t\t}\n\t\t}\n\t}\n\n\treturn results\n}", "func (w *worker) run() {\n\tfor {\n\t\tj, more := <-w.jobs\n\t\tif more {\n\t\t\terr := w.processJob(j)\n\t\t\tw.results <- &jobResult{job: j, err: err}\n\t\t} else {\n\t\t\tw.logger.Info(\"received all jobs, closing worker\")\n\t\t\treturn\n\t\t}\n\t}\n}", "func (n *NodeDrainer) run(ctx context.Context) {\n\tfor {\n\t\tselect {\n\t\tcase <-n.ctx.Done():\n\t\t\treturn\n\t\tcase nodes := <-n.deadlineNotifier.NextBatch():\n\t\t\tn.handleDeadlinedNodes(nodes)\n\t\tcase req := <-n.jobWatcher.Drain():\n\t\t\tn.handleJobAllocDrain(req)\n\t\tcase allocs := <-n.jobWatcher.Migrated():\n\t\t\tn.handleMigratedAllocs(allocs)\n\t\t}\n\t}\n}", "func (tp *TopicProcessor) RunLoop() error {\n\tconsumerChan := tp.getConsumerMessagesChan()\n\tmetricsTicker := time.NewTicker(tp.config.MetricsUpdateInterval)\n\tbatchTicker := time.NewTicker(tp.config.BatchWaitDuration)\n\n\tbatches := tp.getBatches()\n\tlengths := make(map[int]int)\n\n\ttp.logger.Info(\"Entering run loop\")\n\n\tfor {\n\t\tselect {\n\t\tcase consumerMessage := <-consumerChan:\n\t\t\ttp.logger.Debugf(\"Received: %s\", consumerMessage)\n\t\t\tpartition := int(consumerMessage.Partition)\n\t\t\tbatches[partition][lengths[partition]] = consumerMessage\n\t\t\tlengths[partition]++\n\t\t\tif lengths[partition] == tp.config.BatchSize {\n\t\t\t\ttp.logger.Debugf(\"Processing batch of %d messages...\", tp.config.BatchSize)\n\t\t\t\terr := tp.processConsumerMessages(batches[partition], partition)\n\t\t\t\tif err != nil {\n\t\t\t\t\ttp.onClose(metricsTicker, batchTicker)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tlengths[partition] = 0\n\t\t\t\ttp.logger.Debug(\"Processing of batch complete\")\n\t\t\t}\n\t\tcase <-metricsTicker.C:\n\t\t\ttp.onMetricsTick()\n\t\tcase <-batchTicker.C:\n\t\t\tfor _, partition := range tp.partitions {\n\t\t\t\tif lengths[partition] == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\ttp.logger.Debugf(\"Processing batch of %d messages...\", lengths[partition])\n\t\t\t\terr := tp.processConsumerMessages(batches[partition][0:lengths[partition]], partition)\n\t\t\t\tif err != nil 
{\n\t\t\t\t\ttp.onClose(metricsTicker, batchTicker)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tlengths[partition] = 0\n\t\t\t\ttp.logger.Debug(\"Processing of batch complete\")\n\t\t\t}\n\t\tcase <-tp.close:\n\t\t\ttp.onClose(metricsTicker, batchTicker)\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func (r *pumpRunner) run(pipeID, componentID string, cancel <-chan struct{}, provide chan<- struct{}, consume <-chan message, meter *meter) (<-chan message, <-chan error) {\n\tout := make(chan message)\n\terrc := make(chan error, 1)\n\n\tgo func() {\n\t\tdefer close(out)\n\t\tdefer close(errc)\n\t\tcall(r.reset, pipeID, errc) // reset hook\n\t\tvar err error\n\t\tvar m message\n\t\tvar done bool // done flag\n\t\tfor {\n\t\t\t// request new message\n\t\t\tselect {\n\t\t\tcase provide <- do:\n\t\t\tcase <-cancel:\n\t\t\t\tcall(r.interrupt, pipeID, errc) // interrupt hook\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// receive new message\n\t\t\tselect {\n\t\t\tcase m = <-consume:\n\t\t\tcase <-cancel:\n\t\t\t\tcall(r.interrupt, pipeID, errc) // interrupt hook\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tm.applyTo(componentID) // apply params\n\t\t\tm.Buffer, err = r.fn() // pump new buffer\n\t\t\tif err != nil {\n\t\t\t\tswitch err {\n\t\t\t\tcase io.EOF:\n\t\t\t\t\tcall(r.flush, pipeID, errc) // flush hook\n\t\t\t\t\treturn\n\t\t\t\tcase io.ErrUnexpectedEOF:\n\t\t\t\t\tcall(r.flush, pipeID, errc) // flush hook\n\t\t\t\t\tdone = true\n\t\t\t\tdefault:\n\t\t\t\t\terrc <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tmeter = meter.sample(int64(m.Buffer.Size())).message()\n\t\t\tm.feedback.applyTo(componentID) // apply feedback\n\n\t\t\t// push message further\n\t\t\tselect {\n\t\t\tcase out <- m:\n\t\t\t\tif done {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase <-cancel:\n\t\t\t\tcall(r.interrupt, pipeID, errc) // interrupt hook\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn out, errc\n}", "func Run() {\n\trscs := ParseAll(context.Background(), resourceFiles, pkg)\n\tb, err := proto.Marshal(rscs)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif err = ioutil.WriteFile(rPbOutput, b, 0644); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (l *lexer) run() {\nmainLoop:\n\tfor {\n\t\tif !processWhitespace(l) {\n\t\t\tbreak\n\t\t}\n\t\t//fmt.Println(\"testing\", string(l.peek()))\n\t\tfound := false\n\tprocessLoop:\n\t\tfor _, processFunc := range processFunctions {\n\t\t\t//fmt.Println(\"func =\", processFunc)\n\t\t\tresult := processFunc(l)\n\t\t\t//fmt.Println(\"peek = \", string(l.peek()))\n\t\t\tswitch result {\n\t\t\tcase resultMatch:\n\t\t\t\tfound = true\n\t\t\t\tbreak processLoop\n\t\t\tcase resultMatchError:\n\t\t\t\tbreak mainLoop\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tl.errorf(\"Invalid token: '%s'\", string(l.peek()))\n\t\t\tbreak\n\t\t}\n\t}\n\tl.emit(itemEOF)\n\tclose(l.items)\n}", "func runPipeline(fname string) map[string]bool {\n\t//store results of the pipeline\n\tresults := make(map[string]bool)\n\n\t//parseFile\n\t//make call to datacleaner microservice here\n\t//We won't call data clean in the pipeline, instead, we will trigger data clean\n\t//before this pipeline\n\n\t//count lines\n\tresults[\"Count Lines\"] = countLines(fname)\n\n\t//count browsers\n\t//make call to browserCounts microservice here\n\tresults[\"Count Browsers\"] = countBrowser(fname)\n\n\t//count visitors\n\t//make call to visitorCounts microservice here\n\tresults[\"Count Visitor\"] = countVisitor(fname)\n\n\t//count websites\n\t//make call to websiteCounter microservice here\n\tresults[\"Count Websites\"] = countWebsite(fname)\n\n\treturn 
results\n}", "func (r *Reader) run() {\n\tdefer r.cancel()\n\tdefer close(r.done)\n\tdefer r.stmt.Close()\n\n\tvar err error\n\n\tfor err == nil {\n\t\terr = r.tick()\n\t}\n\n\tif err != context.Canceled {\n\t\tr.done <- err\n\t}\n}", "func (p *literalProcessor) start() { go p.run() }", "func (p *Printer) run() {\n\tdefer close(p.ch)\n\tconn, err := p.ln.Accept()\n\tif err != nil {\n\t\treturn\n\t}\n\tp.conn = conn\n\n\t// If Close() has been called, close the connection.\n\tif atomic.SwapInt32(&p.state, 2) == 1 {\n\t\tconn.Close()\n\t\treturn\n\t}\n\n\tdata, err := ioutil.ReadAll(conn)\n\tif err != nil {\n\t\treturn\n\t}\n\tp.ch <- data\n}", "func (mgr *manager) run() {\n\tlog(mgr.reportingTo.Name(), \"working\", nil, false)\n\tdefer log(mgr.reportingTo.Name(), \"all done\", nil, false)\n\tstepFn := mgr.step_Accepting\n\tfor {\n\t\tif stepFn == nil {\n\t\t\tbreak\n\t\t}\n\t\tstepFn = stepFn()\n\t}\n}", "func (f *Function) processResults() {\n\tdefer f.wg.Done()\n\tvar otherClosed bool\n\tfor {\n\t\tselect {\n\t\tcase res, ok := <-f.output:\n\t\t\tif !ok {\n\t\t\t\tif otherClosed {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\totherClosed = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tf.resHandler(res)\n\n\t\tcase err, ok := <-f.errch:\n\t\t\tif !ok {\n\t\t\t\tif otherClosed {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\totherClosed = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tf.errHandler(err)\n\t\t}\n\t}\n}", "func (this *Connection) run() {\n\tgo this.routineMain()\n}", "func (a *Agent) runOutputs(\n\tunit *outputUnit,\n) error {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tfor _, output := range unit.outputs {\n\t\terr := a.RunSingleOutput(output, ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor metric := range unit.src {\n\t\tfor i, output := range unit.outputs {\n\t\t\ta.Config.OutputsLock.Lock()\n\t\t\tif i == len(a.Config.Outputs)-1 {\n\t\t\t\toutput.AddMetric(metric)\n\t\t\t} else {\n\t\t\t\toutput.AddMetric(metric.Copy())\n\t\t\t}\n\t\t\ta.Config.OutputsLock.Unlock()\n\t\t}\n\t}\n\n\tlog.Println(\"I! 
[agent] Hang on, flushing any cached metrics before shutdown\")\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func (r *Runner) generate(output chan Result, wg *sizedwaitgroup.SizedWaitGroup) {\n\tif r.options.TargetUrl != \"\" {\n\t\tlog.Info(fmt.Sprintf(\"single target: %s\", r.options.TargetUrl))\n\t\twg.Add()\n\t\tgo r.process(output, r.options.TargetUrl, wg)\n\t} else {\n\t\turls, err := ReadFile(r.options.UrlFile)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"Can't read url file\")\n\t\t} else {\n\t\t\tlog.Info(fmt.Sprintf(\"Read %d urls in total\", len(urls)))\n\t\t\tfor _, u := range urls {\n\t\t\t\twg.Add()\n\t\t\t\tgo r.process(output, u, wg)\n\t\t\t}\n\t\t}\n\t}\n}", "func run(input string) (interface{}, interface{}) {\n\tts, busList := parse(input)\n\n\tpart1, part2 := 0, 0\n\tmod := 1\n\tminID := 0\n\tfor i, b := range busList {\n\t\tfreq := b.ID - ts%b.ID\n\t\tif freq < busList[minID].ID-ts%busList[minID].ID {\n\t\t\tpart1 = b.ID * freq\n\t\t\tminID = i\n\t\t}\n\n\t\tfor (part2+b.Offset)%b.ID != 0 {\n\t\t\tpart2 += mod\n\t\t}\n\t\tmod *= b.ID\n\t}\n\treturn part1, part2\n}", "func (p *Probe) loop() {\n\tdefer close(p.stopped)\n\n\t// Do a first probe right away, so that the prober immediately exports results for everything.\n\tp.run()\n\tfor {\n\t\tselect {\n\t\tcase <-p.tick.Chan():\n\t\t\tp.run()\n\t\tcase <-p.ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}", "func (w *worker) runWorker() {\n\tfor w.processNextItem() {\n\t}\n}", "func Run() ([]*collectors.MetricResult, error) {\n\tvar (\n\t\tdata []byte\n\t\terr error\n\t)\n\tif data, err = loader(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn preformatter(data)\n}", "func (b *PackByCount) Run() {\n\tvar batch []interface{}\n\tvar packSize int\n\n\tfor {\n\t\tselect {\n\t\tcase ruleI := <-b.inrule:\n\t\t\tpackSizeTmp, err := util.ParseFloat(ruleI, \"MaxCount\")\n\t\t\tif err != nil {\n\t\t\t\tb.Error(\"error parsing batch size\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tpackSize = int(packSizeTmp)\n\t\t\tbatch = nil\n\n\t\tcase <-b.quit:\n\t\t\t// quit the block\n\t\t\treturn\n\t\tcase m := <-b.in:\n\t\t\tif packSize == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif len(batch) == packSize {\n\t\t\t\tb.out <- map[string]interface{}{\n\t\t\t\t\t\"Pack\": batch,\n\t\t\t\t}\n\t\t\t\tbatch = nil\n\t\t\t}\n\n\t\t\tbatch = append(batch, m)\n\n\t\tcase <-b.clear:\n\t\t\tbatch = nil\n\t\tcase <-b.flush:\n\t\t\tb.out <- map[string]interface{}{\n\t\t\t\t\"Pack\": batch,\n\t\t\t}\n\t\t\tbatch = nil\n\t\tcase r := <-b.queryrule:\n\t\t\tr <- map[string]interface{}{\n\t\t\t\t\"MaxCount\": packSize,\n\t\t\t}\n\t\t}\n\t}\n}", "func (a *Aggregator) Run() {\n\tevents := make(chan []byte)\n\turls := make(chan URL)\n\tctx, cancel := context.WithCancel(context.Background())\n\n\t// Catch SIGINT/SIGTERM signals and call cancel() before exiting to\n\t// gracefully stop goroutines\n\tsignalCh := make(chan os.Signal, 1)\n\tsignal.Notify(signalCh, os.Interrupt, syscall.SIGTERM)\n\n\tgo func() {\n\t\t<-signalCh\n\t\tcancel()\n\t\tos.Exit(1)\n\t}()\n\n\t// Run an event listener goroutine, compute aggregation on `ServerStatus`\n\t// events coming from the message queue\n\tgo func(ctx context.Context) {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase event := <-events:\n\t\t\t\tvar status ServerStatus\n\t\t\t\terr := json.Unmarshal(event, &status)\n\t\t\t\tif err != nil {\n\t\t\t\t\ta.logger.Println(\"Error decoding status event\")\n\t\t\t\t} else {\n\t\t\t\t\ta.aggregate(&status)\n\t\t\t\t\turls <- status.Url\n\t\t\t\t}\n\t\t\tcase 
<-ctx.Done():\n\t\t\t\ta.mq.Close()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}(ctx)\n\n\t// Just print results of aggregation for each received URL\n\tgo func(ctx context.Context) {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase url := <-urls:\n\t\t\t\tstats, _ := a.servers[url]\n\t\t\t\ta.logger.Printf(\"%s alive=%v avail.(%%)=%.2f res(ms)=%v min(ms)=%v max(ms)=%v avg(ms)=%v status_codes=%v\\n\",\n\t\t\t\t\turl, stats.Alive, stats.Availability,\n\t\t\t\t\tstats.LatestResponseTime, stats.MovingAverageStats.Min(),\n\t\t\t\t\tstats.MovingAverageStats.Max(), stats.MovingAverageStats.Mean(),\n\t\t\t\t\tstats.ResponseStatusMap)\n\t\t\t\t// Send stats to presenter\n\t\t\t\tpresenterStats := Stats{\n\t\t\t\t\tUrl: url,\n\t\t\t\t\tAlive: stats.Alive,\n\t\t\t\t\tAvgResponseTime: stats.MovingAverageStats.Mean(),\n\t\t\t\t\tAvailability: stats.Availability,\n\t\t\t\t\tStatusCodes: stats.ResponseStatusMap,\n\t\t\t\t}\n\t\t\t\tpayload, err := json.Marshal(presenterStats)\n\t\t\t\tif err != nil {\n\t\t\t\t\ta.logger.Println(\"Unable to marshal presenter stats\")\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\ta.mq.Produce(\"stats\", payload)\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}(ctx)\n\n\tif err := a.mq.Consume(\"urlstatus\", 1, events); err != nil {\n\t\ta.logger.Fatal(err)\n\t}\n}", "func (route *GrafanaNet) run(in chan []byte) {\n\tvar metrics []*schema.MetricData\n\tbuffer := new(bytes.Buffer)\n\n\ttimer := time.NewTimer(route.Cfg.FlushMaxWait)\n\tfor {\n\t\tselect {\n\t\tcase buf := <-in:\n\t\t\troute.numBuffered.Dec(1)\n\t\t\tmd, err := parseMetric(buf, route.schemas, route.Cfg.OrgID)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"RouteGrafanaNet: parseMetric failed: %s. skipping metric\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmd.SetId()\n\t\t\tmetrics = append(metrics, md)\n\n\t\t\tif len(metrics) == route.Cfg.FlushMaxNum {\n\t\t\t\tmetrics = route.retryFlush(metrics, buffer)\n\t\t\t\t// reset our timer\n\t\t\t\tif !timer.Stop() {\n\t\t\t\t\t<-timer.C\n\t\t\t\t}\n\t\t\t\ttimer.Reset(route.Cfg.FlushMaxWait)\n\t\t\t}\n\t\tcase <-timer.C:\n\t\t\ttimer.Reset(route.Cfg.FlushMaxWait)\n\t\t\tmetrics = route.retryFlush(metrics, buffer)\n\t\tcase <-route.shutdown:\n\t\t\tmetrics = route.retryFlush(metrics, buffer)\n\t\t\treturn\n\t\t}\n\t}\n\troute.wg.Done()\n}", "func (jr *joinReader) Run(wg *sync.WaitGroup) {\n\tif wg != nil {\n\t\tdefer wg.Done()\n\t}\n\n\tctx := log.WithLogTagInt(jr.flowCtx.Ctx, \"JoinReader\", int(jr.desc.ID))\n\tctx, span := processorSpan(ctx, \"join reader\")\n\tdefer tracing.FinishSpan(span)\n\n\terr := jr.mainLoop(ctx)\n\tif err != nil {\n\t\tDrainAndClose(ctx, jr.out.output, err /* cause */, jr.pushTrailingMeta, jr.input)\n\t}\n}", "func (lp *loop) Run() (buffer string, err error) {\n\tfor {\n\t\tvar flag redrawFlag\n\t\tif lp.extractRedrawFull() {\n\t\t\tflag |= fullRedraw\n\t\t}\n\t\tlp.redrawCb(flag)\n\t\tselect {\n\t\tcase event := <-lp.inputCh:\n\t\t\t// Consume all events in the channel to minimize redraws.\n\t\tconsumeAllEvents:\n\t\t\tfor {\n\t\t\t\tlp.handleCb(event)\n\t\t\t\tselect {\n\t\t\t\tcase ret := <-lp.returnCh:\n\t\t\t\t\tlp.redrawCb(finalRedraw)\n\t\t\t\t\treturn ret.buffer, ret.err\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t\tselect {\n\t\t\t\tcase event = <-lp.inputCh:\n\t\t\t\t\t// Continue the loop of consuming all events.\n\t\t\t\tdefault:\n\t\t\t\t\tbreak consumeAllEvents\n\t\t\t\t}\n\t\t\t}\n\t\tcase ret := <-lp.returnCh:\n\t\t\tlp.redrawCb(finalRedraw)\n\t\t\treturn ret.buffer, ret.err\n\t\tcase <-lp.redrawCh:\n\t\t}\n\t}\n}", "func (r fifo) Run(ctx 
context.Context, params StageParams) {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase payloadIn, ok := <-params.Input():\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpayloadOut, err := r.proc.Process(ctx, payloadIn)\n\t\t\tif err != nil {\n\t\t\t\twrappedErr := xerrors.Errorf(\"pipeline stage %d : %w \", params.StageIndex(), err)\n\t\t\t\tmaybeEmitError(wrappedErr, params.Error())\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif payloadOut == nil {\n\t\t\t\tpayloadIn.MarkAsProcessed()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase params.Output() <- payloadOut:\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n}", "func (w *Worker) run(){\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase f := <- w.task:\n\t\t\t\tif f == nil {\n\t\t\t\t\tw.pool.decRunning()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tf()\n\t\t\t\tw.pool.putWorker(w)\n\t\t\tcase args := <- w.args:\n\t\t\t\tif args == nil {\n\t\t\t\t\tw.pool.decRunning()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tw.pool.poolFunc(args)\n\t\t\t\tw.pool.putWorker(w)\n\t\t\t}\n\t\t}\n\t}()\n}", "func (p *Pipe) start() {\n\tp.cancel = make(chan struct{})\n\terrcList := make([]<-chan error, 0, 1+len(p.processors)+len(p.sinks))\n\t// start pump\n\tout, errc := p.pump.run(p.cancel, p.ID(), p.provide, p.consume, p.sampleRate, p.metric)\n\terrcList = append(errcList, errc)\n\n\t// start chained processesing\n\tfor _, proc := range p.processors {\n\t\tout, errc = proc.run(p.cancel, p.ID(), out, p.sampleRate, p.metric)\n\t\terrcList = append(errcList, errc)\n\t}\n\n\tsinkErrcList := p.broadcastToSinks(out)\n\terrcList = append(errcList, sinkErrcList...)\n\tp.errc = mergeErrors(errcList...)\n}", "func (bl *LogBuffer) run() {\n\tfor {\n\t\tmsg, err := bl.ringBuffer.Pop()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif err := bl.logger.WriteLogMessage(msg); err != nil {\n\t\t\tlogrus.Debugf(\"failed to write log %v with log driver %s\", msg, bl.logger.Name())\n\t\t}\n\t}\n}", "func main() {\n\tgo produce()\n\tgo consume()\n\t<-done\n}", "func (i *instanceManager) run() {\n\t// Dispense once to ensure we are given a valid plugin\n\tif _, err := i.dispense(); err != nil {\n\t\ti.logger.Error(\"dispensing initial plugin failed\", \"error\", err)\n\t\treturn\n\t}\n\n\t// Create a waitgroup to block on shutdown for all created goroutines to\n\t// exit\n\tvar wg sync.WaitGroup\n\n\t// Start the fingerprinter\n\twg.Add(1)\n\tgo func() {\n\t\ti.fingerprint()\n\t\twg.Done()\n\t}()\n\n\t// Start event handler\n\twg.Add(1)\n\tgo func() {\n\t\ti.handleEvents()\n\t\twg.Done()\n\t}()\n\n\t// Do a final cleanup\n\twg.Wait()\n\ti.cleanup()\n}", "func (lt *Logtailer) Run(numWorkers int) (*Stats, error) {\n\tinput, err := lt.getInput()\n\tstats := &Stats{}\n\n\tif err != nil {\n\t\tlt.Logger.Println(\"error getting logtail input:\", err)\n\t\treturn stats, err\n\t}\n\tscanner := bufio.NewScanner(input)\n\tinputRecords := make(chan string)\n\toutputRecords := make(chan interface{})\n\n\t// run any initialization routines needed by the profile\n\terr = lt.Profile.Init()\n\tif err != nil {\n\t\treturn stats, err\n\t}\n\n\t// start scanner goroutine\n\tgo func() {\n\t\tdefer close(inputRecords)\n\t\t// if the profile supplies a custom splitting function, use it\n\t\tif splitter, ok := lt.Profile.(Splitter); ok {\n\t\t\tscanner.Split(splitter.Split)\n\t\t}\n\n\t\tfor scanner.Scan() {\n\t\t\t// hand every token to inputRecords to be consumed by the profile\n\t\t\tstats.Records++\n\t\t\tselect {\n\t\t\tcase inputRecords <- 
scanner.Text():\n\t\t\tcase <-lt.shutdown:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < numWorkers; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tfor {\n\t\t\t\tline, ok := <-inputRecords\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\trecord, err := lt.Profile.ProcessRecord(line)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tlt.Logger.Println(\"error parsing:\", err)\n\t\t\t\t\tstats.Lock()\n\t\t\t\t\tstats.ParseErrors++\n\t\t\t\t\tstats.Unlock()\n\t\t\t\t} else {\n\t\t\t\t\toutputRecords <- record\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\terrorChan := lt.Profile.HandleOutput(outputRecords, lt.DryRun)\n\n\tgo func() {\n\t\tfor err := range errorChan {\n\t\t\tstats.Lock()\n\t\t\tstats.SendErrors++\n\t\t\tstats.Unlock()\n\t\t\tlt.Logger.Println(\"error sending:\", err)\n\t\t}\n\t}()\n\twg.Wait()\n\tclose(outputRecords)\n\n\tif stats.IsHealthy() {\n\t\terr = nil\n\t} else {\n\t\terr = fmt.Errorf(\"stats indicate unhealthy run: %+v\", stats)\n\t}\n\treturn stats, err\n}", "func run() {\n\n\tvar amdata amData\n\tget(&amdata)\n\tmergeData(amdata)\n\tsortAlert()\n\tfilter()\n\n\tif *jsonOutput {\n\t\tif name != \"\" {\n\t\t\tjsonPrintDetails()\n\t\t} else {\n\t\t\tjsonPrint()\n\t\t}\n\t} else {\n\t\tif name != \"\" {\n\t\t\tdetailPrint()\n\t\t} else {\n\t\t\ttabulate()\n\t\t}\n\t}\n}", "func (o BuildSpecRuntimeOutput) Run() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v BuildSpecRuntime) []string { return v.Run }).(pulumi.StringArrayOutput)\n}", "func (collector *Collector) process() {\n\t// Signal that the collector has stopped when we return.\n\tdefer func() {\n\t\tclose(collector.Stopped)\n\t}()\n\n\tlogp.Info(\"Starting collector processing\")\n\n\t// What we'll use for keeping track of Timeout.Once, so that a command only executes once\n\t// between pattern matches and not at an interval\n\ttimedOutOnce := false\n\n\t// Continuously select over our channels and signals waiting for an event\n\tfor {\n\t\tselect {\n\t\tcase msg := <-collector.lines:\n\t\t\t// We've gotten a new log line\n\t\t\tlogp.Debug(\"log-pulse\", \"Collector received message: %s\", msg)\n\t\t\tif collector.Pattern.MatchString(msg) {\n\t\t\t\tlogp.Debug(\"log-pulse\", \"Message matches pattern\")\n\n\t\t\t\t// The line matches our pattern so reset our timeout\n\t\t\t\tcollector.resetTimeout()\n\n\t\t\t\t// Reset our timedOutOnce so that another timeout command can execute\n\t\t\t\ttimedOutOnce = false\n\n\t\t\t\t// If a command is configured to be run on pattern matches execute it\n\t\t\t\tif collector.config.Command.Program != \"\" {\n\t\t\t\t\tlogp.Info(\"Running pattern match command...\")\n\t\t\t\t\tcollector.config.Command.Start()\n\t\t\t\t}\n\t\t\t}\n\t\tcase t := <-collector.timeoutChannel:\n\t\t\tlogp.Debug(\"log-pulse\", \"Timed Out\", t)\n\n\t\t\t// Our ticker has timed-out\n\t\t\t// Only do anything if there's an actual timeout command configured\n\t\t\tif collector.config.Timeout.Command.Program != \"\" {\n\t\t\t\tif !(timedOutOnce && collector.config.Timeout.Once) {\n\t\t\t\t\t// Only run our command if TimeoutOnce isn't set or, if it is,\n\t\t\t\t\t// only if we haven't run the command yet.\n\t\t\t\t\tlogp.Info(\"Running timeout command...\")\n\t\t\t\t\tcollector.config.Timeout.Command.Start()\n\t\t\t\t}\n\t\t\t}\n\t\t\ttimedOutOnce = true\n\t\tcase <-collector.Done:\n\t\t\t// We got a shutdown signal\n\t\t\tlogp.Info(\"Collector received shutdown signal and is going to close\")\n\t\t\treturn\n\t\t}\n\t}\n}", "func (tfm *trxFlowMonitor) run() 
{\n\t// make sure we are orchestrated\n\tif tfm.mgr == nil {\n\t\tpanic(fmt.Errorf(\"no svc manager set on %s\", tfm.name()))\n\t}\n\n\t// start go routine for processing\n\ttfm.mgr.started(tfm)\n\tgo tfm.execute()\n}", "func (s *Scavenger) run() {\n\tdefer func() {\n\t\ts.emitStats()\n\t\tgo s.Stop()\n\t\ts.stopWG.Done()\n\t}()\n\n\t// Start a task to delete orphaned tasks from the tasks table, if enabled\n\tif s.cleanOrphans() {\n\t\ts.executor.Submit(&orphanExecutorTask{scvg: s})\n\t}\n\n\tvar pageToken []byte\n\tfor {\n\t\tresp, err := s.listTaskList(taskListBatchSize, pageToken)\n\t\tif err != nil {\n\t\t\ts.logger.Error(\"listTaskList error\", tag.Error(err))\n\t\t\treturn\n\t\t}\n\n\t\tfor _, item := range resp.Items {\n\t\t\tatomic.AddInt64(&s.stats.tasklist.nProcessed, 1)\n\t\t\tif !s.executor.Submit(s.newTask(&item)) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tpageToken = resp.NextPageToken\n\t\tif pageToken == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\ts.awaitExecutor()\n}", "func genOutput(inPath, outPath, voiceType string) {\n\tfmt.Println(\"Processing ...\")\n\tvar wg sync.WaitGroup\n\tcontent, err := utils.ReadPdf(inPath)\n\tfmt.Println(\"Content from Pdf ::\", content)\n\tutils.FatalErr(err)\n\tconst maxLen = 10000\n\tsplits := SplitStr(content, maxLen)\n\tfor i, v := range splits {\n\t\twg.Add(1)\n\t\tgo utils.GenAudio(v, voiceType, outPath, i, &wg)\n\t}\n\twg.Wait()\n}", "func (inst *Instance) Run(input map[string]interface{}) (output map[string]interface{}, err error) {\n\n\t// Get the Scope of the CML pipeline.\n\t// Scope is the collection of the data in the CML\n\tscope, err := NewPipelineScope(input, inst.def.labels)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Log the time\n\tstart := time.Now()\n\n\t//Check the type of the input of the pipeline.\n\tfor key, _ := range inst.def.input {\n\n\t\ttemp, ok := inst.def.input[key].(PipelineInput)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\terr = types.ValidateType(temp.Type, input[key])\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t}\n\n\t//Run the tasks.\n\tfor key, task := range inst.def.tasks {\n\t\ttask.Position()\n\t\tscope, err = task.Eval(scope, inst.logger)\n\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error %s in task \\\"%s-%v\\\" \", err.Error(), task.Name(), key)\n\t\t}\n\n\t}\n\n\t// Set the output.\n\n\tif inst.def.output.Data != nil {\n\t\tmf := GetMapperFactory()\n\t\tmappings := make(map[string]interface{})\n\n\t\t// Type Switch\n\t\tswitch t := inst.def.output.Data.(type) {\n\t\tcase map[string]interface{}:\n\t\t\tfor key, val := range t {\n\t\t\t\tmappings[key] = val\n\t\t\t}\n\t\tdefault:\n\t\t\tmappings[\"data\"] = inst.def.output.Data\n\t\t}\n\n\t\t// Get the data from output expression\n\t\toutMapper, err := mf.NewMapper(mappings)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\toutput, err = outMapper.Apply(scope)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvar definedType data.Type\n\n\t\t// Check if the output is defined as dataframe or map.\n\t\tif inst.def.output.Type == \"dataframe\" || inst.def.output.Type == \"map\" {\n\t\t\tdefinedType, err = data.ToTypeEnum(\"object\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tgivenType, err := data.GetType(output)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif definedType != givenType {\n\t\t\t\treturn nil, fmt.Errorf(\"Type mismatch in output. 
Defined type [%s] passed type [%s]\", definedType, givenType)\n\t\t\t}\n\n\t\t\tinst.logger.Infof(\"The output took %v to calculate\", time.Since(start))\n\n\t\t\treturn output, nil\n\t\t}\n\n\t\tdefinedType, _ = data.ToTypeEnum(inst.def.output.Type)\n\n\t\tfor key, _ := range output {\n\n\t\t\tgivenType, err := data.GetType(output[key])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif definedType != givenType {\n\t\t\t\treturn nil, fmt.Errorf(\"Type mismatch in output. Defined type [%s] passed type [%s]\", definedType, givenType)\n\t\t\t}\n\t\t}\n\n\t}\n\tinst.logger.Infof(\"The output took %v to calculate\", time.Since(start))\n\n\treturn output, nil\n\n}", "func (c *Controller) runWorker() {\n\tfor c.processNextItem() {\n\t}\n}", "func (p *Producer) Run() {\n\tp.wg.Add(1)\n\tdefer p.wg.Done()\n\n\tsendMsg := func(routingKey string, data []byte) {\n\t\ttimeStamp := time.Now()\n\t\terr := p.rabbitChannel.Publish(\n\t\t\tp.rabbitExchange,\n\t\t\troutingKey,\n\t\t\tfalse,\n\t\t\tfalse,\n\t\t\tamqp.Publishing{\n\t\t\t\tDeliveryMode: amqp.Persistent,\n\t\t\t\tTimestamp: timeStamp,\n\t\t\t\tContentType: \"text/plain\",\n\t\t\t\tBody: data,\n\t\t\t})\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error publishing %s\", string(data))\n\t\t\tp.writeFailure(\n\t\t\t\tfmt.Sprintf(\"%s/%s-%d.txt\",\n\t\t\t\t\tp.failureDir,\n\t\t\t\t\troutingKey,\n\t\t\t\t\ttimeStamp.UnixNano()),\n\t\t\t\tdata)\n\t\t}\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase event := <-p.eventsChan:\n\t\t\tsendMsg(\"raw_events\", event)\n\t\tcase meter := <-p.metersChan:\n\t\t\tsendMsg(\"raw_meters\", meter)\n\t\tcase <-p.quitChan:\n\t\t\tp.rabbitChannel.Close()\n\t\t\treturn\n\t\t}\n\t}\n}" ]
[ "0.6309216", "0.6276241", "0.62541133", "0.60858583", "0.59229887", "0.5826815", "0.58257556", "0.57818365", "0.57657427", "0.57529914", "0.57513297", "0.5672773", "0.5664691", "0.5659342", "0.5640284", "0.5628862", "0.5621618", "0.5619553", "0.5615171", "0.5578862", "0.5577405", "0.5563367", "0.55572134", "0.5496046", "0.5492482", "0.54880667", "0.54355574", "0.54350245", "0.54341716", "0.5421614", "0.5416859", "0.5408537", "0.5383528", "0.5353964", "0.53229994", "0.5320164", "0.528498", "0.52563876", "0.5255284", "0.5247733", "0.52473104", "0.5232415", "0.52258915", "0.52138895", "0.5213149", "0.5206655", "0.51696914", "0.5161267", "0.51593834", "0.5157887", "0.51531225", "0.5139675", "0.5132563", "0.5122796", "0.5122145", "0.5120248", "0.5119772", "0.5118312", "0.5115609", "0.511091", "0.51078093", "0.5106273", "0.51053536", "0.51022315", "0.5100026", "0.50975746", "0.509693", "0.50929993", "0.5085012", "0.5077948", "0.5076738", "0.50737613", "0.50737387", "0.5060638", "0.50587416", "0.505401", "0.5050255", "0.5047891", "0.50466037", "0.50464565", "0.504007", "0.50365055", "0.5022664", "0.50221884", "0.5019366", "0.50156355", "0.5007425", "0.50070846", "0.5005497", "0.5003986", "0.500278", "0.49961022", "0.49919873", "0.4988722", "0.49857172", "0.49764237", "0.49596602", "0.4951784", "0.49517325", "0.49515465" ]
0.5382721
33
eval evaluates two values using the evaluator's operation.
func (e *binaryExprEvaluator) eval(lhs, rhs interface{}) interface{} { switch e.op { case ADD: return lhs.(float64) + rhs.(float64) case SUB: return lhs.(float64) - rhs.(float64) case MUL: return lhs.(float64) * rhs.(float64) case DIV: rhs := rhs.(float64) if rhs == 0 { return float64(0) } return lhs.(float64) / rhs default: // TODO: Validate operation & data types. panic("invalid operation: " + e.op.String()) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func eval(binaryOp stmt.BinaryOP, left, right float64) float64 {\n\tswitch binaryOp {\n\tcase stmt.ADD:\n\t\treturn left + right\n\tcase stmt.SUB:\n\t\treturn left - right\n\tcase stmt.MUL:\n\t\treturn left * right\n\tcase stmt.DIV:\n\t\tif right == 0 {\n\t\t\treturn 0\n\t\t}\n\t\treturn left / right\n\tdefault:\n\t\treturn 0\n\t}\n}", "func evaluate(arg1 *vector.Vector, oper *vector.Vector, arg2 *vector.Vector) *vector.Vector {\n\t//Store the operator in a temp string, to save typing it out\n\tvar operS string\n\toperS = oper.At(0).(string)\n\tvar val1, val2 int \n\tvar err1, err2 os.Error\n\tval1, err1 = strconv.Atoi(arg1.At(0).(string))\n\tval2, err2 = strconv.Atoi(arg2.At(0).(string))\n\t//screens for consecutive operators\n\tif(err1 != nil || err2 != nil){\n\t\tfmt.Println(\"expr: syntax error\")\n\t\tos.Exit(-2)\n\t}\n\tvar result int = -1\n\t//Evaluate based on the operator\n\tif operS == \"+\" {\n\t\tresult = val1 + val2\n\t} else if operS == \"-\" {\n\t\tresult = val1 - val2\n\t} else if operS == \"/\" {\n\t\tresult = val1 / val2\n\t} else if operS == \"*\" {\n\t\tresult = val1 * val2\n\t} else if operS == \"%\" {\n\t\tresult = val1 % val2\n\t}\n\t//Clear the arg1 vector and add the result to it, then return\n\t//(saves memory by not creating a new vector)\n\targ1.Cut(0, arg1.Len())\n\targ1.Push(strconv.Itoa(result))\n\treturn arg1\n}", "func ExampleEval() {\n\tfmt.Println(Eval(\"5\"))\n\tfmt.Println(Eval(\"1 + 2\"))\n\tfmt.Println(Eval(\"1 - 2 + 3\"))\n\tfmt.Println(Eval(\"3 * ( 3 + 1 * 3 ) / 2\"))\n\tfmt.Println(Eval(\"3 * ( ( 3 + 1 ) * 3 ) / 2\"))\n\t//OutPut:\n\t//5\n\t//3\n\t//2\n\t//9\n\t//18\n}", "func (bp *BinaryPlus) Eval() float64 {\n\treturn bp.left.(Eval).Eval() + bp.right.(Eval).Eval()\n}", "func (ev *evaluator) eval(expr Expr) model.Value {\n\t// This is the top-level evaluation method.\n\t// Thus, we check for timeout/cancellation here.\n\tif err := contextDone(ev.ctx, \"expression evaluation\"); err != nil {\n\t\tev.error(err)\n\t}\n\n\tswitch e := expr.(type) {\n\tcase *AggregateExpr:\n\t\tvector := ev.evalVector(e.Expr)\n\t\treturn ev.aggregation(e.Op, e.Grouping, e.Without, e.KeepCommonLabels, e.Param, vector)\n\n\tcase *BinaryExpr:\n\t\tlhs := ev.evalOneOf(e.LHS, model.ValScalar, model.ValVector)\n\t\trhs := ev.evalOneOf(e.RHS, model.ValScalar, model.ValVector)\n\n\t\tswitch lt, rt := lhs.Type(), rhs.Type(); {\n\t\tcase lt == model.ValScalar && rt == model.ValScalar:\n\t\t\treturn &model.Scalar{\n\t\t\t\tValue: scalarBinop(e.Op, lhs.(*model.Scalar).Value, rhs.(*model.Scalar).Value),\n\t\t\t\tTimestamp: ev.Timestamp,\n\t\t\t}\n\n\t\tcase lt == model.ValVector && rt == model.ValVector:\n\t\t\tswitch e.Op {\n\t\t\tcase itemLAND:\n\t\t\t\treturn ev.vectorAnd(lhs.(vector), rhs.(vector), e.VectorMatching)\n\t\t\tcase itemLOR:\n\t\t\t\treturn ev.vectorOr(lhs.(vector), rhs.(vector), e.VectorMatching)\n\t\t\tcase itemLUnless:\n\t\t\t\treturn ev.vectorUnless(lhs.(vector), rhs.(vector), e.VectorMatching)\n\t\t\tdefault:\n\t\t\t\treturn ev.vectorBinop(e.Op, lhs.(vector), rhs.(vector), e.VectorMatching, e.ReturnBool)\n\t\t\t}\n\t\tcase lt == model.ValVector && rt == model.ValScalar:\n\t\t\treturn ev.vectorScalarBinop(e.Op, lhs.(vector), rhs.(*model.Scalar), false, e.ReturnBool)\n\n\t\tcase lt == model.ValScalar && rt == model.ValVector:\n\t\t\treturn ev.vectorScalarBinop(e.Op, rhs.(vector), lhs.(*model.Scalar), true, e.ReturnBool)\n\t\t}\n\n\tcase *Call:\n\t\treturn e.Func.Call(ev, e.Args)\n\n\tcase *MatrixSelector:\n\t\treturn ev.matrixSelector(e)\n\n\tcase 
*NumberLiteral:\n\t\treturn &model.Scalar{Value: e.Val, Timestamp: ev.Timestamp}\n\n\tcase *ParenExpr:\n\t\treturn ev.eval(e.Expr)\n\n\tcase *StringLiteral:\n\t\treturn &model.String{Value: e.Val, Timestamp: ev.Timestamp}\n\n\tcase *UnaryExpr:\n\t\tse := ev.evalOneOf(e.Expr, model.ValScalar, model.ValVector)\n\t\t// Only + and - are possible operators.\n\t\tif e.Op == itemSUB {\n\t\t\tswitch v := se.(type) {\n\t\t\tcase *model.Scalar:\n\t\t\t\tv.Value = -v.Value\n\t\t\tcase vector:\n\t\t\t\tfor i, sv := range v {\n\t\t\t\t\tv[i].Value = -sv.Value\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn se\n\n\tcase *VectorSelector:\n\t\treturn ev.vectorSelector(e)\n\t}\n\tpanic(fmt.Errorf(\"unhandled expression of type: %T\", expr))\n}", "func (s *Subtraction) Evaluate(left, right EvalResult) (EvalResult, error) {\n\treturn subtractNumericWithError(left, right)\n}", "func (c *ComparisonExpr) eval(env *ExpressionEnv, result *EvalResult) {\n\tvar left, right EvalResult\n\tleft.init(env, c.Left)\n\tright.init(env, c.Right)\n\tcmp, err := c.Op.compare(&left, &right)\n\tif err != nil {\n\t\tthrowEvalError(err)\n\t}\n\tresult.setBoolean(cmp)\n}", "func TestEvaluatorRelational(t *testing.T) {\n\tvar values = make(map[string]int)\n\ttestCases := []TestCase{\n\t\t{\n\t\t\tname: \"less than\",\n\t\t\texpression: \"1 < 2\",\n\t\t\texpectedValue: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"greater or equal\",\n\t\t\texpression: \"1 >= 2\",\n\t\t\texpectedValue: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"long expression\",\n\t\t\texpression: \"1 < 2 && 1 > 2 && 1 <= 2 && 1 >= 2 && 1==2 && 1 != 2\",\n\t\t\texpectedValue: 0,\n\t\t},\n\t}\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tresult, err := evaluator.Evaluate(tc.expression, values)\n\t\t\tassert.NoError(t, err, \"unexpected error\")\n\t\t\tassert.Equal(t, tc.expectedValue, result)\n\t\t})\n\t}\n}", "func eval(expression TokenStream) (value int) {\n\ts := stack.New()\n\n\tfor _, token := range expression {\n\t\tif token.kind == OPERAND {\n\t\t\ts.Push(token)\n\t\t} else {\n\t\t\top1 := s.Pop().(Token)\n\t\t\top2 := s.Pop().(Token)\n\t\t\tvar result int\n\t\t\tswitch token.sValue {\n\t\t\tcase \"+\":\n\t\t\t\tresult = op1.iValue + op2.iValue\n\t\t\tcase \"*\":\n\t\t\t\tresult = op1.iValue * op2.iValue\n\t\t\t}\n\t\t\ts.Push(Token{kind: OPERAND, iValue: result})\n\t\t}\n\t}\n\n\tt := s.Pop().(Token)\n\tvalue = t.iValue\n\n\treturn\n}", "func TestEvaluatorArithmetic(t *testing.T) {\n\tvar values = make(map[string]int)\n\ttestCases := []TestCase{\n\t\t{\n\t\t\tname: \"short expression\",\n\t\t\texpression: \"1+2*3\",\n\t\t\texpectedValue: 7,\n\t\t},\n\t\t{\n\t\t\tname: \"long expression\",\n\t\t\texpression: \"4/2-1+5%2\",\n\t\t\texpectedValue: 2,\n\t\t},\n\t}\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tresult, err := evaluator.Evaluate(tc.expression, values)\n\t\t\tassert.NoError(t, err, \"unexpected error\")\n\t\t\tassert.Equal(t, tc.expectedValue, result)\n\t\t})\n\t}\n}", "func TestEvaluatorValues(t *testing.T) {\n\tvar values = make(map[string]int)\n\tvalues[\"x\"] = 1\n\tvalues[\"y\"] = 2\n\texpression := \"x+y*2\"\n\n\tresult, err := evaluator.Evaluate(expression, values)\n\n\tassert.Nil(t, err, \"unexpected error\")\n\tassert.Equal(t, 5, result)\n}", "func TestEval(t *testing.T) {\n\tany := `.+`\n\ttestCases := []struct {\n\t\tname string\n\t\tquery string\n\t\twantErr string\n\t\twant []values.Value\n\t}{\n\t\t{\n\t\t\tname: \"string interpolation\",\n\t\t\tquery: `\n\t\t\t\tstr = \"str\"\n\t\t\t\ting = 
\"ing\"\n\t\t\t\t\"str + ing = ${str+ing}\"`,\n\t\t\twant: []values.Value{\n\t\t\t\tvalues.NewString(\"str + ing = string\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"string interpolation missing field\",\n\t\t\tquery: `\n\t\t\t\tr = makeRecord(o: {a: \"foo\", b: 42})\n\t\t\t\t\"r._value = ${r._value}\"`,\n\t\t\twantErr: any,\n\t\t},\n\t\t{\n\t\t\tname: \"string interpolation non-string type\",\n\t\t\tquery: `\n\t\t\t\tr = makeRecord(o: {a: \"foo\", b: 42})\n\t\t\t\t\"r._value = ${r.b}\"`,\n\t\t\twant: []values.Value{\n\t\t\t\tvalues.NewString(\"r._value = 42\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"string interpolation wrong type\",\n\t\t\tquery: `\n\t\t\t\tr = makeRecord(o: {a: \"foo\", b: 42})\n\t\t\t\t\"r = ${r}\"`,\n\t\t\twantErr: any,\n\t\t},\n\t\t{\n\t\t\tname: \"call builtin function\",\n\t\t\tquery: \"six()\",\n\t\t\twant: []values.Value{\n\t\t\t\tvalues.NewFloat(6.0),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"call function with fail\",\n\t\t\tquery: \"fail()\",\n\t\t\twantErr: any,\n\t\t},\n\t\t{\n\t\t\tname: \"call function with duplicate args\",\n\t\t\tquery: \"plusOne(x:1.0, x:2.0)\",\n\t\t\twantErr: any,\n\t\t},\n\t\t{\n\t\t\tname: \"binary expressions\",\n\t\t\tquery: `\n\t\t\tsix_value = six()\n\t\t\tnine_value = nine()\n\n\t\t\tfortyTwo() == six_value * nine_value\n\t\t\t`,\n\t\t\twant: []values.Value{\n\t\t\t\tvalues.NewBool(false),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"logical expressions short circuit\",\n\t\t\tquery: `\n six_value = six()\n nine_value = nine()\n\n not (fortyTwo() == six_value * nine_value) or fail()\n\t\t\t`,\n\t\t\twant: []values.Value{\n\t\t\t\tvalues.NewBool(true),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"function\",\n\t\t\tquery: `\n plusSix = (r) => r + six()\n plusSix(r:1.0) == 7.0 or fail()\n\t\t\t`,\n\t\t},\n\t\t{\n\t\t\tname: \"function block\",\n\t\t\tquery: `\n f = (r) => {\n r1 = 1.0 + r\n return (r + r1) / r\n }\n f(r:1.0) == 3.0 or fail()\n\t\t\t`,\n\t\t},\n\t\t{\n\t\t\tname: \"function block polymorphic\",\n\t\t\tquery: `\n f = (r) => {\n r2 = r * r\n return r2 / r\n }\n f(r:2.0) == 2.0 or fail()\n f(r:2) == 2 or fail()\n\t\t\t`,\n\t\t},\n\t\t{\n\t\t\tname: \"function with default param\",\n\t\t\tquery: `\n addN = (r,n=4) => r + n\n addN(r:2) == 6 or fail()\n addN(r:3,n:1) == 4 or fail()\n\t\t\t`,\n\t\t},\n\t\t{\n\t\t\tname: \"scope closing\",\n\t\t\tquery: `\n\t\t\tx = 5\n plusX = (r) => r + x\n plusX(r:2) == 7 or fail()\n\t\t\t`,\n\t\t},\n\t\t{\n\t\t\tname: \"nested scope mutations not visible outside\",\n\t\t\tquery: `\n\t\t\tx = 5\n xinc = () => {\n x = x + 1\n return x\n }\n xinc() == 6 or fail()\n x == 5 or fail()\n\t\t\t`,\n\t\t},\n\t\t// TODO(jsternberg): This test seems to not\n\t\t// infer the type constraints correctly for m.a,\n\t\t// but it doesn't fail.\n\t\t{\n\t\t\tname: \"return map from func\",\n\t\t\tquery: `\n toMap = (a,b) => ({\n a: a,\n b: b,\n })\n m = toMap(a:1, b:false)\n m.a == 1 or fail()\n not m.b or fail()\n\t\t\t`,\n\t\t},\n\t\t{\n\t\t\tname: \"pipe expression\",\n\t\t\tquery: `\n\t\t\tadd = (a=<-,b) => a + b\n\t\t\tone = 1\n\t\t\tone |> add(b:2) == 3 or fail()\n\t\t\t`,\n\t\t},\n\t\t{\n\t\t\tname: \"ignore pipe default\",\n\t\t\tquery: `\n\t\t\tadd = (a=<-,b) => a + b\n\t\t\tadd(a:1, b:2) == 3 or fail()\n\t\t\t`,\n\t\t},\n\t\t{\n\t\t\tname: \"pipe expression function\",\n\t\t\tquery: `\n\t\t\tadd = (a=<-,b) => a + b\n\t\t\tsix() |> add(b:2.0) == 8.0 or fail()\n\t\t\t`,\n\t\t},\n\t\t{\n\t\t\tname: \"pipe builtin function\",\n\t\t\tquery: `\n\t\t\tsix() |> plusOne() == 7.0 or fail()\n\t\t\t`,\n\t\t\twant: 
[]values.Value{\n\t\t\t\tvalues.NewBool(true),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"regex match\",\n\t\t\tquery: `\n\t\t\t\"abba\" =~ /^a.*a$/ or fail()\n\t\t\t`,\n\t\t\twant: []values.Value{\n\t\t\t\tvalues.NewBool(true),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"regex not match\",\n\t\t\tquery: `\n\t\t\t\"abc\" =~ /^a.*a$/ and fail()\n\t\t\t`,\n\t\t\twant: []values.Value{\n\t\t\t\tvalues.NewBool(false),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"not regex match\",\n\t\t\tquery: `\n\t\t\t\"abc\" !~ /^a.*a$/ or fail()\n\t\t\t`,\n\t\t\twant: []values.Value{\n\t\t\t\tvalues.NewBool(true),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"not regex not match\",\n\t\t\tquery: `\n\t\t\t\"abba\" !~ /^a.*a$/ and fail()\n\t\t\t`,\n\t\t\twant: []values.Value{\n\t\t\t\tvalues.NewBool(false),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"options metadata\",\n\t\t\tquery: `\n\t\t\toption task = {\n\t\t\t\tname: \"foo\",\n\t\t\t\trepeat: 100,\n\t\t\t}\n\t\t\ttask.name == \"foo\" or fail()\n\t\t\ttask.repeat == 100 or fail()\n\t\t\t`,\n\t\t\twant: []values.Value{\n\t\t\t\tvalues.NewBool(true),\n\t\t\t\tvalues.NewBool(true),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"query with side effects\",\n\t\t\tquery: `sideEffect() == 0 or fail()`,\n\t\t\twant: []values.Value{\n\t\t\t\tvalues.NewInt(0),\n\t\t\t\tvalues.NewBool(true),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"array index expression\",\n\t\t\tquery: `\n\t\t\t\ta = [1, 2, 3]\n\t\t\t\tx = a[1]\n\t\t\t\tx == 2 or fail()\n\t\t\t`,\n\t\t},\n\t\t{\n\t\t\tname: \"dict expression\",\n\t\t\tquery: `\n\t\t\t\tm = [\"a\" + \"b\": 0, \"c\" + \"d\": 1]\n\t\t\t\tx = get(dict: m, key: \"ab\", default: 2)\n\t\t\t\ty = get(dict: m, key: \"cd\", default: 2)\n\t\t\t\tz = get(dict: m, key: \"ef\", default: 2)\n\t\t\t\tx == 0 and y == 1 and z == 2 or fail()\n\t\t\t`,\n\t\t},\n\t\t{\n\t\t\tname: \"empty dictionary\",\n\t\t\tquery: `\n\t\t\t\tm0 = [:]\n\t\t\t\tm1 = insert(dict: m0, key: \"a\", value: 0)\n\t\t\t\tm2 = insert(dict: m0, key: 0, value: \"a\")\n\t\t\t\tv1 = get(dict: m1, key: \"a\", default: -1)\n\t\t\t\tv2 = get(dict: m2, key: 0, default: \"b\")\n\t\t\t\tv1 == 0 and v2 == \"a\" or fail()\n\t\t\t`,\n\t\t},\n\t\t{\n\t\t\tname: \"array index expression out of bounds low\",\n\t\t\tquery: `\n\t\t\t\ta = [1, 2, 3]\n\t\t\t\ti = -1\n\t\t\t\tx = a[i]\n\t\t\t`,\n\t\t\twantErr: any,\n\t\t},\n\t\t{\n\t\t\tname: \"array index expression out of bounds high\",\n\t\t\tquery: `\n\t\t\t\ta = [1, 2, 3]\n\t\t\t\ti = 3\n\t\t\t\tx = a[i]\n\t\t\t`,\n\t\t\twantErr: any,\n\t\t},\n\t\t{\n\t\t\tname: \"array with complex index expression\",\n\t\t\tquery: `\n\t\t\t\tf = () => ({l: 0, m: 1, n: 2})\n\t\t\t\ta = [1, 2, 3]\n\t\t\t\tx = a[f().l]\n\t\t\t\ty = a[f().m]\n\t\t\t\tz = a[f().n]\n\t\t\t\tx == 1 or fail()\n\t\t\t\ty == 2 or fail()\n\t\t\t\tz == 3 or fail()\n\t\t\t`,\n\t\t},\n\t\t{\n\t\t\tname: \"short circuit logical and\",\n\t\t\tquery: `\n false and fail()\n `,\n\t\t\twant: []values.Value{\n\t\t\t\tvalues.NewBool(false),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"short circuit logical or\",\n\t\t\tquery: `\n true or fail()\n `,\n\t\t\twant: []values.Value{\n\t\t\t\tvalues.NewBool(true),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"no short circuit logical and\",\n\t\t\tquery: `\n true and fail()\n `,\n\t\t\twantErr: any,\n\t\t},\n\t\t{\n\t\t\tname: \"no short circuit logical or\",\n\t\t\tquery: `\n false or fail()\n `,\n\t\t\twantErr: any,\n\t\t},\n\t\t{\n\t\t\tname: \"conditional true\",\n\t\t\tquery: `\n\t\t\t\tif 1 != 0 then 10 else 100\n\t\t\t`,\n\t\t\twant: 
[]values.Value{\n\t\t\t\tvalues.NewInt(10),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"conditional false\",\n\t\t\tquery: `\n\t\t\t\tif 1 == 0 then 10 else 100\n\t\t\t`,\n\t\t\twant: []values.Value{\n\t\t\t\tvalues.NewInt(100),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"conditional in function\",\n\t\t\tquery: `\n\t\t\t\tf = (t, c, a) => if t then c else a\n\t\t\t\t{\n\t\t\t\t\tv1: f(t: false, c: 30, a: 300),\n\t\t\t\t\tv2: f(t: true, c: \"cats\", a: \"dogs\"),\n\t\t\t\t}\n\t\t\t`,\n\t\t\twant: []values.Value{\n\t\t\t\tvalues.NewObjectWithValues(map[string]values.Value{\n\t\t\t\t\t\"v1\": values.NewInt(300),\n\t\t\t\t\t\"v2\": values.NewString(\"cats\"),\n\t\t\t\t}),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"exists\",\n\t\t\tquery: `hasValue(o: makeRecord(o: {value: 1}))`,\n\t\t\twant: []values.Value{\n\t\t\t\tvalues.NewBool(true),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"exists null\",\n\t\t\tquery: `hasValue(o: makeRecord(o: {val: 2}))`,\n\t\t\twant: []values.Value{\n\t\t\t\tvalues.NewBool(false),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"invalid function parameter\",\n\t\t\tquery: `from(bucket: \"telegraf\") |> window(every: 0s)`,\n\t\t\twantErr: `error calling function \"window\" @\\d+:\\d+-\\d+:\\d+: window function requires at least one of \"every\" or \"period\" to be set and non-zero`,\n\t\t},\n\t\t{\n\t\t\t// tests that we don't nest error messages when\n\t\t\t// a function call fails and gets piped into another\n\t\t\t// function.\n\t\t\tname: \"nested function error\",\n\t\t\tquery: `from(bucket: \"telegraf\") |> window(every: 0s) |> mean()`,\n\t\t\twantErr: `error calling function \"window\" @\\d+:\\d+-\\d+:\\d+: window function requires at least one of \"every\" or \"period\" to be set and non-zero`,\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\ttc := tc\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tsrc := prelude + tc.query\n\n\t\t\tctx, deps := dependency.Inject(context.Background(), dependenciestest.Default())\n\t\t\tdefer deps.Finish()\n\n\t\t\tsideEffects, _, err := runtime.Eval(ctx, src)\n\t\t\tif err != nil {\n\t\t\t\tif tc.wantErr == \"\" {\n\t\t\t\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t\t\t\t}\n\n\t\t\t\t// We expect an error, so it should be a non-internal Flux error.\n\t\t\t\tif code := flux.ErrorCode(err); code == codes.Internal || code == codes.Unknown {\n\t\t\t\t\tt.Errorf(\"expected non-internal error code, got %s\", code)\n\t\t\t\t}\n\n\t\t\t\tre := regexp.MustCompile(tc.wantErr)\n\t\t\t\tif got := err.Error(); !re.MatchString(got) {\n\t\t\t\t\tt.Errorf(\"expected error to match pattern %q, but error was %q\", tc.wantErr, got)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t} else if tc.wantErr != \"\" {\n\t\t\t\tt.Fatal(\"expected error\")\n\t\t\t}\n\n\t\t\tvs := getSideEffectsValues(sideEffects)\n\t\t\tif tc.want != nil && !cmp.Equal(tc.want, vs, semantictest.CmpOptions...) 
{\n\t\t\t\tt.Fatalf(\"unexpected side effect values -want/+got: \\n%s\", cmp.Diff(tc.want, vs, semantictest.CmpOptions...))\n\t\t\t}\n\t\t})\n\t}\n}", "func (op *OpPlus) Eval(x, y float32) float32 {\n\treturn op.LeftChild.Eval(x, y) + op.RightChild.Eval(x, y)\n}", "func (op *OpPlus) Eval(x, y float32) float32 {\n\treturn op.LeftChild.Eval(x, y) + op.RightChild.Eval(x, y)\n}", "func Eval(ctx context.Context, e Expr, vs Values) (interface{}, error) {\r\n\tfn, err := FuncOf(ctx, e, vs)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\treturn fn.Call(ctx, vs)\r\n}", "func (e *EqualOp) Evaluate(left, right EvalResult) (EvalResult, error) {\n\tif out, err := e.IsTrue(left, right); err != nil || !out {\n\t\treturn resultFalse, err\n\t}\n\treturn resultTrue, nil\n}", "func (a *Addition) Evaluate(left, right EvalResult) (EvalResult, error) {\n\treturn addNumericWithError(left, right)\n}", "func (op *OpConstant) Eval(x, y float32) float32 {\n\treturn op.value\n}", "func (f *Function) Eval(inputs ...interface{}) (args.Const, error) {\n\tlenInputs := len(inputs)\n\tif lenInputs != f.numVars {\n\t\treturn nil, errors.New(\"Number of inputs is not equal to the number of variables in function\")\n\t}\n\n\tvar operand1 args.Const\n\tvar operand2 args.Const\n\tvar operandStack []args.Const\n\n\ti := 0\n\tfor i < len(f.Args) {\n\t\tif f.typeInput(i) == args.Constant || f.typeInput(i) == args.Variable {\n\t\t\tvariable, err := f.getVar(i)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif lenInputs != 0 {\n\t\t\t\toperand, err := variable.Eval(inputs[f.varNum[variable]])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\toperandStack = append(operandStack, operand)\n\t\t\t} else {\n\t\t\t\t// If length inputs is 0, then all variables must be constant.\n\t\t\t\t// This code assumes variable is a constant and so uses 0 as an input\n\t\t\t\t// to MustEval as it will never fail as the input does not matter for constants\n\t\t\t\toperandStack = append(operandStack, variable.MustEval(0))\n\t\t\t}\n\t\t} else if f.typeInput(i) == args.Operation {\n\t\t\toperation, err := f.getOp(i)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif h, ok := unaryFuncs[operation]; ok {\n\t\t\t\tif len(operandStack) == 0 {\n\t\t\t\t\treturn nil, errors.New(\"Not enough operands\")\n\t\t\t\t}\n\n\t\t\t\toperand1, operandStack = operandStack[len(operandStack)-1], operandStack[:len(operandStack)-1]\n\t\t\t\tresult, err := h(operand1)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\toperandStack = append(operandStack, result)\n\t\t\t} else if h, ok := binaryFuncs[operation]; ok {\n\t\t\t\tif len(operandStack) < 2 {\n\t\t\t\t\treturn nil, errors.New(\"Not enough operands\")\n\t\t\t\t}\n\n\t\t\t\toperand2, operandStack = operandStack[len(operandStack)-1], operandStack[:len(operandStack)-1]\n\t\t\t\toperand1, operandStack = operandStack[len(operandStack)-1], operandStack[:len(operandStack)-1]\n\t\t\t\tresult, err := h(operand1, operand2)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\toperandStack = append(operandStack, result)\n\t\t\t} else {\n\t\t\t\treturn nil, errors.New(\"Operation not supported\")\n\t\t\t}\n\t\t}\n\t\ti++\n\t}\n\n\tif len(operandStack) > 1 {\n\t\treturn nil, errors.New(\"To many operands left over after calculation\")\n\t}\n\n\treturn operandStack[0], nil\n}", "func (op *OpX) Eval(x, y float32) float32 {\n\treturn x\n}", "func (op *OpX) Eval(x, y float32) float32 {\n\treturn x\n}", "func (e *Exp) Eval() 
float64 {\n\te.init()\n\tresult, _ := e.eval(e.opTree)\n\treturn result\n}", "func (ast *Binary) Eval(env *Env, ctx *Codegen, gen *ssa.Generator) (\n\tssa.Value, bool, error) {\n\tl, ok, err := ast.Left.Eval(env, ctx, gen)\n\tif err != nil || !ok {\n\t\treturn ssa.Undefined, ok, err\n\t}\n\tr, ok, err := ast.Right.Eval(env, ctx, gen)\n\tif err != nil || !ok {\n\t\treturn ssa.Undefined, ok, err\n\t}\n\n\tswitch lval := l.ConstValue.(type) {\n\tcase bool:\n\t\trval, ok := r.ConstValue.(bool)\n\t\tif !ok {\n\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\"invalid types: %s %s %s\", l, ast.Op, r)\n\t\t}\n\t\tswitch ast.Op {\n\t\tcase BinaryEq:\n\t\t\treturn gen.Constant(lval == rval, types.Bool), true, nil\n\t\tcase BinaryNeq:\n\t\t\treturn gen.Constant(lval != rval, types.Bool), true, nil\n\t\tcase BinaryAnd:\n\t\t\treturn gen.Constant(lval && rval, types.Bool), true, nil\n\t\tcase BinaryOr:\n\t\t\treturn gen.Constant(lval || rval, types.Bool), true, nil\n\t\tdefault:\n\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\"Binary.Eval: '%v %v %v' not supported\", l, ast.Op, r)\n\t\t}\n\n\tcase int32:\n\t\tvar rval int32\n\t\tswitch rv := r.ConstValue.(type) {\n\t\tcase int32:\n\t\t\trval = rv\n\t\tdefault:\n\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\"invalid r-value %T %s %T\", lval, ast.Op, rv)\n\t\t}\n\t\tswitch ast.Op {\n\t\tcase BinaryMult:\n\t\t\treturn gen.Constant(lval*rval, types.Int32), true, nil\n\t\tcase BinaryDiv:\n\t\t\tif rval == 0 {\n\t\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\t\"integer divide by zero\")\n\t\t\t}\n\t\t\treturn gen.Constant(lval/rval, types.Int32), true, nil\n\t\tcase BinaryMod:\n\t\t\tif rval == 0 {\n\t\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\t\"integer divide by zero\")\n\t\t\t}\n\t\t\treturn gen.Constant(lval%rval, types.Int32), true, nil\n\t\tcase BinaryLshift:\n\t\t\treturn gen.Constant(lval<<rval, types.Int32), true, nil\n\t\tcase BinaryRshift:\n\t\t\treturn gen.Constant(lval>>rval, types.Int32), true, nil\n\t\tcase BinaryBand:\n\t\t\treturn gen.Constant(lval&rval, types.Int32), true, nil\n\t\tcase BinaryBclear:\n\t\t\treturn gen.Constant(lval&^rval, types.Int32), true, nil\n\t\tcase BinaryBor:\n\t\t\treturn gen.Constant(lval|rval, types.Int32), true, nil\n\t\tcase BinaryBxor:\n\t\t\treturn gen.Constant(lval^rval, types.Int32), true, nil\n\n\t\tcase BinaryPlus:\n\t\t\treturn gen.Constant(lval+rval, types.Int32), true, nil\n\t\tcase BinaryMinus:\n\t\t\treturn gen.Constant(lval-rval, types.Int32), true, nil\n\n\t\tcase BinaryEq:\n\t\t\treturn gen.Constant(lval == rval, types.Bool), true, nil\n\t\tcase BinaryNeq:\n\t\t\treturn gen.Constant(lval != rval, types.Bool), true, nil\n\t\tcase BinaryLt:\n\t\t\treturn gen.Constant(lval < rval, types.Bool), true, nil\n\t\tcase BinaryLe:\n\t\t\treturn gen.Constant(lval <= rval, types.Bool), true, nil\n\t\tcase BinaryGt:\n\t\t\treturn gen.Constant(lval > rval, types.Bool), true, nil\n\t\tcase BinaryGe:\n\t\t\treturn gen.Constant(lval >= rval, types.Bool), true, nil\n\t\tdefault:\n\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\"Binary.Eval: '%v %s %v' not implemented yet\", l, ast.Op, r)\n\t\t}\n\n\tcase uint64:\n\t\tvar rval uint64\n\t\tswitch rv := r.ConstValue.(type) {\n\t\tcase uint64:\n\t\t\trval = rv\n\t\tdefault:\n\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\"%T: invalid r-value %v (%T)\", lval, rv, rv)\n\t\t}\n\t\tswitch ast.Op {\n\t\tcase BinaryMult:\n\t\t\treturn 
gen.Constant(lval*rval, types.Uint64), true, nil\n\t\tcase BinaryDiv:\n\t\t\tif rval == 0 {\n\t\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\t\"integer divide by zero\")\n\t\t\t}\n\t\t\treturn gen.Constant(lval/rval, types.Uint64), true, nil\n\t\tcase BinaryMod:\n\t\t\tif rval == 0 {\n\t\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\t\"integer divide by zero\")\n\t\t\t}\n\t\t\treturn gen.Constant(lval%rval, types.Uint64), true, nil\n\t\tcase BinaryLshift:\n\t\t\treturn gen.Constant(lval<<rval, types.Uint64), true, nil\n\t\tcase BinaryRshift:\n\t\t\treturn gen.Constant(lval>>rval, types.Uint64), true, nil\n\n\t\tcase BinaryPlus:\n\t\t\treturn gen.Constant(lval+rval, types.Uint64), true, nil\n\t\tcase BinaryMinus:\n\t\t\treturn gen.Constant(lval-rval, types.Uint64), true, nil\n\n\t\tcase BinaryEq:\n\t\t\treturn gen.Constant(lval == rval, types.Bool), true, nil\n\t\tcase BinaryNeq:\n\t\t\treturn gen.Constant(lval != rval, types.Bool), true, nil\n\t\tcase BinaryLt:\n\t\t\treturn gen.Constant(lval < rval, types.Bool), true, nil\n\t\tcase BinaryLe:\n\t\t\treturn gen.Constant(lval <= rval, types.Bool), true, nil\n\t\tcase BinaryGt:\n\t\t\treturn gen.Constant(lval > rval, types.Bool), true, nil\n\t\tcase BinaryGe:\n\t\t\treturn gen.Constant(lval >= rval, types.Bool), true, nil\n\t\tdefault:\n\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\"Binary.Eval: '%v %s %v' not implemented yet\", l, ast.Op, r)\n\t\t}\n\n\tdefault:\n\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Left,\n\t\t\t\"%s %v %s: invalid l-value %v (%T)\", l, ast.Op, r, lval, lval)\n\t}\n}", "func (r *RegexpOp) Evaluate(left, right EvalResult) (EvalResult, error) {\n\tpanic(\"implement me\")\n}", "func (n *NullSafeEqualOp) Evaluate(left, right EvalResult) (EvalResult, error) {\n\tpanic(\"implement me\")\n}", "func Eval(txApp *sysl.Application, assign Scope, e *sysl.Expr) *sysl.Value {\n\tswitch x := e.Expr.(type) {\n\tcase *sysl.Expr_Transform_:\n\t\treturn evalTransform(txApp, assign, x, e)\n\tcase *sysl.Expr_Binexpr:\n\t\treturn evalBinExpr(txApp, assign, x.Binexpr)\n\tcase *sysl.Expr_Call_:\n\t\treturn evalCall(txApp, assign, x)\n\tcase *sysl.Expr_Name:\n\t\treturn evalName(assign, x)\n\tcase *sysl.Expr_GetAttr_:\n\t\treturn evalGetAttr(txApp, assign, x)\n\tcase *sysl.Expr_Ifelse:\n\t\treturn evalIfelse(txApp, assign, x)\n\tcase *sysl.Expr_Literal:\n\t\treturn x.Literal\n\tcase *sysl.Expr_Set:\n\t\treturn evalSet(txApp, assign, x)\n\tcase *sysl.Expr_List_:\n\t\treturn evalList(txApp, assign, x)\n\tcase *sysl.Expr_Unexpr:\n\t\treturn evalUnaryFunc(x.Unexpr.Op, Eval(txApp, assign, x.Unexpr.Arg))\n\tdefault:\n\t\tlogrus.Warnf(\"Skipping Expr of type %T\\n\", x)\n\t\treturn nil\n\t}\n}", "func (a *AddActivity) Eval(context activity.Context) (done bool, err error) {\n\n\t//mv := context.GetInput(ivMessage)\n\tnum1, _ := context.GetInput(ivNum1).(int)\n\tnum2, _ := context.GetInput(ivNum2).(int)\n\n\tactivityLog.Info(fmt.Sprintf(\"Num1: %d, Num2: %d\", num1, num2))\n\tactivityLog.Info(fmt.Sprintf(\"Addition is : %d\", num1+num2))\n\tcontext.SetOutput(ovAddition, num1+num2)\n\n\treturn true, nil\n}", "func (d *Division) Evaluate(left, right EvalResult) (EvalResult, error) {\n\treturn divideNumericWithError(left, right)\n}", "func (l *LikeOp) Evaluate(left, right EvalResult) (EvalResult, error) {\n\tpanic(\"implement me\")\n}", "func (op *OpDiv) Eval(x, y float32) float32 {\n\treturn op.LeftChild.Eval(x, y) / op.RightChild.Eval(x, y)\n}", "func NewEvaluationExpression(op OP, lB, 
rB string) (Evaluator, error) {\n\tl, r := strings.TrimSpace(lB), strings.TrimSpace(rB)\n\tif l == \"\" || r == \"\" {\n\t\treturn nil, fmt.Errorf(\"bindings cannot be empty; got %q, %q\", l, r)\n\t}\n\tswitch op {\n\tcase EQ, LT, GT:\n\t\treturn &evaluationNode{\n\t\t\top: op,\n\t\t\tlB: lB,\n\t\t\trB: rB,\n\t\t}, nil\n\tdefault:\n\t\treturn nil, errors.New(\"evaluation expressions require the operation to be one of the following: '=', '<', '>'\")\n\t}\n}", "func (f *function) Eval(a *Apl) (Value, error) {\n\tvar err error\n\tvar l, r Value\n\n\t// The right argument must be evaluated first.\n\t// Otherwise this A←1⋄A+(A←2) evaluates to 3,\n\t// but it should evaluate to 4.\n\tr, err = f.right.Eval(a)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif f.left != nil {\n\n\t\t// Special case for modified assignments.\n\t\t// Defer evaluation of the left argument.\n\t\tif d, ok := f.Function.(*derived); ok && d.op == \"←\" {\n\t\t\tl = assignment{f.left}\n\t\t} else {\n\t\t\tl, err = f.left.Eval(a)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\t// Special case: the last function in a selective assignment uses Select instead of Call.\n\tif _, ok := f.right.(numVar); ok && f.selection {\n\t\tif d, ok := f.Function.(*derived); ok == true {\n\t\t\treturn d.Select(a, l, r)\n\t\t} else if p, ok := f.Function.(Primitive); ok == false {\n\t\t\treturn nil, fmt.Errorf(\"cannot use %T in selective assignment\", f.Function)\n\t\t} else {\n\t\t\treturn p.Select(a, l, r)\n\t\t}\n\t}\n\treturn f.Function.Call(a, l, r)\n}", "func (op *OpY) Eval(x, y float32) float32 {\n\treturn y\n}", "func (op *OpY) Eval(x, y float32) float32 {\n\treturn y\n}", "func (m *Multiplication) Evaluate(left, right EvalResult) (EvalResult, error) {\n\treturn multiplyNumericWithError(left, right)\n}", "func (e *Evaluator) Eval(expr *tipb.Expr) (types.Datum, error) {\n\tswitch expr.GetTp() {\n\tcase tipb.ExprType_Null:\n\t\treturn types.Datum{}, nil\n\tcase tipb.ExprType_Int64:\n\t\treturn e.evalInt(expr.Val)\n\tcase tipb.ExprType_Uint64:\n\t\treturn e.evalUint(expr.Val)\n\tcase tipb.ExprType_String:\n\t\treturn e.evalString(expr.Val)\n\tcase tipb.ExprType_Bytes:\n\t\treturn types.NewBytesDatum(expr.Val), nil\n\tcase tipb.ExprType_Float32:\n\t\treturn e.evalFloat(expr.Val, true)\n\tcase tipb.ExprType_Float64:\n\t\treturn e.evalFloat(expr.Val, false)\n\tcase tipb.ExprType_MysqlDecimal:\n\t\treturn e.evalDecimal(expr.Val)\n\tcase tipb.ExprType_MysqlDuration:\n\t\treturn e.evalDuration(expr.Val)\n\tcase tipb.ExprType_ColumnRef:\n\t\treturn e.evalColumnRef(expr.Val)\n\tcase tipb.ExprType_LT:\n\t\treturn e.evalLT(expr)\n\tcase tipb.ExprType_LE:\n\t\treturn e.evalLE(expr)\n\tcase tipb.ExprType_EQ:\n\t\treturn e.evalEQ(expr)\n\tcase tipb.ExprType_NE:\n\t\treturn e.evalNE(expr)\n\tcase tipb.ExprType_GE:\n\t\treturn e.evalGE(expr)\n\tcase tipb.ExprType_GT:\n\t\treturn e.evalGT(expr)\n\tcase tipb.ExprType_NullEQ:\n\t\treturn e.evalNullEQ(expr)\n\tcase tipb.ExprType_And:\n\t\treturn e.evalAnd(expr)\n\tcase tipb.ExprType_Or:\n\t\treturn e.evalOr(expr)\n\tcase tipb.ExprType_Like:\n\t\treturn e.evalLike(expr)\n\tcase tipb.ExprType_Not:\n\t\treturn e.evalNot(expr)\n\tcase tipb.ExprType_In:\n\t\treturn e.evalIn(expr)\n\tcase tipb.ExprType_Plus, tipb.ExprType_Div:\n\t\treturn e.evalArithmetic(expr)\n\t}\n\treturn types.Datum{}, nil\n}", "func binaryEval(binaryOp stmt.BinaryOP, left, right *collections.FloatArray) *collections.FloatArray {\n\tif left == nil || right == nil {\n\t\treturn nil\n\t}\n\tif left.IsEmpty() && 
right.IsEmpty() {\n\t\treturn nil\n\t}\n\n\tcapacity := left.Capacity()\n\tresult := collections.NewFloatArray(capacity)\n\n\tfor i := 0; i < capacity; i++ {\n\t\tleftHasValue := left.HasValue(i)\n\t\trightHasValue := right.HasValue(i)\n\t\tswitch {\n\t\tcase !leftHasValue && right.IsSingle():\n\t\tcase left.IsSingle() && !rightHasValue:\n\t\tcase leftHasValue || rightHasValue:\n\t\t\tresult.SetValue(i, eval(binaryOp, left.GetValue(i), right.GetValue(i)))\n\t\t}\n\t}\n\n\treturn result\n}", "func (e *BinExpr) Eval(ctx context.Context, local Scope) (_ Value, err error) {\n\ta, err := e.a.Eval(ctx, local)\n\tif err != nil {\n\t\treturn nil, WrapContextErr(err, e, local)\n\t}\n\n\tb, err := e.b.Eval(ctx, local)\n\tif err != nil {\n\t\treturn nil, WrapContextErr(err, e, local)\n\t}\n\tval, err := e.eval(ctx, a, b, local)\n\tif err != nil {\n\t\treturn nil, WrapContextErr(err, e, local)\n\t}\n\treturn val, nil\n}", "func eval2(list []*Item) int {\n\tfor len(list) > 1 {\n\n\t\tfor i := 0; i < len(list); i++ {\n\t\t\tif list[i].Typ == Operation {\n\t\t\t\tleft := list[i-2]\n\t\t\t\tright := list[i-1]\n\t\t\t\tvar val int\n\t\t\t\tswitch list[i].Operation {\n\t\t\t\tcase \"+\":\n\t\t\t\t\tval = left.Value + right.Value\n\t\t\t\tcase \"-\":\n\t\t\t\t\tval = left.Value - right.Value\n\t\t\t\tcase \"/\":\n\t\t\t\t\t// Watch for div-by-zero\n\t\t\t\t\tval = left.Value / right.Value\n\t\t\t\tcase \"*\":\n\t\t\t\t\tval = left.Value * right.Value\n\t\t\t\t}\n\t\t\t\tlist[i] = &Item{Typ: Number, Value: val}\n\t\t\t\t// The only tricky part: excising the two Number-type\n\t\t\t\t// elements of the slice list\n\t\t\t\tlist = append(list[:i-2], list[i:]...)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn list[0].Value\n}", "func TestEvaluatorWrongInput(t *testing.T) {\n\tvar values = make(map[string]int)\n\ttestCases := []TestCase{\n\t\t{\n\t\t\tname: \"mul instead of right operand\",\n\t\t\texpression: \"1**\",\n\t\t\texpectedError: true,\n\t\t},\n\t\t{\n\t\t\tname: \"forgot closing parenthesis\",\n\t\t\texpression: \"(1+2*\",\n\t\t\texpectedError: true,\n\t\t},\n\t\t{\n\t\t\tname: \"no operands\",\n\t\t\texpression: \"+\",\n\t\t\texpectedError: true,\n\t\t},\n\t\t{\n\t\t\tname: \"no right operand\",\n\t\t\texpression: \"2+\",\n\t\t\texpectedError: true,\n\t\t},\n\t\t{\n\t\t\tname: \"no left operand\",\n\t\t\texpression: \"+2\",\n\t\t\texpectedError: true,\n\t\t},\n\t\t{\n\t\t\tname: \"no left operand (minus)\",\n\t\t\texpression: \"-2\",\n\t\t\texpectedError: true,\n\t\t},\n\t\t{\n\t\t\tname: \"== typo\",\n\t\t\texpression: \"0=0\",\n\t\t\texpectedError: true,\n\t\t},\n\t\t{\n\t\t\tname: \"zero division\",\n\t\t\texpression: \"1/0\",\n\t\t\texpectedError: true,\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tif tc.expectedError {\n\t\t\t\tresult, err := evaluator.Evaluate(tc.expression, values)\n\t\t\t\tassert.Error(t, err, \"error is expected\")\n\t\t\t\tassert.Equal(t, -1, result)\n\t\t\t}\n\t\t})\n\t}\n}", "func TestEvaluateRPNArithmeticOperation(t *testing.T) {\n\t// tokens to be tokenized\n\ttokens := []evaluator.TokenWithValue{\n\t\t// RPN order (postfix)\n\t\tevaluator.ValueToken(token.INT, 1),\n\t\tevaluator.ValueToken(token.INT, 2),\n\t\tevaluator.OperatorToken(token.ADD),\n\t}\n\n\t// value map used during evaluation\n\tvar values = make(map[string]int)\n\n\t// evaluate expression represented as sequence of tokens in RPN order\n\tstack, err := evaluator.EvaluateRPN(tokens, values)\n\n\t// check the output\n\tassert.NoError(t, err)\n\tassert.False(t, 
stack.Empty())\n\tassert.Equal(t, stack.Size(), 1)\n\n\tvalue, err := stack.Pop()\n\tassert.NoError(t, err)\n\tassert.Equal(t, value, 3)\n}", "func (e CompareExpr) Eval(local Scope) (Value, error) {\n\tlhs, err := e.args[0].Eval(local)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i, arg := range e.args[1:] {\n\t\trhs, err := arg.Eval(local)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif !e.comps[i](lhs, rhs) {\n\t\t\treturn False, nil\n\t\t}\n\t\tlhs = rhs\n\t}\n\treturn True, nil\n}", "func (bo BinaryOperator) EvaluateLeftAndRight(vars map[string]interface{}, ctx interface{}, funcs FunctionMap, quotes []string) (map[string]interface{}, interface{}, interface{}, error) {\n\n\tvars, lv, err := bo.Left.Evaluate(vars, ctx, funcs, quotes)\n\tif err != nil {\n\t\treturn vars, false, false, err\n\t}\n\tvars, rv, err := bo.Right.Evaluate(vars, ctx, funcs, quotes)\n\tif err != nil {\n\t\treturn vars, false, false, err\n\t}\n\treturn vars, lv, rv, nil\n}", "func eval(sc *scope, e sexpr) sexpr {\n\te = transform(sc, e)\n\tswitch e := e.(type) {\n\tcase cons: // a function to evaluate\n\t\tcons := e\n\t\tcar := eval(sc, cons.car)\n\t\tif !isFunction(car) && !isPrimitive(car) {\n\t\t\tpanic(\"Attempted application on non-function\")\n\t\t}\n\t\tcdr := cons.cdr\n\t\targs := flatten(cdr)\n\t\tif isPrimitive(car) {\n\t\t\treturn (car.(primitive))(sc, args)\n\t\t}\n\t\tf := car.(function)\n\t\t// This is a function - evaluate all arguments\n\t\tfor i, a := range args {\n\t\t\targs[i] = eval(sc, a)\n\t\t}\n\t\treturn f(sc, args)\n\tcase sym:\n\t\treturn sc.lookup(e)\n\t}\n\treturn e\n}", "func (l *LessEqualOp) Evaluate(left, right EvalResult) (EvalResult, error) {\n\tif out, err := l.IsTrue(left, right); err != nil || !out {\n\t\treturn resultFalse, err\n\t}\n\treturn resultTrue, nil\n}", "func (i *InOp) Evaluate(left, right EvalResult) (EvalResult, error) {\n\tpanic(\"implement me\")\n}", "func (op *OpMinus) Eval(x, y float32) float32 {\n\treturn op.LeftChild.Eval(x, y) - op.RightChild.Eval(x, y)\n}", "func (g *GreaterEqualOp) Evaluate(left, right EvalResult) (EvalResult, error) {\n\tif out, err := g.IsTrue(left, right); err != nil || !out {\n\t\treturn resultFalse, err\n\t}\n\treturn resultTrue, nil\n}", "func Evaluate(query string, values map[string]interface{}) interface{} {\n\ttokens := Parser(query)\n\trpn := ToPostfix(tokens)\n\touts := SolvePostfix(rpn, values)\n\treturn outs\n}", "func (p *prog) Eval(input any) (v ref.Val, det *EvalDetails, err error) {\n\t// Configure error recovery for unexpected panics during evaluation. 
Note, the use of named\n\t// return values makes it possible to modify the error response during the recovery\n\t// function.\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tswitch t := r.(type) {\n\t\t\tcase interpreter.EvalCancelledError:\n\t\t\t\terr = t\n\t\t\tdefault:\n\t\t\t\terr = fmt.Errorf(\"internal error: %v\", r)\n\t\t\t}\n\t\t}\n\t}()\n\t// Build a hierarchical activation if there are default vars set.\n\tvar vars interpreter.Activation\n\tswitch v := input.(type) {\n\tcase interpreter.Activation:\n\t\tvars = v\n\tcase map[string]any:\n\t\tvars = activationPool.Setup(v)\n\t\tdefer activationPool.Put(vars)\n\tdefault:\n\t\treturn nil, nil, fmt.Errorf(\"invalid input, wanted Activation or map[string]any, got: (%T)%v\", input, input)\n\t}\n\tif p.defaultVars != nil {\n\t\tvars = interpreter.NewHierarchicalActivation(p.defaultVars, vars)\n\t}\n\tv = p.interpretable.Eval(vars)\n\t// The output of an internal Eval may have a value (`v`) that is a types.Err. This step\n\t// translates the CEL value to a Go error response. This interface does not quite match the\n\t// RPC signature which allows for multiple errors to be returned, but should be sufficient.\n\tif types.IsError(v) {\n\t\terr = v.(*types.Err)\n\t}\n\treturn\n}", "func (op *OpMult) Eval(x, y float32) float32 {\n\treturn op.LeftChild.Eval(x, y) * op.RightChild.Eval(x, y)\n}", "func (op *OpAtan2) Eval(x, y float32) float32 {\n\treturn float32(math.Atan2(float64(y), float64(x)))\n}", "func evalBinaryStringExpr(ctx *Ctx, x reflect.Value, op token.Token, y reflect.Value) (reflect.Value, error) {\n\tvar err error\n\tvar r string\n\tvar b bool\n\tis_bool := false\n\n\txx, yy := x.String(), y.String()\n\tswitch op {\n\tcase token.ADD:\tr = xx + yy\n\tcase token.EQL: b = xx == yy; is_bool = true\n\tcase token.NEQ: b = xx != yy; is_bool = true\n\tcase token.LEQ: b = xx <= yy; is_bool = true\n\tcase token.GEQ: b = xx >= yy; is_bool = true\n\tcase token.LSS: b = xx < yy; is_bool = true\n\tcase token.GTR: b = xx > yy; is_bool = true\n\tdefault: err = ErrInvalidOperands{x, op, y}\n\t}\n\tif is_bool {\n\t\treturn reflect.ValueOf(b), err\n\t} else {\n\t\treturn reflect.ValueOf(r).Convert(x.Type()), err\n\t}\n}", "func (op *OpAtan) Eval(x, y float32) float32 {\n\treturn float32(math.Atan(float64(op.Child.Eval(x, y))))\n}", "func Eval(node ast.Node, env *object.Environment, stop <-chan struct{}) object.Object {\n\tselect {\n\tcase <-stop:\n\t\treturn ConstNil\n\tdefault:\n\t}\n\n\tswitch node := node.(type) {\n\t// statements\n\tcase *ast.Program:\n\t\treturn evalProgram(node, env, stop)\n\tcase *ast.LetStatement:\n\t\tval := Eval(node.Value, env, stop)\n\t\tif isError(val) {\n\t\t\treturn val\n\t\t}\n\t\tenv.Set(node.Name.Value, val)\n\tcase *ast.ReturnStatement:\n\t\tval := Eval(node.Value, env, stop)\n\t\tif isError(val) {\n\t\t\treturn val\n\t\t}\n\t\treturn &object.ReturnValue{Value: val}\n\tcase *ast.BlockStatement:\n\t\treturn evalBlockStatement(node, env, stop)\n\tcase *ast.ExpressionStatement:\n\t\treturn Eval(node.Expression, env, stop)\n\n\t\t// expressions\n\tcase *ast.PrefixExpression:\n\t\tright := Eval(node.Right, env, stop)\n\t\tif isError(right) {\n\t\t\treturn right\n\t\t}\n\t\treturn evalPrefixExpr(node.Token, right)\n\tcase *ast.InfixExpression:\n\t\tif node.Operator == token.Assign {\n\t\t\treturn evalAssign(node, env, stop)\n\t\t}\n\n\t\tleft := Eval(node.Left, env, stop)\n\t\tif isError(left) {\n\t\t\treturn left\n\t\t}\n\t\tright := Eval(node.Right, env, stop)\n\t\tif isError(right) {\n\t\t\treturn 
right\n\t\t}\n\t\treturn evalInfixExpr(node.Token, left, right)\n\tcase *ast.IndexExpression:\n\t\tleft := Eval(node.Left, env, stop)\n\t\tif isError(left) {\n\t\t\treturn left\n\t\t}\n\n\t\tindex := Eval(node.Index, env, stop)\n\t\tif isError(index) {\n\t\t\treturn index\n\t\t}\n\t\treturn evalIndexExpr(node.Token, left, index)\n\tcase *ast.IfExpression:\n\t\treturn evalIfExpr(node, env, stop)\n\tcase *ast.WhileExpression:\n\t\treturn evalWhileExpr(node, env, stop)\n\tcase *ast.CallExpression:\n\t\tfunction := Eval(node.Func, env, stop)\n\t\tif isError(function) {\n\t\t\treturn function\n\t\t}\n\n\t\targs, err := evalExpressions(node.Args, env, stop)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn doFunction(node.Token, function, args, stop)\n\n\t\t// literals\n\tcase *ast.IntegerLiteral:\n\t\treturn &object.Integer{Value: node.Value}\n\tcase *ast.FloatLiteral:\n\t\treturn &object.Float{Value: node.Value}\n\tcase *ast.BooleanLiteral:\n\t\treturn boolToBoolean(node.Value)\n\tcase *ast.NilLiteral:\n\t\treturn ConstNil\n\tcase *ast.FunctionLiteral:\n\t\treturn &object.Function{Params: node.Params, Body: node.Body, Env: env}\n\tcase *ast.StringLiteral:\n\t\treturn &object.String{Value: node.Value}\n\tcase *ast.ArrayLiteral:\n\t\telems, err := evalExpressions(node.Elements, env, stop)\n\t\tif len(elems) == 1 && err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn &object.Array{Elements: elems}\n\tcase *ast.Identifier:\n\t\treturn evalIdentifier(node, env)\n\tcase *ast.AccessIdentifier:\n\t\treturn evalAccessIdentifier(node, env)\n\t}\n\treturn nil\n}", "func (b *BitOp) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {\n\tlval, rval, err := b.evalLeftRight(ctx, row)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif lval == nil || rval == nil {\n\t\treturn nil, nil\n\t}\n\n\tlval, rval, err = b.convertLeftRight(ctx, lval, rval)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch strings.ToLower(b.Op) {\n\tcase sqlparser.BitAndStr:\n\t\treturn bitAnd(lval, rval)\n\tcase sqlparser.BitOrStr:\n\t\treturn bitOr(lval, rval)\n\tcase sqlparser.BitXorStr:\n\t\treturn bitXor(lval, rval)\n\tcase sqlparser.ShiftLeftStr:\n\t\treturn shiftLeft(lval, rval)\n\tcase sqlparser.ShiftRightStr:\n\t\treturn shiftRight(lval, rval)\n\t}\n\n\treturn nil, errUnableToEval.New(lval, b.Op, rval)\n}", "func (b *BinaryExpr) Evaluate(env ExpressionEnv) (EvalResult, error) {\n\tlVal, err := b.Left.Evaluate(env)\n\tif err != nil {\n\t\treturn EvalResult{}, err\n\t}\n\trVal, err := b.Right.Evaluate(env)\n\tif err != nil {\n\t\treturn EvalResult{}, err\n\t}\n\treturn b.Op.Evaluate(lVal, rVal)\n}", "func TestEvaluatorParenthesis(t *testing.T) {\n\tvar values = make(map[string]int)\n\texpression := \"(1+2)*3\"\n\n\tresult, err := evaluator.Evaluate(expression, values)\n\n\tassert.Nil(t, err, \"unexpected error\")\n\tassert.Equal(t, 9, result)\n}", "func (p *Qlang) Eval(expr string) (err error) {\n\n\treturn p.Exec([]byte(expr), \"\")\n}", "func Eval(input string, context map[string]interface{}) float64 {\n\tnode, err := Parse(input)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\texpr := &expression{node, context}\n\treturn expr.eval(expr.ast)\n}", "func evaluate(expression []string, actions ActionTable, stack *Stack) interface{} {\n\tfor _, t := range expression {\n\t\tvar action ActionFunc\n\t\tif _, err := strconv.ParseFloat(t, 64); err == nil {\n\t\t\taction = actions[\"NUMBER\"]\n\t\t} else {\n\t\t\tvar ok bool\n\t\t\tif action, ok = actions[t]; !ok {\n\t\t\t\taction = 
actions[\"__DEFAULT__\"]\n\t\t\t}\n\t\t}\n\t\taction(t, stack)\n\t}\n\treturn stack.Pop()\n}", "func evalBinaryFloatExpr(ctx *Ctx, x reflect.Value, op token.Token, y reflect.Value) (reflect.Value, error) {\n\tvar err error\n\tvar r float64\n\n\txx, yy := x.Float(), y.Float()\n\tswitch op {\n\tcase token.ADD: r = xx + yy\n\tcase token.SUB: r = xx - yy\n\tcase token.MUL: r = xx * yy\n\tcase token.QUO: r = xx / yy\n\t// case token.EQL: b = xx == yy\n\t// case token.LSS: b = xx < yy\n\t// case token.GTR: b = xx > yy\n\tdefault: err = ErrInvalidOperands{x, op, y}\n\t}\n\treturn reflect.ValueOf(r).Convert(x.Type()), err\n}", "func execEval(_ int, p *gop.Context) {\n\targs := p.GetArgs(4)\n\tret, ret1 := types.Eval(args[0].(*token.FileSet), args[1].(*types.Package), token.Pos(args[2].(int)), args[3].(string))\n\tp.Ret(4, ret, ret1)\n}", "func (e *Evaluator) Eval(expr string) (interface{}, error) {\n\tn := e.n.Copy()\n\t_expr, err := xpath.Compile(expr)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"expr cannot compile: %w\", err)\n\t}\n\n\tv := _expr.Evaluate(n)\n\tswitch v := v.(type) {\n\tcase *xpath.NodeIterator:\n\t\tns := nodes(v)\n\t\tvs := make([]interface{}, 0, len(ns))\n\t\tfor i := range ns {\n\t\t\tswitch n := ns[i].(type) {\n\t\t\tcase attr:\n\t\t\t\tvs = append(vs, n.val)\n\t\t\t}\n\t\t}\n\t\tif len(vs) == len(ns) {\n\t\t\treturn vs, nil\n\t\t}\n\t\treturn ns, nil\n\t}\n\n\treturn v, nil\n}", "func (da *DateArith) Eval(ctx context.Context, args map[interface{}]interface{}) (interface{}, error) {\n\tt, years, months, days, durations, err := da.evalArgs(ctx, args)\n\tif t.IsZero() || err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tif !da.isAdd() {\n\t\tyears, months, days, durations = -years, -months, -days, -durations\n\t}\n\tt.Time = t.Time.Add(durations)\n\tt.Time = t.Time.AddDate(int(years), int(months), int(days))\n\n\t// \"2011-11-11 10:10:20.000000\" outputs \"2011-11-11 10:10:20\".\n\tif t.Time.Nanosecond() == 0 {\n\t\tt.Fsp = 0\n\t}\n\n\treturn t, nil\n}", "func (i *InExpr) eval(env *ExpressionEnv, result *EvalResult) {\n\tvar left, right EvalResult\n\tleft.init(env, i.Left)\n\tright.init(env, i.Right)\n\n\tif right.typeof() != querypb.Type_TUPLE {\n\t\tthrowEvalError(vterrors.Errorf(vtrpcpb.Code_INTERNAL, \"rhs of an In operation should be a tuple\"))\n\t}\n\tif left.null() {\n\t\tresult.setNull()\n\t\treturn\n\t}\n\n\tvar foundNull, found bool\n\tvar righttuple = right.tuple()\n\n\tif i.Hashed != nil {\n\t\thash, err := left.nullSafeHashcode()\n\t\tif err != nil {\n\t\t\tthrowEvalError(err)\n\t\t}\n\t\tif idx, ok := i.Hashed[hash]; ok {\n\t\t\tvar numeric int\n\t\t\tnumeric, foundNull, err = evalCoerceAndCompare(&left, &righttuple[idx], true)\n\t\t\tif err != nil {\n\t\t\t\tthrowEvalError(err)\n\t\t\t}\n\t\t\tfound = numeric == 0\n\t\t}\n\t} else {\n\t\tfor _, rtuple := range righttuple {\n\t\t\tnumeric, isNull, err := evalCoerceAndCompare(&left, &rtuple, true)\n\t\t\tif err != nil {\n\t\t\t\tthrowEvalError(err)\n\t\t\t}\n\t\t\tif isNull {\n\t\t\t\tfoundNull = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif numeric == 0 {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tswitch {\n\tcase found:\n\t\tresult.setBool(!i.Negate)\n\tcase foundNull:\n\t\tresult.setNull()\n\tdefault:\n\t\tresult.setBool(i.Negate)\n\t}\n}", "func (o *Operation) Operate(leftValue, rightValue int) int {\n\treturn o.Operator.Apply(leftValue, rightValue)\n}", "func evalValForAddArrow(lhs, rhs Value) (Value, error) {\n\tswitch lhs := lhs.(type) {\n\tcase Tuple:\n\t\tif rhs, ok := 
rhs.(Tuple); ok {\n\t\t\treturn MergeLeftToRight(lhs, rhs), nil\n\t\t}\n\tcase Dict:\n\t\tswitch rhs := rhs.(type) {\n\t\tcase Dict:\n\t\t\treturn mergeDicts(lhs, rhs), nil\n\t\tcase Set:\n\t\t\tif !rhs.IsTrue() {\n\t\t\t\treturn lhs, nil\n\t\t\t}\n\t\t}\n\tcase Set:\n\t\tif !lhs.IsTrue() {\n\t\t\tswitch rhs := rhs.(type) {\n\t\t\tcase Dict:\n\t\t\t\treturn rhs, nil\n\t\t\tcase Set:\n\t\t\t\tif !rhs.IsTrue() {\n\t\t\t\t\treturn lhs, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil, errors.Errorf(\n\t\t\"Args to +> must be both tuples or both dicts, not %s and %s\",\n\t\tValueTypeAsString(lhs), ValueTypeAsString(rhs))\n}", "func internalNewEvaluator(ce []ConsumedElement) (Evaluator, []ConsumedElement, error) {\n\tif len(ce) == 0 {\n\t\treturn nil, nil, errors.New(\"cannot create an evaluator from an empty sequence of tokens\")\n\t}\n\thead, tail := ce[0], ce[1:]\n\ttkn := head.Token()\n\n\t// Not token\n\tif tkn.Type == lexer.ItemNot {\n\t\ttailEval, tailCEs, err := internalNewEvaluator(tail)\n\t\tif err != nil {\n\t\t\treturn nil, tailCEs, err\n\t\t}\n\t\te, err := NewUnaryBooleanExpression(NOT, tailEval)\n\t\tif err != nil {\n\t\t\treturn nil, tailCEs, err\n\t\t}\n\t\treturn e, tailCEs, nil\n\t}\n\n\t// Binding token\n\tif tkn.Type == lexer.ItemBinding {\n\t\tif len(tail) < 2 {\n\t\t\treturn nil, nil, fmt.Errorf(\"cannot create a binary evaluation operand for %v\", ce)\n\t\t}\n\t\topTkn, bndTkn := tail[0].Token(), tail[1].Token()\n\t\tvar op OP\n\t\tswitch opTkn.Type {\n\t\tcase lexer.ItemEQ:\n\t\t\top = EQ\n\t\tcase lexer.ItemLT:\n\t\t\top = LT\n\t\tcase lexer.ItemGT:\n\t\t\top = GT\n\t\tdefault:\n\t\t\treturn nil, nil, fmt.Errorf(\"cannot create a binary evaluation operand for %v\", opTkn)\n\t\t}\n\t\tif bndTkn.Type == lexer.ItemBinding {\n\t\t\te, err := NewEvaluationExpression(op, tkn.Text, bndTkn.Text)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t\tvar res []ConsumedElement\n\t\t\tif len(tail) > 2 {\n\t\t\t\tres = tail[2:]\n\t\t\t}\n\t\t\treturn e, res, nil\n\t\t}\n\t\treturn nil, nil, fmt.Errorf(\"cannot build a binary evaluation operand with right operand %v\", bndTkn)\n\t}\n\n\t// LPar Token\n\tif tkn.Type == lexer.ItemLPar {\n\t\ttailEval, ce, err := internalNewEvaluator(tail)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tif len(ce) < 1 {\n\t\t\treturn nil, nil, errors.New(\"incomplete parenthesis expression; missing ')'\")\n\t\t}\n\t\thead, tail = ce[0], ce[1:]\n\t\tif head.Token().Type != lexer.ItemRPar {\n\t\t\treturn nil, nil, fmt.Errorf(\"missing right parenthesis in expression; found %v instead\", head)\n\t\t}\n\t\tif len(tail) > 1 {\n\t\t\t// Binary boolean expression.\n\t\t\topTkn := tail[0].Token()\n\t\t\tvar op OP\n\t\t\tswitch opTkn.Type {\n\t\t\tcase lexer.ItemAnd:\n\t\t\t\top = AND\n\t\t\tcase lexer.ItemOr:\n\t\t\t\top = OR\n\t\t\tdefault:\n\t\t\t\treturn nil, nil, fmt.Errorf(\"cannot create a binary boolean evaluation operand for %v\", opTkn)\n\t\t\t}\n\t\t\trTailEval, ceResTail, err := internalNewEvaluator(tail[1:])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t\tev, err := NewBinaryBooleanExpression(op, tailEval, rTailEval)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t\treturn ev, ceResTail, nil\n\t\t}\n\t\treturn tailEval, tail, nil\n\t}\n\n\tvar tkns []string\n\tfor _, e := range ce {\n\t\ttkns = append(tkns, fmt.Sprintf(\"%q\", e.token.Type))\n\t}\n\treturn nil, nil, fmt.Errorf(\"could not create an evaluator for condition {%s}\", strings.Join(tkns, \",\"))\n}", "func (t *Check) 
Eval(r, s string) (bool, error) {\n\treturn false, errors.New(\"Not implemented\")\n}", "func TestEvaluatorBoolean(t *testing.T) {\n\tvar values = make(map[string]int)\n\ttestCases := []TestCase{\n\t\t{\n\t\t\tname: \"and\",\n\t\t\texpression: \"1 && 0\",\n\t\t\texpectedValue: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"or\",\n\t\t\texpression: \"1 || 0\",\n\t\t\texpectedValue: 1,\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tresult, err := evaluator.Evaluate(tc.expression, values)\n\t\t\tassert.NoError(t, err, \"unexpected error\")\n\t\t\tassert.Equal(t, tc.expectedValue, result)\n\t\t})\n\t}\n}", "func TestEvaluateRPNJustArithmeticOperator(t *testing.T) {\n\t// tokens to be tokenized\n\ttokens := []evaluator.TokenWithValue{\n\t\tevaluator.OperatorToken(token.ADD),\n\t}\n\n\t// value map used during evaluation\n\tvar values = make(map[string]int)\n\n\t// evaluate expression represented as sequence of tokens in RPN order\n\t_, err := evaluator.EvaluateRPN(tokens, values)\n\n\t// check the output -> error needs to be detected\n\tassert.Error(t, err)\n}", "func (op *OpSin) Eval(x, y float32) float32 {\n\treturn float32(math.Sin(float64(op.Child.Eval(x, y))))\n}", "func Eval(node ast.Node, env *object.Environment) object.Object {\n\tswitch node := node.(type) {\n\n\t// Statements\n\tcase *ast.RootNode:\n\t\treturn evalRootNode(node, env)\n\n\tcase *ast.BlockStatement:\n\t\treturn evalBlockStmt(node, env)\n\n\tcase *ast.ExpressionStatement:\n\t\treturn Eval(node.Expression, env)\n\n\tcase *ast.ReturnStatement:\n\t\tval := Eval(node.ReturnValue, env)\n\t\tif isError(val) {\n\t\t\treturn val\n\t\t}\n\t\treturn &object.ReturnValue{Value: val}\n\n\tcase *ast.LetStatement:\n\t\tval := Eval(node.Value, env)\n\t\tif isError(val) {\n\t\t\treturn val\n\t\t}\n\t\tenv.Set(node.Name.Value, val)\n\n\tcase *ast.ConstStatement:\n\t\tval := Eval(node.Value, env)\n\t\tif isError(val) {\n\t\t\treturn val\n\t\t}\n\t\tenv.Set(node.Name.Value, val)\n\n\t// Expressions\n\tcase *ast.IntegerLiteral:\n\t\treturn &object.Integer{Value: node.Value}\n\n\tcase *ast.StringLiteral:\n\t\treturn &object.String{Value: node.Value}\n\n\tcase *ast.Boolean:\n\t\treturn nativeBoolToBooleanObj(node.Value)\n\n\tcase *ast.PrefixExpression:\n\t\tright := Eval(node.Right, env)\n\t\tif isError(right) {\n\t\t\treturn right\n\t\t}\n\t\treturn evalPrefixExpr(node.Operator, right, node.Token.Line)\n\n\tcase *ast.InfixExpression:\n\t\tleft := Eval(node.Left, env)\n\t\tif isError(left) {\n\t\t\treturn left\n\t\t}\n\t\tright := Eval(node.Right, env)\n\t\tif isError(right) {\n\t\t\treturn right\n\t\t}\n\t\treturn evalInfixExpr(node.Operator, left, right, node.Token.Line)\n\n\tcase *ast.PostfixExpression:\n\t\treturn evalPostfixExpr(env, node.Operator, node)\n\n\tcase *ast.IfExpression:\n\t\treturn evalIfExpr(node, env)\n\n\tcase *ast.Identifier:\n\t\treturn evalIdentifier(node, env)\n\n\tcase *ast.FunctionLiteral:\n\t\tparams := node.Parameters\n\t\tbody := node.Body\n\t\treturn &object.Function{\n\t\t\tParameters: params,\n\t\t\tBody: body,\n\t\t\tEnv: env,\n\t\t}\n\n\tcase *ast.CallExpression:\n\t\tfn := Eval(node.Function, env)\n\t\tif isError(fn) {\n\t\t\treturn fn\n\t\t}\n\t\targs := evalExprs(node.Arguments, env)\n\t\tif len(args) == 1 && isError(args[0]) {\n\t\t\treturn args[0]\n\t\t}\n\t\treturn applyFunction(fn, args, node.Token.Line)\n\n\tcase *ast.ArrayLiteral:\n\t\telements := evalExprs(node.Elements, env)\n\t\tif len(elements) == 1 && isError(elements[0]) {\n\t\t\treturn 
elements[0]\n\t\t}\n\t\treturn &object.Array{Elements: elements}\n\n\tcase *ast.IndexExpression:\n\t\tleft := Eval(node.Left, env)\n\t\tif isError(left) {\n\t\t\treturn left\n\t\t}\n\t\tindex := Eval(node.Index, env)\n\t\tif isError(index) {\n\t\t\treturn index\n\t\t}\n\t\treturn evalIndexExpr(left, index, node.Token.Line)\n\n\tcase *ast.HashLiteral:\n\t\treturn evalHashLiteral(node, env)\n\t}\n\n\treturn nil\n}", "func evalBinaryIntExpr(ctx *Ctx, x reflect.Value, op token.Token, y reflect.Value) (reflect.Value, error) {\n\tvar r int64\n\tvar err error\n\tvar b bool\n\tis_bool := false\n\n\txx, yy := x.Int(), y.Int()\n\tswitch op {\n\tcase token.ADD: r = xx + yy\n\tcase token.SUB: r = xx - yy\n\tcase token.MUL: r = xx * yy\n\tcase token.QUO: r = xx / yy\n\tcase token.REM: r = xx % yy\n\tcase token.AND: r = xx & yy\n\tcase token.OR: r = xx | yy\n\tcase token.XOR: r = xx ^ yy\n\tcase token.AND_NOT: r = xx &^ yy\n\tcase token.EQL: b = xx == yy; is_bool = true\n\tcase token.NEQ: b = xx != yy; is_bool = true\n\tcase token.LEQ: b = xx <= yy; is_bool = true\n\tcase token.GEQ: b = xx >= yy; is_bool = true\n\tcase token.LSS: b = xx < yy; is_bool = true\n\tcase token.GTR: b = xx > yy; is_bool = true\n\tdefault: err = ErrInvalidOperands{x, op, y}\n\t}\n\tif is_bool {\n\t\treturn reflect.ValueOf(b), err\n\t} else {\n\t\treturn reflect.ValueOf(r).Convert(x.Type()), err\n\t}\n}", "func evalRPN(tokens []string) int {\r\n\tvar tmpStack []int\r\n\toperators := make(map[string]bool)\r\n\tfor _, o := range [...]string{\"+\", \"-\", \"*\", \"/\"} {\r\n\t\toperators[o] = true\r\n\t}\r\n\r\n\tfor _, v := range tokens {\r\n\t\tif !operators[v] {\r\n\t\t\tnu, _ := strconv.Atoi(v)\r\n\t\t\ttmpStack = append(tmpStack, nu)\r\n\t\t} else {\r\n\t\t\tb := tmpStack[(len(tmpStack) - 1)]\r\n\t\t\ta := tmpStack[(len(tmpStack) - 2)]\r\n\t\t\ttmpStack = tmpStack[:len(tmpStack)-2]\r\n\t\t\tvar res int\r\n\t\t\tif v == \"+\" {\r\n\t\t\t\tres = a + b\r\n\t\t\t} else if v == \"-\" {\r\n\t\t\t\tres = a - b\r\n\t\t\t} else if v == \"*\" {\r\n\t\t\t\tres = a * b\r\n\t\t\t} else if v == \"/\" {\r\n\t\t\t\tres = a / b\r\n\t\t\t}\r\n\t\t\ttmpStack = append(tmpStack, res)\r\n\t\t}\r\n\t}\r\n\treturn tmpStack[0]\r\n}", "func (or Or) Eval(visited []bool) bool {\n\treturn or.Left.Eval(visited) || or.Right.Eval(visited)\n}", "func (rt *operatorRuntime) numOp(op func(float64, float64) interface{},\n\tvs parser.Scope, is map[string]interface{}, tid uint64) (interface{}, error) {\n\tvar ok bool\n\tvar res1, res2 interface{}\n\tvar err error\n\n\terrorutil.AssertTrue(len(rt.node.Children) == 2,\n\t\tfmt.Sprint(\"Operation requires 2 operands\", rt.node))\n\n\tif res1, err = rt.node.Children[0].Runtime.Eval(vs, is, tid); err == nil {\n\t\tif res2, err = rt.node.Children[1].Runtime.Eval(vs, is, tid); err == nil {\n\t\t\tvar res1Num, res2Num float64\n\n\t\t\tif res1Num, ok = res1.(float64); !ok {\n\t\t\t\terr = rt.erp.NewRuntimeError(util.ErrNotANumber,\n\t\t\t\t\trt.errorDetailString(rt.node.Children[0].Token, res1), rt.node.Children[0])\n\n\t\t\t} else {\n\t\t\t\tif res2Num, ok = res2.(float64); !ok {\n\t\t\t\t\terr = rt.erp.NewRuntimeError(util.ErrNotANumber,\n\t\t\t\t\t\trt.errorDetailString(rt.node.Children[1].Token, res2), rt.node.Children[1])\n\n\t\t\t\t} else {\n\n\t\t\t\t\treturn op(res1Num, res2Num), err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil, err\n}", "func (rt *operatorRuntime) genOp(op func(interface{}, interface{}) interface{},\n\tvs parser.Scope, is map[string]interface{}, tid uint64) (interface{}, error) {\n\n\tvar ret 
interface{}\n\n\terrorutil.AssertTrue(len(rt.node.Children) == 2,\n\t\tfmt.Sprint(\"Operation requires 2 operands\", rt.node))\n\n\tres1, err := rt.node.Children[0].Runtime.Eval(vs, is, tid)\n\tif err == nil {\n\t\tvar res2 interface{}\n\n\t\tif res2, err = rt.node.Children[1].Runtime.Eval(vs, is, tid); err == nil {\n\t\t\tret = op(res1, res2)\n\t\t}\n\t}\n\n\treturn ret, err\n}", "func TestPerformArithmeticOperation(t *testing.T) {\n\t// operand stack (also known as data stack)\n\tstack := evaluator.Stack{}\n\n\t// push two values onto the stack\n\tstack.Push(1)\n\tstack.Push(2)\n\n\t// any token that is not token.QUO or token.REM\n\ttok := token.ADD\n\n\t// perform the selected arithmetic operation\n\taddOperation := func(x int, y int) int { return x + y }\n\n\t// perform the selected arithmetic operation\n\terr := evaluator.PerformArithmeticOperation(&stack, addOperation, tok)\n\tassert.NoError(t, err)\n\n\t// check stack\n\tassert.False(t, stack.Empty())\n\tassert.Equal(t, stack.Size(), 1)\n\n\t// stack should contain one value\n\tvalue, err := stack.Pop()\n\tassert.NoError(t, err)\n\tassert.Equal(t, value, 3)\n}", "func Eval(t testing.TestingT, options *EvalOptions, jsonFilePaths []string, resultQuery string) {\n\trequire.NoError(t, EvalE(t, options, jsonFilePaths, resultQuery))\n}", "func Evaluate(input string) (decimal.Decimal, error) {\n\tvar stack []decimal.Decimal\n\tinputs := strings.Split(input, \" \")\n\n\tfor _, command := range inputs {\n\t\tswitch command {\n\t\tcase \"+\", \"-\", \"*\", \"/\", \"%\", \"^\":\n\t\t\tif len(stack) < 2 {\n\t\t\t\treturn decimal.Zero, errors.New(\"stack overflow\")\n\t\t\t}\n\t\t\tlhs := stack[len(stack)-2]\n\t\t\trhs := stack[len(stack)-1]\n\t\t\tstack = stack[:len(stack)-1]\n\t\t\tswitch command {\n\t\t\tcase \"+\":\n\t\t\t\trhs = lhs.Add(rhs)\n\t\t\tcase \"-\":\n\t\t\t\trhs = lhs.Sub(rhs)\n\t\t\tcase \"*\":\n\t\t\t\trhs = lhs.Mul(rhs)\n\t\t\tcase \"/\":\n\t\t\t\trhs = lhs.Div(rhs)\n\t\t\tcase \"%\":\n\t\t\t\trhs = lhs.Mod(rhs)\n\t\t\tcase \"^\":\n\t\t\t\trhs = lhs.Pow(rhs)\n\t\t\t}\n\t\t\tstack[len(stack)-1] = rhs\n\t\tcase \"abs\", \"atan\", \"ceil\", \"cos\", \"floor\", \"neg\", \"sin\", \"tan\":\n\t\t\tif len(stack) < 1 {\n\t\t\t\treturn decimal.Zero, errors.New(\"stack overflow\")\n\t\t\t}\n\t\t\tval := stack[len(stack)-1]\n\t\t\tswitch command {\n\t\t\tcase \"abs\":\n\t\t\t\tval = val.Abs()\n\t\t\tcase \"atan\":\n\t\t\t\tval = val.Atan()\n\t\t\tcase \"ceil\":\n\t\t\t\tval = val.Ceil()\n\t\t\tcase \"cos\":\n\t\t\t\tval = val.Cos()\n\t\t\tcase \"floor\":\n\t\t\t\tval = val.Floor()\n\t\t\tcase \"neg\":\n\t\t\t\tval = val.Neg()\n\t\t\tcase \"sin\":\n\t\t\t\tval = val.Sin()\n\t\t\tcase \"tan\":\n\t\t\t\tval = val.Tan()\n\t\t\t}\n\t\t\tstack[len(stack)-1] = val\n\t\tdefault:\n\t\t\tval, err := decimal.NewFromString(command)\n\t\t\tif err != nil {\n\t\t\t\treturn val, err\n\t\t\t}\n\t\t\tstack = append(stack, val)\n\t\t}\n\t}\n\n\tif len(stack) != 1 {\n\t\treturn decimal.Zero, errors.New(\"unclean stack\")\n\t}\n\treturn stack[0], nil\n}", "func (client *Client) Eval(scriptText string, keys, args []interface{}) (*Variable, error) {\n _args := make([]interface{}, 2+len(keys)+len(args))\n _args[0] = scriptText\n _args[1] = len(keys)\n for i:=0; i<len(keys); i+=1 {\n\t_args[i+2] = keys[i]\n }\n for i:=0; i<len(args); i+=1 {\n\t_args[i+2+len(keys)] = args[i]\n }\n return client.Do(\"EVAL\", _args...)\n}", "func TestPerformArithmeticOperationMissingBothOperands(t *testing.T) {\n\t// operand stack (also known as data stack)\n\tstack := 
evaluator.Stack{}\n\n\t// stack is empty!\n\n\t// any token that is not token.QUO or token.REM\n\ttok := token.ADD\n\n\t// perform the selected arithmetic operation\n\taddOperation := func(x int, y int) int { return x + y }\n\n\t// perform the selected arithmetic operation\n\terr := evaluator.PerformArithmeticOperation(&stack, addOperation, tok)\n\tassert.Error(t, err)\n}", "func perform_arthmitic(result *int, operation string, left_value int, right_value int) {\n if operation == \"+\" {\n *result = left_value + right_value\n\n } else if operation == \"-\" {\n *result = left_value - right_value\n\n } else if operation == \"*\" {\n *result = left_value * right_value\n\n } else if operation == \".\" {\n *result = left_value / right_value\n }\n}", "func (s server) Eval(ctx context.Context, req *entity.Request) (*entity.Result, error) {\n\tlog.Printf(\"Received a request: %+v\\n\", req)\n\tresult := &entity.Result{}\n\tres, err := s.usecase.Eval(req.Value)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\tresult.Value = strconv.FormatFloat(res, 'G', -1, 64)\n\treturn result, nil\n}", "func (e *binaryExprEvaluator) run() {\n\tfor {\n\t\t// Read LHS value.\n\t\tlhs, ok := <-e.lhs.C()\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\n\t\t// Read RHS value.\n\t\trhs, ok := <-e.rhs.C()\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\n\t\t// Merge maps.\n\t\tm := make(map[string]interface{})\n\t\tfor k, v := range lhs {\n\t\t\tm[k] = e.eval(v, rhs[k])\n\t\t}\n\t\tfor k, v := range rhs {\n\t\t\t// Skip value if already processed in lhs loop.\n\t\t\tif _, ok := m[k]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tm[k] = e.eval(float64(0), v)\n\t\t}\n\n\t\t// Return value.\n\t\te.c <- m\n\t}\n\n\t// Mark the channel as complete.\n\tclose(e.c)\n}", "func (v LiteralValue) Eval(*Environment) (document.Value, error) {\n\treturn document.Value(v), nil\n}", "func (s *STEquals) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {\n\tgeom1, err := s.Left.Eval(ctx, row)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgeom2, err := s.Right.Eval(ctx, row)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tg1, g2, err := validateGeomComp(geom1, geom2, s.FunctionName())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif g1 == nil || g2 == nil {\n\t\treturn nil, nil\n\t}\n\n\t// TODO (james): remove this switch block when the other comparisons are implemented\n\tswitch geom1.(type) {\n\tcase types.LineString:\n\t\treturn nil, sql.ErrUnsupportedGISTypeForSpatialFunc.New(\"LineString\", s.FunctionName())\n\tcase types.Polygon:\n\t\treturn nil, sql.ErrUnsupportedGISTypeForSpatialFunc.New(\"Polygon\", s.FunctionName())\n\tcase types.MultiPoint:\n\t\treturn nil, sql.ErrUnsupportedGISTypeForSpatialFunc.New(\"MultiPoint\", s.FunctionName())\n\tcase types.MultiLineString:\n\t\treturn nil, sql.ErrUnsupportedGISTypeForSpatialFunc.New(\"MultiLineString\", s.FunctionName())\n\tcase types.MultiPolygon:\n\t\treturn nil, sql.ErrUnsupportedGISTypeForSpatialFunc.New(\"MultiPolygon\", s.FunctionName())\n\tcase types.GeomColl:\n\t\treturn nil, sql.ErrUnsupportedGISTypeForSpatialFunc.New(\"GeomColl\", s.FunctionName())\n\t}\n\n\t// TODO (james): remove this switch block when the other comparisons are implemented\n\tswitch geom2.(type) {\n\tcase types.LineString:\n\t\treturn nil, sql.ErrUnsupportedGISTypeForSpatialFunc.New(\"LineString\", s.FunctionName())\n\tcase types.Polygon:\n\t\treturn nil, sql.ErrUnsupportedGISTypeForSpatialFunc.New(\"Polygon\", s.FunctionName())\n\tcase types.MultiPoint:\n\t\treturn nil, 
sql.ErrUnsupportedGISTypeForSpatialFunc.New(\"MultiPoint\", s.FunctionName())\n\tcase types.MultiLineString:\n\t\treturn nil, sql.ErrUnsupportedGISTypeForSpatialFunc.New(\"MultiLineString\", s.FunctionName())\n\tcase types.MultiPolygon:\n\t\treturn nil, sql.ErrUnsupportedGISTypeForSpatialFunc.New(\"MultiPolygon\", s.FunctionName())\n\tcase types.GeomColl:\n\t\treturn nil, sql.ErrUnsupportedGISTypeForSpatialFunc.New(\"GeomColl\", s.FunctionName())\n\t}\n\n\treturn isEqual(g1, g2), nil\n}", "func (s *String) Eval(_, _ *Scope) (Value, error) {\n\treturn s, nil\n}", "func Evaluate(expression *[]string, dispatchTable DispatchTable, stack *Stack) interface{} {\n\n\tfor idx, token := range *expression {\n\t\tvar dispatchFunction DispatchFunc\n\n\t\tif _, err := strconv.ParseFloat(token, 64); err == nil {\n\t\t\tdispatchFunction = dispatchTable[\"FLOAT\"]\n\t\t} else {\n\t\t\tvar evalsOk bool\n\t\t\tif dispatchFunction, evalsOk = dispatchTable[token]; !evalsOk {\n\t\t\t\tdispatchFunction = dispatchTable[\"__DEFAULT__\"]\n\t\t\t\t// delete token from expression\n\t\t\t\tcopy((*expression)[idx:], (*expression)[idx+1:])\n\t\t\t\t(*expression)[len(*expression)-1] = \"\"\n\t\t\t\t(*expression) = (*expression)[:len(*expression)-1]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tdispatchFunction(token, stack)\n\t}\n\treturn stack.Pop()\n}", "func Evaluate(e ast.Node, genCtx *GenCtx) parser_driver.ValueExpr {\n\tswitch t := e.(type) {\n\tcase *ast.ParenthesesExpr:\n\t\treturn Evaluate(t.Expr, genCtx)\n\tcase *ast.BinaryOperationExpr:\n\t\tres, err := operator.BinaryOps.Eval(t.Op.String(), Evaluate(t.L, genCtx), Evaluate(t.R, genCtx))\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"error occurred on eval: %+v\", err))\n\t\t}\n\t\treturn res\n\tcase *ast.UnaryOperationExpr:\n\t\tres, err := operator.UnaryOps.Eval(t.Op.String(), Evaluate(t.V, genCtx))\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"error occurred on eval: %+v\", err))\n\t\t}\n\t\treturn res\n\tcase *ast.IsNullExpr:\n\t\tsubResult := Evaluate(t.Expr, genCtx)\n\t\tc := ConvertToBoolOrNull(subResult)\n\t\tr := parser_driver.ValueExpr{}\n\t\tr.SetInt64(0)\n\t\tif c == -1 {\n\t\t\tr.SetInt64(1)\n\t\t}\n\t\treturn r\n\tcase *ast.ColumnNameExpr:\n\t\tfor key, value := range genCtx.unwrapPivotRows {\n\t\t\toriginTableName := t.Name.Table.L\n\t\t\tfor k, v := range genCtx.TableAlias {\n\t\t\t\tif v == originTableName {\n\t\t\t\t\toriginTableName = k\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\toriginColumnName := t.Name.Name.L\n\t\t\tif key == fmt.Sprintf(\"%s.%s\", originTableName, originColumnName) {\n\t\t\t\tv := parser_driver.ValueExpr{}\n\t\t\t\tv.SetValue(value)\n\t\t\t\tif tmpTable, ok := genCtx.TableAlias[t.Name.Table.L]; ok {\n\t\t\t\t\tt.Name.Table = model.NewCIStr(tmpTable)\n\t\t\t\t}\n\t\t\t\treturn v\n\t\t\t}\n\t\t}\n\t\tpanic(fmt.Sprintf(\"no such col %s in table %s\", t.Name, t.Name.Table))\n\tcase ast.ValueExpr:\n\t\tv := parser_driver.ValueExpr{}\n\t\tv.SetValue(t.GetValue())\n\t\tv.SetType(t.GetType())\n\t\treturn v\n\t}\n\n\t// is useless?\n\t// if e == nil {\n\t// \treturn trueValueExpr()\n\t// }\n\n\tpanic(\"not reachable\")\n\tv := parser_driver.ValueExpr{}\n\tv.SetNull()\n\treturn v\n}", "func (gen *progGen) Eval(input any) (ref.Val, *EvalDetails, error) {\n\t// The factory based Eval() differs from the standard evaluation model in that it generates a\n\t// new EvalState instance for each call to ensure that unique evaluations yield unique stateful\n\t// results.\n\tstate := interpreter.NewEvalState()\n\tcostTracker := 
&interpreter.CostTracker{}\n\tdet := &EvalDetails{state: state, costTracker: costTracker}\n\n\t// Generate a new instance of the interpretable using the factory configured during the call to\n\t// newProgram(). It is incredibly unlikely that the factory call will generate an error given\n\t// the factory test performed within the Program() call.\n\tp, err := gen.factory(state, costTracker)\n\tif err != nil {\n\t\treturn nil, det, err\n\t}\n\n\t// Evaluate the input, returning the result and the 'state' within EvalDetails.\n\tv, _, err := p.Eval(input)\n\tif err != nil {\n\t\treturn v, det, err\n\t}\n\treturn v, det, nil\n}", "func (n *NotLikeOp) Evaluate(left, right EvalResult) (EvalResult, error) {\n\tpanic(\"implement me\")\n}", "func evaluateExpression(c *Context, exp interface{}) interface{} {\r\n var val interface{}\r\n\r\n // fmt.Printf(\"Evaluating type %T, \\n\", exp)\r\n switch t := exp.(type) {\r\n case int:\r\n // fmt.Printf(\"Returning int %d\\n\", t)\r\n val = t\r\n case *Integer:\r\n val = t.Number\r\n case *StringPrimitive:\r\n val = t.str\r\n case string:\r\n val = t\r\n case []interface{}:\r\n val = t\r\n case *InfixExpression:\r\n // fmt.Printf(\"Evaluating infix expresison %T l: %T, r:%T\\n\", t,t.leftNode.Exp, t.rightNode.Exp)\r\n //Get the value of the left node and right\r\n lVal := evaluateExpression(c, t.leftNode.Exp)\r\n rVal := evaluateExpression(c, t.rightNode.Exp)\r\n\r\n\r\n //then apply the correct infix operator to the values\r\n val = evaluateInfixExpression(c, t.opType, lVal, rVal)\r\n\r\n case *Identifier:\r\n // fmt.Printf(\"Was identifier returning %v\\n\", t.id)\r\n if(t.id == \"nil\") {\r\n val = NewNil(0)\r\n } else {\r\n // fmt.Printf(\"Posssible indeitEifer %T\\n\", c.values[t.id])\r\n val = evaluateExpression(c, c.values[t.id])\r\n }\r\n case *CallExpression:\r\n // fmt.Printf(\"Evaluation call to %s\\n\",t.callee)\r\n\r\n //get declaration of call\r\n callDec := c.lookup(t.callee).(*FuncDeclaration)\r\n if(callDec.returnType == \"\") { //no rreturn type = unit\r\n val = &UnitType{}\r\n } else { //Evaluate the expression of the body for a value\r\n //This should produce a value and will execute all\r\n //of the code of the body as well\r\n for i, _ := range callDec.paramNodes {\r\n paramDec := callDec.paramNodes[i].Exp.(*Param)\r\n paramValue := t.paramNodes[i].Exp\r\n c.values[paramDec.id] = evaluateExpression(c, paramValue)\r\n val = c.values[paramDec.id]\r\n }\r\n\r\n }\r\n\r\n if(t.callee == \"printi\") {\r\n invokePrintI(c, t)\r\n } else if(t.callee == \"print\") {\r\n invokePrint(c, t)\r\n } else if(t.callee == \"not\") {\r\n invokeNot(c, t)\r\n } else { //Regular other user defined function do your thing!\r\n //invoke the body\r\n //Get the declaration of the calling function so we can execute it\r\n callDec := c.lookup(t.callee).(*FuncDeclaration)\r\n // fmt.Printf(\"Invoking random func \\n\")\r\n evaluateExpression(c, callDec.body.Exp)\r\n }\r\n case *IfThenElseExpression:\r\n condition := evaluateExpression(c, t.condNode.Exp).(bool)\r\n // fmt.Printf(\"Cond was %v \\n\", condition)\r\n //If else is nil then its an IfThen Exp\r\n if(t.elseNode == nil) {\r\n val = &UnitType{}\r\n if(condition) { //if the condition is true evaluatie the code inside\r\n evaluateExpression(c, t.thenNode.Exp)\r\n }\r\n } else { //otherwise its and ifThenElse\r\n if(condition) {\r\n val = evaluateExpression(c, t.thenNode.Exp)\r\n } else {\r\n val = evaluateExpression(c, t.elseNode.Exp)\r\n }\r\n }\r\n case *SeqExpression:\r\n // Value is equivalent to the last 
node of the seqence expression\r\n if(len(t.nodes) == 0) {\r\n val = &UnitType{}\r\n } else {\r\n // fmt.Printf(\"Seq type was %T\\n\", t.nodes[len(t.nodes)-1].Exp)\r\n val = evaluateExpression(c, t.nodes[len(t.nodes)-1].Exp)\r\n }\r\n case *Nil:\r\n val = NewNil(0)\r\n case *ArrayExp:\r\n arrType := getType(c, c.lookup(t.typeId)).(*Identifier)\r\n val = c.lookup(arrType.id)\r\n case *ForExpression:\r\n val = &UnitType{}\r\n case *LetExpression:\r\n if(len(t.exps) == 0) {\r\n val = &UnitType{}\r\n } else {\r\n // fmt.Printf(\"%T is last exp type\\n\", t.exps[len(t.exps)-1].Exp)\r\n // val = getType(c, t.exps[len(t.exps)-1].Exp)\r\n }\r\n case *Assignment:\r\n val = &UnitType{}\r\n case *RecordExp:\r\n var slc []interface{}\r\n for _, fcNode := range t.fieldCreateNodes {\r\n if b, isABinding := fcNode.Exp.(*Binding); isABinding {\r\n slc = append(slc, evaluateExpression(c, b.exp.Exp))\r\n }\r\n }\r\n val = slc\r\n default:\r\n fmt.Fprintf(os.Stderr, \"Could not evaluate exp %T\\n\", t)\r\n os.Exit(4)\r\n }\r\n\r\n return val\r\n}", "func newBinaryExprEvaluator(e *Executor, op Token, lhs, rhs processor) *binaryExprEvaluator {\n\treturn &binaryExprEvaluator{\n\t\texecutor: e,\n\t\top: op,\n\t\tlhs: lhs,\n\t\trhs: rhs,\n\t\tc: make(chan map[string]interface{}, 0),\n\t\tdone: make(chan chan struct{}, 0),\n\t}\n}", "func (op *OpCos) Eval(x, y float32) float32 {\n\treturn float32(math.Cos(float64(op.Child.Eval(x, y))))\n}", "func (lscript *Scripting) Eval(luacmd string, arguments ...interface{}) (*ScriptingReturnValues, error) {\n\targs := asScriptingArgs(arguments...)\n\tlargs := forLua(args)\n\tfor _, larg := range largs {\n\t\tlscript.Push(larg)\n\t}\n\tvar r *ScriptingReturnValues\n\terr := lscript.DoString(luacmd)\n\tif err != nil {\n\t\tT().P(\"script\", \"lua\").Errorf(\"scripting error: %s\", err.Error())\n\t} else {\n\t\tif err == nil {\n\t\t\tT().P(\"lua\", \"eval\").Debugf(\"%d return values on the stack\", lscript.GetTop())\n\t\t\tr = lscript.returnFromScripting(lscript.GetTop()) // return all values on the stack\n\t\t}\n\t}\n\treturn r, err\n}", "func eval(list []*Item) int {\n\n\tvar stack *Item\n\n\tfor _, node := range list {\n\n\t\tif node.Typ == Number {\n\t\t\tstack = stack.Push(node)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar left, right *Item\n\n\t\tstack, right = stack.Pop()\n\t\tstack, left = stack.Pop()\n\n\t\tvar val int\n\t\tswitch node.Operation {\n\t\tcase \"+\":\n\t\t\tval = left.Value + right.Value\n\t\tcase \"-\":\n\t\t\tval = left.Value - right.Value\n\t\tcase \"/\":\n\t\t\t// Watch for div-by-zero\n\t\t\tval = left.Value / right.Value\n\t\tcase \"*\":\n\t\t\tval = left.Value * right.Value\n\t\t}\n\t\tstack = stack.Push(&Item{Typ: Number, Value: val})\n\t}\n\n\treturn stack.Value\n}", "func (and And) Eval(visited []bool) bool {\n\treturn and.Left.Eval(visited) && and.Right.Eval(visited)\n\n}" ]
[ "0.71639663", "0.71103054", "0.68605703", "0.6728326", "0.67086935", "0.66818875", "0.66279435", "0.6556594", "0.65050507", "0.64583486", "0.6408754", "0.6372101", "0.63654286", "0.63654286", "0.6364129", "0.6360179", "0.626232", "0.6259256", "0.625477", "0.6253183", "0.6253183", "0.6212351", "0.6189511", "0.6145318", "0.61330205", "0.6128525", "0.6098636", "0.6016037", "0.5999483", "0.5996716", "0.599466", "0.5986107", "0.5976745", "0.5976745", "0.59553224", "0.59492475", "0.5943151", "0.5914787", "0.5911168", "0.5910981", "0.5908939", "0.5897979", "0.58594596", "0.5839726", "0.5835621", "0.5823702", "0.58194804", "0.5810095", "0.58058625", "0.5798859", "0.5787894", "0.576559", "0.5754709", "0.5747737", "0.57408476", "0.5728999", "0.5724947", "0.5724252", "0.56936526", "0.5685216", "0.56706375", "0.56617016", "0.56615716", "0.5632839", "0.5621141", "0.5615518", "0.5604862", "0.5595838", "0.5594752", "0.55882543", "0.55835485", "0.55761516", "0.5570967", "0.5566095", "0.5533981", "0.55291945", "0.55265397", "0.5520721", "0.55095476", "0.55018973", "0.549351", "0.5489902", "0.5488946", "0.54878026", "0.5482913", "0.54776573", "0.54769075", "0.5473077", "0.5466148", "0.5464075", "0.5438749", "0.5436982", "0.543604", "0.54333675", "0.54274684", "0.5418573", "0.5414274", "0.5411514", "0.5398534", "0.5398032" ]
0.7526083
0
newLiteralProcessor returns a literalProcessor for a given value.
func newLiteralProcessor(val interface{}) *literalProcessor {
	return &literalProcessor{
		val:  val,
		c:    make(chan map[string]interface{}, 0),
		done: make(chan chan struct{}, 0),
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewLiteral(value Object) Expression {\n\treturn &literal{value: value}\n}", "func NewProcessor(name string) *Processor {\n\tconst (\n\t\tramin = 0\n\t\tramax = 360\n\t\tdecmin = -90\n\t\tdecmax = +90\n\t\tnra = 36\n\t\tndec = 18\n\t)\n\n\treturn &Processor{\n\t\tname: name,\n\t\tmsg: logger.New(name),\n\t\tRunFMMDb: make(map[int]RunFieldMinMax),\n\t\tRaDec: RaDecLim{\n\t\t\tMin: RaDec{\n\t\t\t\tRa: ramin,\n\t\t\t\tDec: decmin,\n\t\t\t},\n\t\t\tMax: RaDec{\n\t\t\t\tRa: ramax,\n\t\t\t\tDec: decmax,\n\t\t\t},\n\t\t\tNbRa: nra,\n\t\t\tNbDec: ndec,\n\t\t\tDeltaRa: (ramax - ramin) / float64(nra),\n\t\t\tDeltaDec: (decmax - decmin) / float64(ndec),\n\t\t},\n\t\tFlux: [2]float64{0, 5.0e5},\n\t}\n}", "func NewProcessor(provider metrics.Provider, lister ContainerAccessor, adapter MetricsAdapter, filter ContainerFilter) Processor {\n\treturn Processor{\n\t\tmetricsProvider: provider,\n\t\tctrLister: lister,\n\t\tmetricsAdapter: adapter,\n\t\tctrFilter: filter,\n\t\textensions: map[string]ProcessorExtension{\n\t\t\tNetworkExtensionID: NewProcessorNetwork(),\n\t\t},\n\t}\n}", "func NewProcessor() *Processor {\n\tp := &Processor{}\n\tp.Nodes = make(map[string]*Node)\n\treturn p\n}", "func (e *exprHelper) NewLiteral(value ref.Val) ast.Expr {\n\treturn e.exprFactory.NewLiteral(e.nextMacroID(), value)\n}", "func NewProcessor(numWorkers int, fn MappingFunction, handler OutputHandler) *Processor {\n\tproc := &Processor{\n\t\tnumWorkers: numWorkers,\n\t\tfn: fn,\n\t\thandler: handler,\n\t\tInput: make(chan bobstore.Ref),\n\t\toutput: make(chan *Output, numWorkers),\n\t}\n\n\tproc.wg.Add(numWorkers)\n\tfor i := 0; i < numWorkers; i++ {\n\t\tgo runWorker(proc)\n\t}\n\n\tproc.hwg.Add(1)\n\tgo runHandler(proc)\n\n\treturn proc\n}", "func newSpanProcessor(config Config) (*spanProcessor, error) {\n\tskipExpr, err := filterspan.NewSkipExpr(&config.MatchConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsp := &spanProcessor{\n\t\tconfig: config,\n\t\tskipExpr: skipExpr,\n\t}\n\n\t// Compile ToAttributes regexp and extract attributes names.\n\tif config.Rename.ToAttributes != nil {\n\t\tfor _, pattern := range config.Rename.ToAttributes.Rules {\n\t\t\tre, err := regexp.Compile(pattern)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid regexp pattern %s\", pattern)\n\t\t\t}\n\n\t\t\trule := toAttributeRule{\n\t\t\t\tre: re,\n\t\t\t\t// Subexpression names will become attribute names during extraction.\n\t\t\t\tattrNames: re.SubexpNames(),\n\t\t\t}\n\n\t\t\tsp.toAttributeRules = append(sp.toAttributeRules, rule)\n\t\t}\n\t}\n\n\treturn sp, nil\n}", "func Literal(s string) Pattern {\n\treturn &LiteralNode{\n\t\tStr: s,\n\t}\n}", "func NewProcessor(indexer *comments.Indexer, repository battles.RepositoryInterface) *Processor {\n\treturn &Processor{\n\t\tindexer: indexer,\n\t\trepository: repository,\n\t}\n}", "func NewProcessor(name, version string, cf ConfigFunc, pf ProcessorFunc) ReceiveSendComponent {\n\treturn &processor{\n\t\tname: fmt.Sprintf(\"processor-%s\", name),\n\t\tversion: version,\n\t\tcf: cf,\n\t\tpf: pf,\n\t}\n}", "func newPerfProcessor(vcs vcsinfo.VCS, config *sharedconfig.IngesterConfig, client *http.Client) (ingestion.Processor, error) {\n\treturn &perfProcessor{\n\t\tstore: ptracestore.Default,\n\t\tvcs: vcs,\n\t}, nil\n}", "func NewProcessor(m []Mailet) *Processor {\n\treturn &Processor{mailets: m}\n}", "func NewStandard() Processor {\n\treturn standard{}\n}", "func NewLiteral(arg interface{}) Expression {\n\treturn &Literal{Literal: NewDatum(arg)}\n}", "func New(name 
string, store OperationStoreClient, pc protocol.Client) *OperationProcessor {\n\treturn &OperationProcessor{name: name, store: store, pc: pc}\n}", "func NewProcessor(kubeconfig string) (*Processor, error) {\n\n\tgen, err := generator.NewGenerator()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not create generator: %v\", err)\n\t}\n\n\tconfig, err := clientcmd.BuildConfigFromFlags(\"\", kubeconfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclientset, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfo, err := clientset.ServerVersion()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Processor{\n\t\tk8sClient: clientset,\n\t\tresourceNamePrefix: map[string]bool{},\n\t\tserverGitVersion: info.GitVersion,\n\t\tgen: gen,\n\t}, nil\n}", "func NewProcessor(\n\tname string,\n\tlog logr.Logger,\n\tcc ClientCommandRunner,\n\tmeterDefStore *MeterDefinitionStore,\n\tprocessor ObjectResourceMessageProcessor,\n) Processor {\n\treturn &processorImpl{\n\t\tname: name,\n\t\tlog: log,\n\t\tcc: cc,\n\t\tmeterDefStore: meterDefStore,\n\t\tprocessor: processor,\n\t\tdigestersSize: 1,\n\t\tretryCount: 3,\n\t}\n}", "func NewProcessor(url *url.URL) (core.EventProcessor, error) {\n\treturn &EventProcessor{}, nil\n}", "func (m *Model) NewProc(ctx nn.Context) nn.Processor {\n\treturn &Processor{\n\t\tBaseProcessor: nn.BaseProcessor{\n\t\t\tModel: m,\n\t\t\tMode: ctx.Mode,\n\t\t\tGraph: ctx.Graph,\n\t\t\tFullSeqProcessing: true,\n\t\t},\n\t\tscaleFactor: m.ScaleFactor,\n\t\tquery: m.Query.NewProc(ctx).(*linear.Processor),\n\t\tvalue: m.Value.NewProc(ctx).(*linear.Processor),\n\t\tr: ctx.Graph.NewWrap(m.R),\n\t\tAttention: nil,\n\t}\n}", "func New(kclient *k8sutil.K8sutil, baseImage string) (*Processor, error) {\n\tp := &Processor{\n\t\tk8sclient: kclient,\n\t\tbaseImage: baseImage,\n\t\tclusters: make(map[string]Cluster),\n\t}\n\n\treturn p, nil\n}", "func NewProcessor(name string, cfg Config, process ProcessFunction, demux ChannelDemux) Processor {\n\treturn Processor{name: name, cfg: cfg, process: process, demux: demux}\n}", "func MakeFromLiteral(lit string, tok token.Token, zero uint) Value {\n\tswitch tok {\n\tcase token.RAT:\n\t\treturn MakeRatFromString(lit[:len(lit)-1])\n\t}\n\treturn constant.MakeFromLiteral(lit, gotoken.Token(tok), zero)\n}", "func (s *BaseCobol85PreprocessorListener) EnterLiteral(ctx *LiteralContext) {}", "func New(value interface{}) error {\n\tif value == nil {\n\t\treturn nil\n\t}\n\treturn NewText(gconv.String(value))\n}", "func (m *Manager) NewProcessor(conf lprocessor.Config) (processor.V1, error) {\n\treturn bundle.AllProcessors.Init(conf, m)\n}", "func (b *Builder) createProcessor(typeName string, name string) error {\n\tif _, exists := b.localInstances.Get(name); exists {\n\t\treturn fmt.Errorf(\"instance name %s already exists\", name)\n\t}\n\n\t//Instantiate the processor according to its type\n\tctor, exists := b.constructors[typeName]\n\tif !exists {\n\t\treturn fmt.Errorf(\"failed to find constructor for instance type %s\", typeName)\n\t}\n\tinstance, err := ctor.call()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creation of instance (%s, %s) failed: %s\", typeName, name, err)\n\t}\n\tb.localInstances.Set(name, &ProcessorInfo{\n\t\tinstance: instance,\n\t})\n\treturn nil\n}", "func lit(s string) psec.Parser {\n\treturn psec.Literal(s)\n}", "func NewProcessor(battlesRepository battles.RepositoryInterface, usersRepository users.RepositoryInterface) *Processor {\n\treturn &Processor{\n\t\tbattlesRepository: 
battlesRepository,\n\t\tusersRepository: usersRepository,\n\t}\n}", "func NewBooleanLiteral(s string) Expression {\n\tif s == \"true\" {\n\t\treturn &booleanLiteral{value: &boolean{value: true}}\n\t} else if s == \"false\" {\n\t\treturn &booleanLiteral{value: &boolean{value: false}}\n\t} else {\n\t\tpanic(\"unexpected boolean value\")\n\t}\n}", "func NewProcessor(cfg ProcessorSettings) (*Processor, error) {\n\treturn newProcessor(cfg, obsreportconfig.UseOtelForInternalMetricsfeatureGate.IsEnabled())\n}", "func (s *BasePCREListener) EnterLiteral(ctx *LiteralContext) {}", "func (p *literalProcessor) name() string { return \"\" }", "func (v *String) FromLiteralValue(l schema.LiteralValue) error {\n\tif l == nil {\n\t\t*v = String{\"\", false}\n\t\treturn nil\n\t}\n\n\tif PermissiveInputParsing {\n\t\tswitch c := l.(type) {\n\t\tcase schema.LiteralBool:\n\t\t\t*v = String{fmt.Sprintf(\"%v\", c), true}\n\t\t\tPermissiveInputCallback(\"String\", l)\n\t\t\treturn nil\n\t\tcase schema.LiteralNumber:\n\t\t\t*v = String{fmt.Sprintf(\"%v\", c), true}\n\t\t\tPermissiveInputCallback(\"String\", l)\n\t\t\treturn nil\n\t\t}\n\t}\n\tswitch c := l.(type) {\n\tcase schema.LiteralString:\n\t\ts := string(c)\n\t\t*v = String{s, true}\n\t\treturn nil\n\tdefault:\n\t\treturn fmt.Errorf(\"Literal value %v is not a string\", l)\n\t}\n}", "func literal(lit string) *goraptor.Literal {\n\treturn &goraptor.Literal{Value: lit}\n}", "func NewProcessedTerm(source Category, operation Operation, sink Category) ProcessedTerm {\n\treturn &processedTerm{\n\t\tSource: source,\n\t\tSink: sink,\n\t\tOperation: operation}\n}", "func (p *Parser) parseRegexpLiteral() asti.ExpressionI {\n\n\tflags := \"\"\n\n\tval := p.curToken.Literal\n\tif strings.HasPrefix(val, \"(?\") {\n\t\tval = strings.TrimPrefix(val, \"(?\")\n\n\t\ti := 0\n\t\tfor i < len(val) {\n\n\t\t\tif val[i] == ')' {\n\n\t\t\t\tval = val[i+1:]\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tflags += string(val[i])\n\t\t\t}\n\n\t\t\ti++\n\t\t}\n\t}\n\treturn &ast.RegexpLiteral{Token: p.curToken, Value: val, Flags: flags}\n}", "func (s *Str) Literal() {}", "func (*Base) LiteralString(p ASTPass, node *ast.LiteralString, ctx Context) {\n}", "func (p *literalProcessor) start() { go p.run() }", "func NewProcessor(network string) *Processor {\n\treturn &Processor{\n\t\t[]*btcutil.Address{},\n\t\tutil.GetNetworkTypeByString(network),\n\t\tmake(map[string]*btcutil.Tx, 0),\n\t}\n}", "func NewGenerator(regex string) (*Generator, error) {\n\tre, err := syntax.Parse(regex, syntax.Perl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t//fmt.Println(\"Compiled re \", re)\n\treturn &Generator{\n\t\tre: re,\n\t\trand: rand.New(rand.NewSource(time.Now().UnixNano())),\n\t}, nil\n}", "func NewImageProcessor(unlabeledPath, labeledPath string) (Processor, error) {\n\tif _, err := os.Stat(unlabeledPath); os.IsNotExist(err) {\n\t\treturn nil, errors.New(\"unlabeled path doesn't exists\")\n\t}\n\n\tif _, err := os.Stat(labeledPath); os.IsNotExist(err) {\n\t\treturn nil, errors.New(\"labeled path doesn't exists\")\n\t}\n\n\tp := &processorImpl{\n\t\tunlabeledPath: unlabeledPath,\n\t\tlabeledPath: labeledPath,\n\t\tinpChan: make(CommandChan),\n\t}\n\n\tp.start()\n\n\treturn p, nil\n}", "func New(l *lexer.Lexer) *Parser {\n\n\t// Create the parser, and prime the pump\n\tp := &Parser{l: l, errors: []Error{}}\n\tp.nextToken()\n\tp.nextToken()\n\n\t// Register prefix-functions\n\tp.prefixParseFns = [tokentype.TokenType_Count]prefixParseFn{\n\t\ttokentype.BACKTICK: 
p.parseBacktickLiteral,\n\t\ttokentype.BANG: p.parsePrefixExpression,\n\t\ttokentype.DEFINE_FUNCTION: p.parseFunctionDefinition,\n\t\ttokentype.EOF: p.parsingBroken,\n\t\ttokentype.FALSE: p.parseBoolean,\n\t\ttokentype.FLOAT: p.parseFloatLiteral,\n\t\ttokentype.FOR: p.parseForLoopExpression,\n\t\ttokentype.FOREACH: p.parseForEach,\n\t\ttokentype.FUNCTION: p.parseFunctionLiteral,\n\t\ttokentype.IDENT: p.parseIdentifier,\n\t\ttokentype.IF: p.parseIfExpression,\n\t\ttokentype.ILLEGAL: p.parsingBroken,\n\t\ttokentype.INT: p.parseIntegerLiteral,\n\t\ttokentype.LBRACE: p.parseHashLiteral,\n\t\ttokentype.LBRACKET: p.parseArrayLiteral,\n\t\ttokentype.LPAREN: p.parseGroupedExpression,\n\t\ttokentype.MINUS: p.parsePrefixExpression,\n\t\ttokentype.REGEXP: p.parseRegexpLiteral,\n\t\ttokentype.STRING: p.parseStringLiteral,\n\t\ttokentype.SWITCH: p.parseSwitchStatement,\n\t\ttokentype.TRUE: p.parseBoolean,\n\t}\n\n\t// Register infix functions\n\tp.infixParseFns = [tokentype.TokenType_Count]infixParseFn{\n\t\ttokentype.AND: p.parseInfixExpression,\n\t\ttokentype.ASSIGN: p.parseAssignExpression,\n\t\ttokentype.ASTERISK: p.parseInfixExpression,\n\t\ttokentype.ASTERISK_EQUALS: p.parseAssignExpression,\n\t\ttokentype.CONTAINS: p.parseInfixExpression,\n\t\ttokentype.DOTDOT: p.parseInfixExpression,\n\t\ttokentype.EQ: p.parseInfixExpression,\n\t\ttokentype.GT: p.parseInfixExpression,\n\t\ttokentype.GT_EQUALS: p.parseInfixExpression,\n\t\ttokentype.LBRACKET: p.parseIndexExpression,\n\t\ttokentype.LPAREN: p.parseCallExpression,\n\t\ttokentype.LT: p.parseInfixExpression,\n\t\ttokentype.LT_EQUALS: p.parseInfixExpression,\n\t\ttokentype.MINUS: p.parseInfixExpression,\n\t\ttokentype.MINUS_EQUALS: p.parseAssignExpression,\n\t\ttokentype.MOD: p.parseInfixExpression,\n\t\ttokentype.NOT_CONTAINS: p.parseInfixExpression,\n\t\ttokentype.NOT_EQ: p.parseInfixExpression,\n\t\ttokentype.OR: p.parseInfixExpression,\n\t\ttokentype.PERIOD: p.parseMethodCallExpression,\n\t\ttokentype.PLUS: p.parseInfixExpression,\n\t\ttokentype.PLUS_EQUALS: p.parseAssignExpression,\n\t\ttokentype.POW: p.parseInfixExpression,\n\t\ttokentype.QUESTION: p.parseTernaryExpression,\n\t\ttokentype.SLASH: p.parseInfixExpression,\n\t\ttokentype.SLASH_EQUALS: p.parseAssignExpression,\n\t}\n\n\t// Register postfix functions.\n\tp.postfixParseFns = [tokentype.TokenType_Count]postfixParseFn{\n\t\ttokentype.MINUS_MINUS: p.parsePostfixExpression,\n\t\ttokentype.PLUS_PLUS: p.parsePostfixExpression,\n\t}\n\n\t// All done\n\treturn p\n}", "func newToken(tokenType TokenType, literal byte) Token {\n\treturn Token{Type: tokenType, Literal: string(literal)}\n}", "func New() *QueryProcessor {\n\treturn &QueryProcessor{}\n}", "func Literal(v interface{}) Annotation {\n\treturn &literalAnnotation{v}\n}", "func NewProcessor() *Processor {\n\taddedPaper := make(chan *datastore.PaperImpl)\n\tpapers := make(map[string]*datastore.PaperImpl)\n\n\tgotPaper := make(chan string)\n\tpaper := make(chan *datastore.PaperImpl)\n\n\tgotBook := make(chan string)\n\tbook := make(chan *datastore.BookImpl)\n\tbooks := make(map[string]*datastore.BookImpl)\n\n\tsetAnswer := make(chan error)\n\tpaperAnswer := make(chan *paperAnswer)\n\n\tdone := make(chan bool)\n\n\treturn &Processor{\n\t\taddedPaper: addedPaper,\n\t\tpapers: papers,\n\n\t\tgotPaper: gotPaper,\n\t\tpaper: paper,\n\n\t\tgotBook: gotBook,\n\t\tbook: book,\n\t\tbooks: books,\n\n\t\tsetAnswer: setAnswer,\n\t\tpaperAnswer: paperAnswer,\n\n\t\tdone: done,\n\t}\n}", "func (i *Int) Literal() {}", "func (f *Flt) Literal() {}", "func 
NewBooleanLiteral(pos int, line int, val bool, original string) *BooleanLiteral {\n\treturn &BooleanLiteral{\n\t\tNodeType: NodeBoolean,\n\t\tLoc: Loc{pos, line},\n\n\t\tValue: val,\n\t\tOriginal: original,\n\t}\n}", "func ProcessFunc(p Processor) {\n\tprocessor = p\n}", "func ProcessFunc(p Processor) {\n\tprocessor = p\n}", "func NewConditional(\n\truleFactory Constructor,\n) Constructor {\n\treturn func(cfg *common.Config) (Processor, error) {\n\t\trule, err := ruleFactory(cfg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn addCondition(cfg, rule)\n\t}\n}", "func (s String) ProcessConstant(p ConstantProcessor) {\n\tp.ProcessString(s)\n}", "func NewIntegerLiteral(s string) Expression {\n\tval, err := strconv.ParseInt(s, 10, 0)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn &integerLiteral{value: &integer{value: val}}\n}", "func (*Base) LiteralNumber(p ASTPass, node *ast.LiteralNumber, ctx Context) {\n}", "func NewProcessor(datadogCl DatadogClient) *Processor {\n\texternalMaxAge := math.Max(config.Datadog.GetFloat64(\"external_metrics_provider.max_age\"), 3*config.Datadog.GetFloat64(\"external_metrics_provider.rollup\"))\n\treturn &Processor{\n\t\texternalMaxAge: time.Duration(externalMaxAge) * time.Second,\n\t\tdatadogClient: datadogCl,\n\t}\n}", "func ParseLiteral(\n\tliteral string,\n\tty sema.Type,\n\tinter *interpreter.Interpreter,\n) (\n\tcadence.Value,\n\terror,\n) {\n\tcode := []byte(literal)\n\n\texpression, errs := parser.ParseExpression(inter, code, parser.Config{})\n\tif len(errs) > 0 {\n\t\treturn nil, parser.Error{\n\t\t\tCode: code,\n\t\t\tErrors: errs,\n\t\t}\n\t}\n\n\treturn LiteralValue(inter, expression, ty)\n}", "func NewRegex(v string) (Value, error) {\n\trx, err := regexp.Compile(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn rxValue{rx}, nil\n}", "func (v *Boolean) FromLiteralValue(l schema.LiteralValue) error {\n\tif l == nil {\n\t\t*v = Boolean{false, false}\n\t\treturn nil\n\t}\n\n\tif PermissiveInputParsing {\n\t\tswitch c := l.(type) {\n\t\tcase schema.LiteralString:\n\t\t\ts := string(c)\n\t\t\tif s == \"true\" {\n\t\t\t\t*v = Boolean{true, true}\n\t\t\t\tPermissiveInputCallback(\"Boolean\", l)\n\t\t\t\treturn nil\n\t\t\t} else if s == \"false\" {\n\t\t\t\t*v = Boolean{false, true}\n\t\t\t\tPermissiveInputCallback(\"Boolean\", l)\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase schema.LiteralNumber:\n\t\t\tif c == 0 {\n\t\t\t\t*v = Boolean{false, true}\n\t\t\t} else {\n\t\t\t\t*v = Boolean{true, true}\n\t\t\t}\n\t\t\tPermissiveInputCallback(\"Boolean\", l)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tswitch c := l.(type) {\n\tcase schema.LiteralBool:\n\t\tb := bool(c)\n\t\t*v = Boolean{b, true}\n\t\treturn nil\n\tdefault:\n\t\treturn fmt.Errorf(\"Literal value %v is not a bool\", l)\n\t}\n}", "func NewProcessor(context context.T, processorName string, processorService service.Service, commandWorkerLimit int, cancelWorkerLimit int, pollAssoc bool, supportedDocs []model.DocumentType) *Processor {\n\tlog := context.Log()\n\tconfig := context.AppConfig()\n\n\tinstanceID, err := platform.InstanceID()\n\tif instanceID == \"\" {\n\t\tlog.Errorf(\"no instanceID provided, %v\", err)\n\t\treturn nil\n\t}\n\n\tagentInfo := contracts.AgentInfo{\n\t\tLang: config.Os.Lang,\n\t\tName: config.Agent.Name,\n\t\tVersion: config.Agent.Version,\n\t\tOs: config.Os.Name,\n\t\tOsVersion: config.Os.Version,\n\t}\n\n\tagentConfig := contracts.AgentConfiguration{\n\t\tAgentInfo: agentInfo,\n\t\tInstanceID: instanceID,\n\t}\n\n\t// sendCommand and cancelCommand will be 
processed by separate worker pools\n\t// so we can define the number of workers per each\n\tcancelWaitDuration := 10000 * time.Millisecond\n\tclock := times.DefaultClock\n\tsendCommandTaskPool := task.NewPool(log, commandWorkerLimit, cancelWaitDuration, clock)\n\tcancelCommandTaskPool := task.NewPool(log, CancelWorkersLimit, cancelWaitDuration, clock)\n\n\t// create new message processor\n\torchestrationRootDir := filepath.Join(appconfig.DefaultDataStorePath, instanceID, appconfig.DefaultDocumentRootDirName, config.Agent.OrchestrationRootDir)\n\n\treplyBuilder := func(pluginID string, results map[string]*contracts.PluginResult) messageContracts.SendReplyPayload {\n\t\truntimeStatuses := reply.PrepareRuntimeStatuses(log, results)\n\t\treturn reply.PrepareReplyPayload(pluginID, runtimeStatuses, clock.Now(), agentConfig.AgentInfo)\n\t}\n\n\tstatusReplyBuilder := func(agentInfo contracts.AgentInfo, resultStatus contracts.ResultStatus, documentTraceOutput string) messageContracts.SendReplyPayload {\n\t\treturn parser.PrepareReplyPayloadToUpdateDocumentStatus(agentInfo, resultStatus, documentTraceOutput)\n\n\t}\n\t// create a stop policy where we will stop after 10 consecutive errors and if time period expires.\n\tprocessorStopPolicy := newStopPolicy(processorName)\n\n\t// SendResponse is used to send response on plugin completion.\n\t// If pluginID is empty it will send responses of all plugins.\n\t// If pluginID is specified, response will be sent of that particular plugin.\n\tsendResponse := func(messageID string, pluginID string, results map[string]*contracts.PluginResult) {\n\t\tpayloadDoc := replyBuilder(pluginID, results)\n\t\tprocessSendReply(log, messageID, processorService, payloadDoc, processorStopPolicy)\n\t}\n\n\t// SendDocLevelResponse is used to send document level update\n\t// Specify a new status of the document\n\tsendDocLevelResponse := func(messageID string, resultStatus contracts.ResultStatus, documentTraceOutput string) {\n\t\tpayloadDoc := statusReplyBuilder(agentInfo, resultStatus, documentTraceOutput)\n\t\tprocessSendReply(log, messageID, processorService, payloadDoc, processorStopPolicy)\n\t}\n\n\t// PersistData is used to persist the data into a bookkeeping folder\n\tpersistData := func(state *model.DocumentState, bookkeeping string) {\n\t\tdocmanager.PersistData(log, state.DocumentInformation.DocumentID, state.DocumentInformation.InstanceID, bookkeeping, *state)\n\t}\n\n\tvar assocProc *processor.Processor\n\tif pollAssoc {\n\t\tassocProc = processor.NewAssociationProcessor(context, instanceID)\n\t}\n\treturn &Processor{\n\t\tcontext: context,\n\t\tname: processorName,\n\t\tstopSignal: make(chan bool),\n\t\tconfig: agentConfig,\n\t\tservice: processorService,\n\t\tpluginRunner: pluginRunner,\n\t\tsendCommandPool: sendCommandTaskPool,\n\t\tcancelCommandPool: cancelCommandTaskPool,\n\t\tbuildReply: replyBuilder,\n\t\tsendResponse: sendResponse,\n\t\tsendDocLevelResponse: sendDocLevelResponse,\n\t\torchestrationRootDir: orchestrationRootDir,\n\t\tpersistData: persistData,\n\t\tprocessorStopPolicy: processorStopPolicy,\n\t\tassocProcessor: assocProc,\n\t\tpollAssociations: pollAssoc,\n\t\tsupportedDocTypes: supportedDocs,\n\t}\n}", "func NewValue(v string) (Value, error) {\n\tif regexp.QuoteMeta(v) == v {\n\t\treturn NewEqual(v), nil\n\t}\n\treturn NewRegex(v)\n}", "func New(links links.LinkMap) horst.ProcessorManager {\n\treturn &manager{\n\t\tprocessors: make(map[string]horst.Processor),\n\t\tlinks: links,\n\t}\n}", "func New() helmify.Processor {\n\treturn &crd{}\n}", 
"func New(grammarString string) (*grammar.Grammar, tree.Reducer, error) {\n\tparseTree, err := runner.Run(grammarString)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tg, r := evalGrammar(parseTree.(*tree.PN))\n\treturn g, r, nil\n}", "func New(driver Driver) *Processor {\n\tstdout := make(chan string)\n\tstderr := make(chan string)\n\tsuccess := make(chan bool)\n\tfailure := make(chan bool)\n\tdone := make(chan bool)\n\treturn &Processor{\n\t\tstdout: stdout, stderr: stderr, success: success,\n\t\tfailure: failure, done: done, driver: driver}\n}", "func (v *ID) FromLiteralValue(l schema.LiteralValue) error {\n\tif l == nil {\n\t\t*v = ID{\"\", false}\n\t\treturn nil\n\t}\n\n\tswitch c := l.(type) {\n\tcase schema.LiteralString:\n\t\ts := string(c)\n\t\t*v = ID{s, true}\n\t\treturn nil\n\tcase schema.LiteralNumber:\n\t\ts := strconv.Itoa(int(c))\n\t\t*v = ID{s, true}\n\t\treturn nil\n\tdefault:\n\t\treturn fmt.Errorf(\"Literal value %v is not a string or number\", l)\n\t}\n}", "func NewProcessor(r client.Client,\n\toption *option.ControllerOption,\n\tcloudNetClient pbcloudnet.CloudNetserviceClient,\n\tcloudClient cloudAPI.Interface,\n\tnodeEventer record.EventRecorder) *Processor {\n\n\treturn &Processor{\n\t\tkubeClient: r,\n\t\toption: option,\n\t\tcloudNetClient: cloudNetClient,\n\t\tcloudClient: cloudClient,\n\t\tnodeEventer: nodeEventer,\n\t\teventChan: make(chan struct{}, 10),\n\t}\n}", "func (s *Switch) TokenLiteral() token.Token { return s.Token }", "func New(l *lexer.Lexer, context string) *Parser {\n\tp := &Parser{\n\t\tl: l,\n\t\tContext: context,\n\t\terrors: []string{},\n\t}\n\n\tp.prefixParseFns = make(map[token.Type]prefixParseFn)\n\tp.registerPrefix(token.Ident, p.parseIdentifier)\n\tp.registerPrefix(token.Int, p.parseIntegerLiteral)\n\tp.registerPrefix(token.String, p.parseStringLiteral)\n\tp.registerPrefix(token.Bang, p.parsePrefixExpression)\n\tp.registerPrefix(token.Minus, p.parsePrefixExpression)\n\tp.registerPrefix(token.True, p.parseBoolean)\n\tp.registerPrefix(token.False, p.parseBoolean)\n\tp.registerPrefix(token.Lparen, p.parseGroupedExpression)\n\tp.registerPrefix(token.If, p.parseIfExpression)\n\tp.registerPrefix(token.Function, p.parseFunctionLiteral)\n\tp.registerPrefix(token.Lbracket, p.parseArray)\n\n\tp.infixParseFns = make(map[token.Type]infixParseFn)\n\tp.registerInfix(token.Plus, p.parseInfixExpression)\n\tp.registerInfix(token.Minus, p.parseInfixExpression)\n\tp.registerInfix(token.Slash, p.parseInfixExpression)\n\tp.registerInfix(token.Astersik, p.parseInfixExpression)\n\tp.registerInfix(token.EQ, p.parseInfixExpression)\n\tp.registerInfix(token.NEQ, p.parseInfixExpression)\n\tp.registerInfix(token.LT, p.parseInfixExpression)\n\tp.registerInfix(token.GT, p.parseInfixExpression)\n\tp.registerInfix(token.LTE, p.parseInfixExpression)\n\tp.registerInfix(token.GTE, p.parseInfixExpression)\n\tp.registerInfix(token.Lparen, p.parseCallExpression)\n\tp.registerInfix(token.Dot, p.parseInfixCallExpression)\n\tp.registerInfix(token.Assign, p.parseAssignmentExpression)\n\n\tp.nextToken() // set currToken\n\tp.nextToken() // set peekToken\n\n\treturn p\n}", "func (m *Model) NewProc(ctx nn.Context) nn.Processor {\n\tprocLayers := make([]nn.Processor, len(m.Layers))\n\tfor i, layer := range m.Layers {\n\t\tprocLayers[i] = layer.NewProc(ctx)\n\t}\n\treturn &Processor{\n\t\tBaseProcessor: nn.BaseProcessor{\n\t\t\tModel: m,\n\t\t\tMode: ctx.Mode,\n\t\t\tGraph: ctx.Graph,\n\t\t\tFullSeqProcessing: requiresFullSeq(procLayers),\n\t\t},\n\t\tLayers: 
procLayers,\n\t}\n}", "func New(cfg *common.Config) (processors.Processor, error) {\n\treturn newProcessMetadataProcessorWithProvider(cfg, &procCache)\n}", "func New(typ Type, pos int, lit ...string) Token {\n\tdefOrLit, ok := defaultLiteral[typ]\n\tif len(lit) == 0 && !ok {\n\t\tpanic(\"non-passing a literal for a token with no default literal\")\n\t}\n\tif !ok {\n\t\tdefOrLit = lit[0]\n\t}\n\n\treturn Token{\n\t\tType: typ,\n\t\tPos: pos,\n\t\tLiteral: defOrLit,\n\t}\n}", "func Literal(literal string) *LiteralArgumentBuilder {\n\treturn &LiteralArgumentBuilder{Literal: literal}\n}", "func New(c *common.Config) (processors.Processor, error) {\n\tconf := defaultConfig()\n\tif err := c.Unpack(&conf); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewFromConfig(conf, monitoring.Default)\n}", "func (c *Compiler) CompileIntoPreProcessedText(source string, shaderType ShaderType, inputFilename string, entryPoint string, options *CompilerOptions) *CompilationResult {\n\tcr := &CompilationResult{}\n\tcr.result = C.shaderc_compile_into_preprocessed_text(c.compiler,\n\t\tC.CString(source),\n\t\tC.ulong(len(source)),\n\t\tC.shaderc_shader_kind(shaderType),\n\t\tC.CString(inputFilename),\n\t\tC.CString(entryPoint), options.options)\n\treturn cr\n}", "func NewComp(r Regex) Regex {\n\treturn &comp{\n\t\tr: r,\n\t}\n}", "func (m *Model) NewProc(ctx nn.Context) nn.Processor {\n\treturn &Processor{\n\t\tBaseProcessor: nn.BaseProcessor{\n\t\t\tModel: m,\n\t\t\tMode: ctx.Mode,\n\t\t\tGraph: ctx.Graph,\n\t\t\tFullSeqProcessing: false,\n\t\t},\n\t\tw: ctx.Graph.NewWrap(m.W),\n\t\tb: ctx.Graph.NewWrap(m.B),\n\t\teps: ctx.Graph.Constant(1e-10),\n\t}\n}", "func NewProcessor(projectID string, sub commons.Subscriber, pub commons.Publisher) (processor *Processor, err error) {\n\n\tp := &Processor{\n\t\tcontext: context.Background(),\n\t\tsubscriber: sub,\n\t\tpublisher: pub,\n\t}\n\n\tclient, err := vision.NewImageAnnotatorClient(p.context)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to create client: %v\", err)\n\t}\n\n\tp.client = client\n\n\treturn p, nil\n\n}", "func NewProcessor(ctx context.Context, cfg *config.ProcessorConfigKnative, ms metrics.Receiver, log logger.Logger, opts ...Option) (*Processor, error) {\n\tkLog := log\n\tif zapSugared, ok := log.(*zap.SugaredLogger); ok {\n\t\tproc := strings.ToUpper(string(config.ProcessorKnative))\n\t\tkLog = zapSugared.Named(fmt.Sprintf(\"[%s]\", proc))\n\t}\n\n\tif cfg == nil {\n\t\treturn nil, errors.New(\"knative configuration must be provided\")\n\t}\n\n\t// validate the given target/destination\n\tif err := cfg.Destination.Validate(ctx); err != nil {\n\t\treturn nil, errors.Wrap(err, \"validate configuration\")\n\t}\n\n\tvar p Processor\n\t// apply options\n\tfor _, opt := range opts {\n\t\tif err := opt(&p); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// assume in-cluster mode if empty\n\tif p.kConfig == nil {\n\t\tkCfg, err := rest.InClusterConfig()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"get Kubernetes configuration\")\n\t\t}\n\t\tp.kConfig = kCfg\n\t}\n\n\t// start Knative informers\n\tctx = logging.WithLogger(ctx, kLog.(*zap.SugaredLogger))\n\tctx, startInformers := injection.EnableInjectionOrDie(ctx, p.kConfig)\n\tstartInformers()\n\n\t// placeholder type, works with any addressable, e.g. 
Broker, kService\n\tvar source corev1.Service\n\turiResolver := resolver.NewURIResolverFromTracker(ctx, tracker.New(func(name types.NamespacedName) {}, controller.GetTrackerLease(ctx)))\n\turi, err := uriResolver.URIFromDestinationV1(ctx, *cfg.Destination, &source)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"get URI from destination\")\n\t}\n\n\ttarget := uri.String()\n\tclient, err := ceClient(target, cfg.InsecureSSL, cfg.Encoding)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.Logger = kLog\n\tp.ceClient = client\n\tp.sink = target\n\tp.stats = metrics.EventStats{\n\t\tProvider: string(config.ProcessorKnative),\n\t\tType: config.EventProcessor,\n\t\tAddress: p.sink,\n\t\tStarted: time.Now().UTC(),\n\t\tInvocations: make(map[string]*metrics.InvocationDetails),\n\t}\n\n\tgo p.PushMetrics(ctx, ms)\n\treturn &p, nil\n}", "func (s *BaseSyslParserListener) EnterLiteral(ctx *LiteralContext) {}", "func newNode(expr string) (node, error) {\n\tvar f node\n\tswitch strings.ToLower(expr) {\n\tcase \"\", \"total\":\n\t\tf.value = f.valueTotal\n\tdefault:\n\t\tnodeExpr, err := regexp.Compile(expr)\n\t\tif err != nil {\n\t\t\treturn f, fmt.Errorf(\"node must be either 'total' or a valid regexp: %w\", err)\n\t\t}\n\t\tf.expr = nodeExpr\n\t\tf.value = f.valueSelf\n\t}\n\treturn f, nil\n}", "func (cl *CharLiteral) TokenLiteral() token.Token { return cl.Token }", "func (s *BasePlSqlParserListener) EnterLiteral(ctx *LiteralContext) {}", "func New(l *lexer.Lexer) *Parser {\n\tp := &Parser{\n\t\tl: l,\n\t\terrors: []string{},\n\t}\n\n\t// Read two tokens, so curToken and peekToken are both set\n\tp.nextToken()\n\tp.nextToken()\n\n\tp.prefixParseFns = make(map[token.Type]prefixParseFn)\n\n\tp.registerPrefix(token.IDENT, p.parseIdentifier)\n\tp.registerPrefix(token.TRUE, p.parseBoolean)\n\tp.registerPrefix(token.FALSE, p.parseBoolean)\n\tp.registerPrefix(token.INT, p.parseIntegerLiteral)\n\tp.registerPrefix(token.BANG, p.parsePrefixExpression)\n\tp.registerPrefix(token.MINUS, p.parsePrefixExpression)\n\tp.registerPrefix(token.LPAREN, p.parseGroupedExpression)\n\tp.registerPrefix(token.IF, p.parseIfExpression)\n\tp.registerPrefix(token.FUNCTION, p.parseFunctionLiteral)\n\n\tp.infixParseFns = make(map[token.Type]infixParseFn)\n\n\tp.registerInfix(token.PLUS, p.parseInfixExpression)\n\tp.registerInfix(token.MINUS, p.parseInfixExpression)\n\tp.registerInfix(token.SLASH, p.parseInfixExpression)\n\tp.registerInfix(token.ASTERISK, p.parseInfixExpression)\n\tp.registerInfix(token.EQ, p.parseInfixExpression)\n\tp.registerInfix(token.NEQ, p.parseInfixExpression)\n\tp.registerInfix(token.LT, p.parseInfixExpression)\n\tp.registerInfix(token.GT, p.parseInfixExpression)\n\tp.registerInfix(token.LPAREN, p.parseCallExpression)\n\n\treturn p\n}", "func New(clusterSummary *repository.ClusterSummary, nodeInfoLister NodeInfoLister,\n\tnamespaceLister listersv1.NamespaceLister) (*PodAffinityProcessor, error) {\n\tpr := &PodAffinityProcessor{\n\t\tnodeInfoLister: nodeInfoLister,\n\t\tpodToVolumesMap: clusterSummary.PodToVolumesMap,\n\t\t// TODO: make the parallelizm configurable\n\t\tparallelizer: parallelizer.NewParallelizer(parallelizer.DefaultParallelism),\n\t\tnsLister: namespaceLister,\n\t\tuniqueAffinityTerms: sets.NewString(),\n\t\tuniqueAntiAffinityTerms: sets.NewString(),\n\t}\n\n\treturn pr, nil\n}", "func NewScheduleProcessor(ctxIn context.Context, credentialsProvider secret.SecretProvider) (ScheduleProcessor, error) {\n ctx, span := trace.StartSpan(ctxIn, \"NewScheduleProcessor\")\n defer span.End()\n\n 
backupRepository, err := repository.NewBackupRepository(ctx, credentialsProvider)\n if err != nil {\n return nil, err\n }\n\n jobRepository, err := repository.NewJobRepository(ctx, credentialsProvider)\n if err != nil {\n return nil, err\n }\n\n sourceMetadataRepository, err := repository.NewSourceMetadataRepository(ctx, credentialsProvider)\n if err != nil {\n return nil, err\n }\n\n sourceMetadataJobRepository, err := repository.NewSourceMetadataJobRepository(ctx, credentialsProvider)\n if err != nil {\n return nil, err\n }\n\n sourceTrashcanRepository, err := repository.NewSourceTrashcanRepository(ctx, credentialsProvider)\n if err != nil {\n return nil, err\n }\n\n return &defaultScheduleProcessor{\n backupRepository: backupRepository,\n jobRepository: jobRepository,\n sourceMetadataRepository: sourceMetadataRepository,\n sourceMetadataJobRepository: sourceMetadataJobRepository,\n sourceTrashcanRepository: sourceTrashcanRepository,\n }, nil\n}", "func (c *Condition) TokenLiteral() token.Token { return c.Token }", "func (BooleanLiteral) literalNode() {}", "func New(comm impl.CommandProcessor) (command, error) {\n\tif comm == nil {\n\t\treturn command{}, errors.New(\"command processor is not valid\")\n\t}\n\treturn command{comm: comm}, nil\n}", "func New(path string) (gval.Evaluable, error) {\n\treturn lang.NewEvaluable(path)\n}", "func New(input string) *Lexer {\n\tl := &Lexer{input: input, runes: []rune(input)}\n\tl.readChar()\n\treturn l\n}", "func New() *Compiler {\n\trootScope := Scope{\n\t\tinstructions: operation.Instruction{},\n\t\temitted: Emitted{},\n\t\tprevEmitted: Emitted{},\n\t}\n\n\tsymbolTable := symbols.New()\n\tfor i, v := range data.Builtins {\n\t\tsymbolTable.DefineBuiltin(i, v.Name)\n\t}\n\n\treturn &Compiler{\n\t\tconstants: []data.Data{},\n\t\tsymbols: symbolTable,\n\t\tscopes: []Scope{rootScope},\n\t\tcurrentScope: 0,\n\t}\n}", "func emitLiteral(dst *tokens, lit []byte) {\n\tol := int(dst.n)\n\tfor i, v := range lit {\n\t\tdst.tokens[(i+ol)&maxStoreBlockSize] = token(v)\n\t}\n\tdst.n += uint16(len(lit))\n}", "func (gs *GenerateStatement) TokenLiteral() string { return gs.Token.Literal }", "func New(input string) *Lexer {\n\tl := &Lexer{input: input} //instantiate and return its location\n\tl.readChar()\n\treturn l\n}", "func New() helmify.Processor {\n\treturn &secret{}\n}", "func New(input string) *Lexer {\n\treturn &Lexer{\n\t\tinput: input,\n\t}\n}", "func New(src string) *Lexer {\n\tl := &Lexer{\n\t\tinput: src,\n\t}\n\t// step to the first character in order to be ready\n\tl.readChar()\n\treturn l\n}", "func (l *Loop) TokenLiteral() token.Token { return l.Token }" ]
[ "0.5704756", "0.5340582", "0.5268215", "0.5259007", "0.52381647", "0.5149707", "0.51186323", "0.50372", "0.49771982", "0.49668", "0.49517778", "0.49495387", "0.49202168", "0.4899631", "0.48374823", "0.48111275", "0.4775098", "0.47703588", "0.47358263", "0.47049072", "0.4694658", "0.46817857", "0.46765193", "0.46761405", "0.46680483", "0.46638495", "0.4646838", "0.46466386", "0.46309042", "0.46173924", "0.46058905", "0.46013433", "0.4594121", "0.4581426", "0.45596576", "0.455213", "0.4547879", "0.4519655", "0.4493047", "0.44905224", "0.44843784", "0.44826642", "0.44781706", "0.44581574", "0.4453587", "0.4449858", "0.4438612", "0.44348174", "0.44208515", "0.44115105", "0.4403214", "0.4403214", "0.439771", "0.43799424", "0.4377071", "0.43735805", "0.43708292", "0.4370818", "0.43568665", "0.43534046", "0.43515724", "0.43474463", "0.43456656", "0.43431267", "0.43413183", "0.43376493", "0.43319193", "0.4330285", "0.4328535", "0.43279058", "0.43092594", "0.42925546", "0.42768306", "0.42577878", "0.42546648", "0.42432246", "0.4243137", "0.4240282", "0.42392367", "0.42321953", "0.42296988", "0.42202023", "0.42151365", "0.42129824", "0.4212902", "0.42081732", "0.42048112", "0.42033303", "0.4202815", "0.41979325", "0.4187805", "0.417587", "0.41745925", "0.41743404", "0.41599315", "0.41481683", "0.41462666", "0.41425872", "0.41415733", "0.41370997" ]
0.816712
0
C returns the streaming data channel.
func (p *literalProcessor) C() <-chan map[string]interface{} { return p.c }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *Subscription) C() <-chan interface{} {\n\treturn s.channel\n}", "func (s *subscription) C() <-chan interface{} {\n\treturn s.c\n}", "func (c *dataReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func (uc *UnboundedChannel) Get() <-chan interface{} {\n\treturn uc.channel\n}", "func (l *Logger) C() chan<- interface{} {\n\treturn l.src\n}", "func (s *Scanner) C() <-chan []Measurement {\n\treturn s.ch\n}", "func (p *HostedProgramInfo) Channel() io.ReadWriteCloser {\n\treturn p.TaoChannel\n}", "func (conn *Connection) Channel() chan []byte {\n\treturn conn.channel\n}", "func (remote *SerialRemote) Channel() chan []byte {\n\treturn remote.channel\n}", "func (ticker *PausableTicker) GetChannel() <-chan time.Time {\n\treturn ticker.channel\n}", "func (o *Output) Read(channel int) *Buffer {\n\treturn o.channels[channel].Copy()\n}", "func (f *FFS) Get(ctx context.Context, c cid.Cid) (io.Reader, error) {\n\tstream, err := f.client.Get(ctx, &rpc.GetRequest{\n\t\tCid: util.CidToString(c),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treader, writer := io.Pipe()\n\tgo func() {\n\t\tfor {\n\t\t\treply, err := stream.Recv()\n\t\t\tif err == io.EOF {\n\t\t\t\t_ = writer.Close()\n\t\t\t\tbreak\n\t\t\t} else if err != nil {\n\t\t\t\t_ = writer.CloseWithError(err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t_, err = writer.Write(reply.GetChunk())\n\t\t\tif err != nil {\n\t\t\t\t_ = writer.CloseWithError(err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn reader, nil\n}", "func (s *p4RuntimeServer) StreamChannel(stream p4.P4Runtime_StreamChannelServer) error {\n\tfmt.Println(\"Starting bi-directional channel\")\n\tfor {\n\t\tinData, err := stream.Recv()\n\t\tif err == io.EOF {\n\t\t\treturn nil\n\t\t}\n\t\tfmt.Printf(\"%v\", inData)\n\t}\n\n\treturn nil\n}", "func (wp *Pool) C() <-chan Processor {\n\treturn wp.resultChan\n}", "func (p *pipeline) Channel() Channel {\n\treturn p.channel\n}", "func (c *webSocketClosedClient) GetStream() rpcc.Stream { return c.Stream }", "func (r *reducer) C() <-chan map[string]interface{} { return r.c }", "func (c *Computation) Data() <-chan *messages.DataMessage {\n\treturn c.dataCh\n}", "func (e *binaryExprEvaluator) C() <-chan map[string]interface{} { return e.c }", "func (p *Player) Channel() *api.Channel {\n\tretCh := make(chan *api.Channel)\n\tp.chGetChannel <- retCh\n\tc := <-retCh\n\treturn c\n}", "func (me *T) Data() <-chan float64 {\n\n\t// Create channel.\n\t//\n\t// We will return this to the caller.\n\t//\n\t// We will also spawn a goroutine and output the data from this datasack has onto it.\n\t//\n\t\tout := make(chan float64)\n\n\t// Spawn a goroutine that will output the data from this datasack onto the channel\n\t// we previously created.\n\t//\n\t// Note that this goroutine will probably block. 
But that's OK, since it is in\n\t// its own goroutine (and shouldn't take anything else down with it).\n\t//\n\t\tgo func() {\n\t\t\tfor _,value := range me.slice {\n\t\t\t\tout <- value\n\t\t\t}\n\n\t\t\tclose(out)\n\t\t}()\n\n\t// Return.\n\t\treturn out\n}", "func ReadData(c <-chan string) {\n\tfmt.Printf(\"Read Data: %s\\n\", <-c) // 只能收\n}", "func (wet *WETReader) Channel() (<-chan struct { Entry *WETEntry; Err error }) {\n channel := make(chan struct { Entry *WETEntry; Err error })\n go func() {\n defer func() {\n wet.Close()\n close(channel)\n }()\n for {\n entry, err := wet.extractEntry()\n channel <- struct { Entry *WETEntry; Err error }{ entry, err }\n if err != nil {\n return\n }\n }\n }()\n return channel\n}", "func (s *GameSocket) ReadChannel() <-chan *packet.Packet {\n\treturn s.readChan\n}", "func GetChannel(protocol, host string, port int, secureConfig *tls.Config) (ReaderWriterCloser, error) {\n\tvar conn net.Conn\n\tvar err error\n\tconn, err = net.Dial(protocol, host+\":\"+strconv.Itoa(port))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif protocol == \"tcp\" {\n\t\tconn.(*net.TCPConn).SetKeepAlive(true)\n\t\tconn.(*net.TCPConn).SetKeepAlivePeriod(30 * time.Second)\n\t}\n\tif secureConfig != nil {\n\t\tconn = tls.Client(conn, secureConfig)\n\t}\n\tvar readerWriter ReaderWriterCloser = &Channel{\n\t\tprotocol: protocol,\n\t\thost: host,\n\t\tport: port,\n\t\tconn: conn,\n\t\tmaxRead: 8 * 1024,\n\t\treadBuffer: make([]byte, 0),\n\t\twriteBuffer: make([]byte, 0),\n\t\twriteChannel: make(chan writeComplete, 100),\n\t\treadTimeout: 60 * time.Second,\n\t\twriteTimeout: 60 * time.Second,\n\t}\n\tgo readerWriter.(*Channel).writeRoutine()\n\treturn readerWriter, nil\n}", "func (f *feedback) Channel() (<-chan *FeedbackMessage, error) {\n\tif f.conn != nil {\n\t\treturn f.chanel, nil\n\t}\n\n\tif err := f.createConnection(); err != nil {\n\t\tlogerr(\"Unable to start feedback connection: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tf.stopWait.Add(1)\n\tgo f.monitorService()\n\n\treturn f.chanel, nil\n}", "func (c *webSocketFrameReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func (res channelBase) Channel() *types.Channel {\n\treturn res.channel\n}", "func (o *KinesisOutput) GetOutputChannel() chan []byte {\n\treturn o.outputChannel\n}", "func (s VectOp) Stream() <-chan float64 {\n\tch := make(chan float64)\n\tgo feed(ch, s)\n\treturn ch\n}", "func (s *f64) Channel(c int) Floating {\n\treturn floatingChannel{\n\t\tbuffer: s,\n\t\tchannel: c,\n\t}\n}", "func (m *Manager) InputChannel() chan []byte {\n\treturn m.byteStream\n}", "func (m *MetricsExtracor) Channel() chan<- interface{} {\n\treturn m.channel\n}", "func (m *Module) Stream() <-chan bar.Output {\n\tch := base.NewChannel()\n\tgo m.worker(ch)\n\treturn ch\n}", "func (c *webSocketFrameSentClient) GetStream() rpcc.Stream { return c.Stream }", "func (meta *MetaAI) GetChannel(c chan string) {\n\tmeta.l.Lock()\n\tdefer meta.l.Unlock()\n\n\tmeta.i = c\n}", "func getData(client pb.DataClient, filter *pb.DataFilter) {\r\n\t// calling the streaming API\r\n\tstream, err := client.GetData(context.Background(), filter)\r\n\tif err != nil {\r\n\t\tlog.Fatalf(\"Error on get data: %v\", err)\r\n\t}\r\n\tfor {\r\n\t\tdata, err := stream.Recv()\r\n\t\tif err == io.EOF {\r\n\t\t\tbreak\r\n\t\t}\r\n\t\tif err != nil {\r\n\t\t\tlog.Fatalf(\"%v.GetData(_) = _, %v\", client, err)\r\n\t\t}\r\n\t\tlog.Printf(\"Data: %v\", data)\r\n\t}\r\n}", "func (m *mapper) C() <-chan map[string]interface{} { return m.c }", "func (c 
*requestServedFromCacheClient) GetStream() rpcc.Stream { return c.Stream }", "func (e *EventNotif) Channel() (res <-chan Event) {\n\treturn e.eventsCh\n}", "func (c *webSocketCreatedClient) GetStream() rpcc.Stream { return c.Stream }", "func (stream *MAMWriteStream) Open() (trinary.Trytes, error) {\n\tchannelID, err := stream.m.ChannelCreate(5)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tstream.currentChannelID = channelID\n\treturn channelID, nil\n}", "func (gi *Invoker) StreamRecv(param *common.Params) error {\n\t//gloryPkg := newGloryRequestPackage(\"\", param.MethodName, uint64(common.StreamSendPkg), param.Seq)\n\t//gloryPkg.Params = append(gloryPkg.Params, param.Value)\n\t//gloryPkg.Header.ChanOffset = param.ChanOffset\n\t//gloryPkg.Header.Seq = param.Seq\n\t//if err := gloryPkg.sendToConn(gi.gloryConnClient, gi.handler); err != nil {\n\t//\tlog.Error(\"StreamRecv: gloryPkg.sendToConn(gi.conn, gi.handler) err =\", err)\n\t//\treturn GloryErrorConnErr\n\t//}\n\treturn nil\n}", "func WrapDataChannel(rtcDataChannel RTCDataChannel) (*DataChannel, error) {\n\trr, rw, err := Pipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdc := &DataChannel{\n\t\tdc: rtcDataChannel,\n\t\trr: rr,\n\t}\n\tdc.dc.OnMessage(func(data []byte) {\n\t\tlog.WithField(\"data\", data).\n\t\t\tDebug(\"datachannel message\")\n\n\t\tif rw != nil {\n\t\t\t_, err := rw.Write(data)\n\t\t\tif err != nil {\n\t\t\t\trw.Close()\n\t\t\t\trw = nil\n\t\t\t}\n\t\t}\n\t})\n\treturn dc, nil\n}", "func (c *responseReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *CounterChannel) Get() uint64 {\n\tc.check()\n\treturn <-c.readCh\n}", "func (c *webTransportClosedClient) GetStream() rpcc.Stream { return c.Stream }", "func (m *MetricsHolder) Channel() chan<- interface{} {\n\treturn m.channel\n}", "func (c *ChanReader) Read(out []byte) (int, error) {\n\tif c.buffer == nil {\n\t\treturn 0, io.EOF\n\t}\n\tn := copy(out, c.buffer)\n\tc.buffer = c.buffer[n:]\n\tif len(out) <= len(c.buffer) {\n\t\treturn n, nil\n\t} else if n > 0 {\n\t\t// We have some data to return, so make the channel read optional\n\t\tselect {\n\t\tcase p := <-c.input:\n\t\t\tif p == nil { // Stream was closed\n\t\t\t\tc.buffer = nil\n\t\t\t\tif n > 0 {\n\t\t\t\t\treturn n, nil\n\t\t\t\t}\n\t\t\t\treturn 0, io.EOF\n\t\t\t}\n\t\t\tn2 := copy(out[n:], p.Data)\n\t\t\tc.buffer = p.Data[n2:]\n\t\t\treturn n + n2, nil\n\t\tdefault:\n\t\t\treturn n, nil\n\t\t}\n\t}\n\tvar p *StreamChunk\n\tselect {\n\tcase p = <-c.input:\n\tcase <-c.interrupt:\n\t\tc.buffer = c.buffer[:0]\n\t\treturn n, ErrInterrupted\n\t}\n\tif p == nil { // Stream was closed\n\t\tc.buffer = nil\n\t\treturn 0, io.EOF\n\t}\n\tn2 := copy(out[n:], p.Data)\n\tc.buffer = p.Data[n2:]\n\treturn n + n2, nil\n}", "func (handle *Handle) GetStream() (Stream, error) {\n\tvar s Stream\n\tvar some *C.cudaStream_t\n\t//x := C.cudnnHandle_t(handle.Pointer())\n\n\ty := C.cudnnGetStream(handle.x, some)\n\ts.stream = *some\n\treturn s, Status(y).error(\"(*Handle).GetStream\")\n}", "func (c *loadingFinishedClient) GetStream() rpcc.Stream { return c.Stream }", "func (std *ReaderService) Read() (<-chan []byte, error) {\n\tmc := make(chan []byte, 0)\n\n\tstd.pub.Subscribe(mc)\n\n\treturn mc, nil\n}", "func (c ConnectionAdapter) Channel() (Channel, error) {\n\treturn c.Connection.Channel()\n}", "func SourceData(data ...int) <-chan int {\n\tfmt.Println(\"num:\", len(data))\n\tch := make(chan int, 80000000)\n\tgo func() {\n\t\tfor _, v := range data {\n\t\t\tch <- v\n\t\t}\n\t\tclose(ch)\n\t}()\n\treturn 
ch\n}", "func (c *webSocketHandshakeResponseReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *baseChannels) GetS3Channel() chan *S3Object {\n\treturn c.s3Channel\n}", "func (r *Readiness) GetChannel() chan ReadinessMessage {\n\treturn r.channel\n}", "func (nc *NetClient) readChannel() chan struct {\n\t*arbor.ProtocolMessage\n\terror\n} {\n\tout := make(chan struct {\n\t\t*arbor.ProtocolMessage\n\t\terror\n\t})\n\t// read messages continuously and send results back on a channel\n\tgo func() {\n\t\tdefer func() {\n\t\t\t// ensure send on closed channel doesn't cause panic\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tif _, ok := err.(runtime.Error); !ok {\n\t\t\t\t\t// silently cancel runtime errors, but allow other errors\n\t\t\t\t\t// to propagate.\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\tfor {\n\t\t\tm := new(arbor.ProtocolMessage)\n\t\t\terr := nc.ReadWriteCloser.Read(m)\n\t\t\tout <- struct {\n\t\t\t\t*arbor.ProtocolMessage\n\t\t\t\terror\n\t\t\t}{m, err}\n\t\t}\n\t}()\n\treturn out\n}", "func (c *eventSourceMessageReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func stream_copy(src io.Reader, dst io.Writer) <-chan int {\n\tbuf := make([]byte, 1024)\n\tsync_channel := make(chan int)\n\tgo func() {\n\t\tdefer func() {\n\t\t\tif con, ok := dst.(net.Conn); ok {\n\t\t\t\tcon.Close()\n\t\t\t\tlog.Printf(\"Connection from %v is closed\\n\", con.RemoteAddr())\n\t\t\t}\n\t\t\tsync_channel <- 0 // Notify that processing is finished\n\t\t}()\n\t\tfor {\n\t\t\tvar nBytes int\n\t\t\tvar err error\n\t\t\tnBytes, err = src.Read(buf)\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\tlog.Printf(\"Read error: %s\\n\", err)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t_, err = dst.Write(buf[0:nBytes])\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Write error: %s\\n\", err)\n\t\t\t}\n\t\t}\n\t}()\n\treturn sync_channel\n}", "func UnbufferedChannel() {\n\t/*\n\tbufferred channel would be c := make(chan int, 50)\n\tunbufferred channel\n\t */\n\tc := make(chan int)\n\n\tgo func() {\n\t\tfor i := 0; i < 10; i++ {\n\t\t\t// put number onto channel\n\t\t\t// code stops until the value is taken from the channel\n\t\t\t// like a relay race\n\t\t\tc <- i\n\t\t}\n\t}() // self executing anonymous function\n\n\tgo func() {\n\t\tfor i := 0; i < 10; i++ {\n\t\t\t// take the number off the channel\n\t\t\t// receive the value from the channel and print it\n\t\t\tv := <-c\n\t\t\tfmt.Println(v)\n\n\t\t}\n\t}()\n\n\ttime.Sleep(time.Second)\n}", "func (c *CryptoStreamConn) GetDataForWriting() []byte {\n\tdefer c.writeBuf.Reset()\n\tdata := make([]byte, c.writeBuf.Len())\n\tcopy(data, c.writeBuf.Bytes())\n\treturn data\n}", "func Stream(out chan<- Value) error {\n for {\n v, err := DoSomething() // HL\n if err != nil {\n return err\n }\n out <- v // HL\n }\n }", "func streamCopy(src io.Reader, dst io.Writer) <-chan int {\n\tbuf := make([]byte, 1024)\n\tsyncChannel := make(chan int)\n\tgo func() {\n\t\tdefer func() {\n\t\t\tif con, ok := dst.(net.Conn); ok {\n\t\t\t\tcon.Close()\n\t\t\t\t//log.Printf(\"Connection from %v is closed\\n\", con.RemoteAddr())\n\t\t\t}\n\t\t\tsyncChannel <- 0 // Notify that processing is finished\n\t\t}()\n\t\tfor {\n\n\t\t\tvar nBytes int\n\t\t\tvar err error\n\t\t\tnBytes, err = src.Read(buf)\n\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\t//log.Printf(\"Read error: %s\\n\", err)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t_, err = dst.Write(buf[0:nBytes])\n\t\t\tif err != nil {\n\t\t\t\t//log.Fatalf(\"Write error: 
%s\\n\", err)\n\t\t\t}\n\t\t}\n\t}()\n\treturn syncChannel\n}", "func (v Vehicle) Stream() (chan *StreamEvent, chan error, error) {\n\turl := StreamURL + \"/stream/\" + strconv.Itoa(v.VehicleID) + \"/?values=\" + StreamParams\n\treq, _ := http.NewRequest(\"GET\", url, nil)\n\treq.SetBasicAuth(ActiveClient.Auth.Email, v.Tokens[0])\n\tresp, err := ActiveClient.HTTP.Do(req)\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\teventChan := make(chan *StreamEvent)\n\terrChan := make(chan error)\n\tgo readStream(resp, eventChan, errChan)\n\n\treturn eventChan, errChan, nil\n}", "func NewChannel() (chan *fluent.FluentRecordSet, chan Stat) {\n\tmessageCh := make(chan *fluent.FluentRecordSet, MessageChannelBufferLen)\n\tmonitorCh := make(chan Stat, MonitorChannelBufferLen)\n\treturn messageCh, monitorCh\n}", "func NewChannel() (chan *fluent.FluentRecordSet, chan Stat) {\n\tmessageCh := make(chan *fluent.FluentRecordSet, MessageChannelBufferLen)\n\tmonitorCh := make(chan Stat, MonitorChannelBufferLen)\n\treturn messageCh, monitorCh\n}", "func (c *requestInterceptedClient) GetStream() rpcc.Stream { return c.Stream }", "func Stream(ctx context.Context, wC etcd.WatchChan) <-chan *etcd.Event {\n\teC := make(chan *etcd.Event, 1024)\n\n\tgo func(ctx context.Context, ec chan *etcd.Event) {\n\t\t// this unblocks any callers ranging on ec\n\t\tdefer close(ec)\n\n\t\t// etcd client will close this channel if error occurs\n\t\tfor wResp := range wC {\n\t\t\tif ok, err := chkctx.Check(ctx); ok {\n\t\t\t\tlog.Info().Str(\"component\", \"Stream\").Msgf(\"stream ctx canceled. returning: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif wResp.Canceled {\n\t\t\t\tlog.Info().Str(\"component\", \"Stream\").Msgf(\"watch channel error encountered. returning: %v\", wResp.Err())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, event := range wResp.Events {\n\t\t\t\teC <- event\n\t\t\t}\n\t\t}\n\t}(ctx, eC)\n\n\treturn eC\n}", "func (r *Receiver) Read() interface{} {\n\tutils.Debugln(\"Reading\")\n\tb := <-r.C // wait for a broadast channel\n\tv := b.v // retrieve value from received broadcastchannel\n\tr.C <- b // write same broadcastchannel to broadcastchannel\n\tr.C = b.c // broadcastchannel now becomes bc from broadcast\n\treturn v // return received value\n}", "func (c *ChangeWatcher) outC() chan *RoomChange {\n if len(c.buffer) <= 0 {\n return nil\n }\n return c.out\n}", "func (closer *Closer) CloseChannel() chan struct{} {\n\treturn closer.channel\n}", "func (r *ChannelReader) Read(b []byte) (sz int, err error) {\n\tif len(b) == 0 {\n\t\treturn 0, io.ErrShortBuffer\n\t}\n\n\tfor {\n\t\tif len(r.buf) > 0 {\n\t\t\tif len(r.buf) <= len(b) {\n\t\t\t\tsz = len(r.buf)\n\t\t\t\tcopy(b, r.buf)\n\t\t\t\tr.buf = nil\n\t\t\t} else {\n\t\t\t\tcopy(b, r.buf)\n\t\t\t\tr.buf = r.buf[len(b):]\n\t\t\t\tsz = len(b)\n\t\t\t}\n\t\t\treturn sz, nil\n\t\t}\n\n\t\tvar ok bool\n\t\tif r.deadline.IsZero() {\n\t\t\tr.buf, ok = <-r.c\n\t\t} else {\n\t\t\ttimer := time.NewTimer(r.deadline.Sub(time.Now()))\n\t\t\tdefer timer.Stop()\n\n\t\t\tselect {\n\t\t\tcase r.buf, ok = <-r.c:\n\t\t\tcase <-timer.C:\n\t\t\t\treturn 0, context.DeadlineExceeded\n\t\t\t}\n\t\t}\n\t\tif len(r.buf) == 0 && !ok {\n\t\t\treturn 0, io.EOF\n\t\t}\n\t}\n}", "func (swp *SourceWorkerPool) GetOutputChannel() (chan map[string]interface{}, error) {\n\treturn swp.outputChannel, nil\n}", "func (p *Publisher) GetChannel() *amqp.Channel {\n\tp.publicMethodsLock.Lock()\n\tdefer p.publicMethodsLock.Unlock()\n\treturn p.getChannelWithoutLock()\n}", "func StreamCreateFile(data 
interface{}, offset int, flags Flags) (Channel, error) {\n\tvar ch C.DWORD\n\tswitch data := data.(type) {\n\tcase CBytes:\n\t\tch = C.BASS_StreamCreateFile(1, data.Data, culong(offset), culong(data.Length), cuint(flags))\n\tcase string:\n\t\tcstring := unsafe.Pointer(C.CString(data))\n\t\tdefer C.free(cstring)\n\t\tch = C.BASS_StreamCreateFile(0, cstring, culong(offset), 0, cuint(flags))\n\tcase []byte:\n\t\tcbytes := C.CBytes(data)\n\t\tch = C.BASS_StreamCreateFile(1, cbytes, culong(offset), culong(len(data)), cuint(flags))\n\t\t// unlike BASS_SampleLoad, BASS won't make a copy of the sample data internally, which means we can't just pass a pointer to the Go bytes. Instead we need to set a sync to free the bytes when the stream it's associated with is freed\n\t\tif ch != 0 {\n\t\t\tchannel := Channel(ch)\n\t\t\t_, err := channel.SetSync(SYNC_FREE, SYNC_ONETIME, 0, SyncprocFree, cbytes)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\t}\n\treturn channelToError(ch)\n}", "func (cc *CounterControl) StreamValues() (chan *CounterData, error) {\n\tentity := cc.counter.ReadWildcardRequest()\n\tentityList := []*p4V1.Entity{entity}\n\n\tcounterEntityCh, err := cc.control.Client.ReadEntities(entityList)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcdataChannel := make(chan *CounterData, cc.counter.Size)\n\tgo func() {\n\t\tdefer close(cdataChannel)\n\t\tfor e := range counterEntityCh {\n\t\t\tcounterData := getCounterData(e)\n\t\t\tcdataChannel <- &counterData\n\t\t}\n\t}()\n\n\treturn cdataChannel, nil\n}", "func (sc *SoundCloud) Stream(track string) (io.ReadCloser, error) {\n\t// Get the HTTP Stream\n\trsp, err := http.Get(sc.streamUrl(track).String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Createa http stream buffer\n\tbuff := buffer.HTTPBuffer(rsp)\n\tgo buff.Buffer() // Start buffering\n\tscs := &SoundCloudStream{\n\t\tbuffer: buff,\n\t\tdecoder: &mpa.Reader{Decoder: &mpa.Decoder{Input: buff}},\n\t}\n\treturn scs, nil\n}", "func (p *Pool) Consume() <-chan interface{} {\n\treturn p.c\n}", "func BufferedChannels(){\n\tc := make(chan int, 2)\n\tc <- 1\n\tc <- 2\n\tfmt.Println(<-c)\n\tfmt.Println(<-c)\n}", "func (std *LineReaderService) Read() (<-chan []byte, error) {\n\tmc := make(chan []byte, 0)\n\n\tstd.pub.Subscribe(mc)\n\n\treturn mc, nil\n}", "func (s *Chan) Pipe(rwc io.ReadWriteCloser) {\n\ts.connection = rwc\n\tgo s.readFromReader(rwc)\n\tgo s.writeToWriter(rwc)\n}", "func outputData(outputChannel chan string) {\n\n\tfor {\n\t\tdata := <-outputChannel\n\t\tfmt.Println(data)\n\t}\n}", "func (l *ChannelList) Get(key string) *Channel {\n\t// get a conn bucket\n\tb := l.Bucket(key)\n\tb.Lock()\n\tif c, ok := b.data[key]; ok {\n\t\tb.Unlock()\n\t\tChStat.IncrAccess()\n\t\treturn c\n\t}\n\tb.Unlock()\n\treturn nil\n}", "func (c *requestWillBeSentClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *webTransportConnectionEstablishedClient) GetStream() rpcc.Stream { return c.Stream }", "func (p *pool) get() (*channel, error) {\n\tif p.closed {\n\t\treturn nil, ErrPoolClosed\n\t}\n\n\tactiveChannel, ok := <-p.readyChannel\n\tif !ok {\n\t\treturn nil, ErrPoolClosed\n\t}\n\n\treturn activeChannel, nil\n}", "func (c *webSocketFrameErrorClient) GetStream() rpcc.Stream { return c.Stream }", "func (k *ChannelKeeper) Channel() *amqp.Channel {\n\treturn k.msgCh\n}", "func (this *FtpsClient) OpenFtpDataChannel(_FtpCommand_S string, _ExpectedReplyCode_i int) (rReplyCode_i int, rReplyMessage_S string, rRts error) {\n\trRts = 
this.sendRequestToFtpServerDataConn(_FtpCommand_S, _ExpectedReplyCode_i)\n\treturn\n}", "func (c *Client) StreamingDirect(ctx context.Context) (chan Event, error) {\n\treturn c.streaming(ctx, \"direct\", nil)\n}", "func (c *webSocketWillSendHandshakeRequestClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *webTransportCreatedClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *remoteConn) OpenChannel(name string, data []byte) (ssh.Channel, error) {\n\tchannel, _, err := c.sconn.OpenChannel(name, data)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\treturn channel, nil\n}", "func (ch *RingChannel) Out() <-chan interface{} {\n\treturn ch.output\n}", "func (r *realTimer) C() <-chan time.Time {\n\treturn r.timer.C\n}", "func (c *cdcClient) recv() {\n\tc.debug(\"recv call\")\n\tdefer c.debug(\"recv return\")\n\n\tvar err error\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tc.shutdown(err)\n\t\t}\n\t\tclose(c.events)\n\t}()\n\n\tvar now time.Time\n\tfor {\n\t\t_, bytes, rerr := c.wsConn.ReadMessage()\n\t\tnow = time.Now()\n\t\tif err != nil {\n\t\t\tif websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) {\n\t\t\t\terr = rerr\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\t// CDC events should be the bulk of data we recv, so presume it's that.\n\t\tvar e CDCEvent\n\t\tif err = json.Unmarshal(bytes, &e); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t// If event ID is set (not empty), then it's a CDC event as expected\n\t\tif e.Id != \"\" {\n\t\t\tc.debug(\"cdc event: %#v\", e)\n\t\t\tselect {\n\t\t\tcase c.events <- e: // send CDC event to caller\n\t\t\tdefault:\n\t\t\t\tc.debug(\"caller blocked\")\n\t\t\t\tc.shutdown(ErrCallerBlocked)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\t// It's not a CDC event, so it should be a control message\n\t\t\tvar msg map[string]interface{}\n\t\t\tif err = json.Unmarshal(bytes, &msg); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif _, ok := msg[\"control\"]; !ok {\n\t\t\t\t// This shouldn't happen: data is not a CDC event or a control message\n\t\t\t\tc.shutdown(ErrBadData)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err = c.control(msg, now); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func (c *Channel) Channels() Channels {\n\treturn c.children\n}", "func (r *chanReader) Read(data []byte) (int, error) {\n\tvar ok bool\n\tfor {\n\t\tif len(r.buf) > 0 {\n\t\t\tn := copy(data, r.buf)\n\t\t\tr.buf = r.buf[n:]\n\t\t\tmsg := windowAdjustMsg{\n\t\t\t\tPeersId: r.clientChan.peersId,\n\t\t\t\tAdditionalBytes: uint32(n),\n\t\t\t}\n\t\t\treturn n, r.clientChan.writePacket(marshal(msgChannelWindowAdjust, msg))\n\t\t}\n\t\tr.buf, ok = <-r.data\n\t\tif !ok {\n\t\t\treturn 0, io.EOF\n\t\t}\n\t}\n\tpanic(\"unreachable\")\n}", "func bufferedChannelTest() {\n\tch := make(chan int, 2)\n\tch <- 1\n\tch <- 2\n\t// ch <- 3 \n\tfmt.Println(<-ch)\n\tfmt.Println(<-ch)\n}" ]
[ "0.6756691", "0.6601", "0.61970544", "0.6075765", "0.60143274", "0.59803826", "0.58934104", "0.5825283", "0.57866263", "0.5784679", "0.5767966", "0.57653207", "0.5760354", "0.57434285", "0.57309896", "0.5709145", "0.5659234", "0.56499964", "0.5649926", "0.56174", "0.56022227", "0.5538912", "0.55115026", "0.55064505", "0.54961675", "0.54940957", "0.54837894", "0.54825777", "0.5467813", "0.5461628", "0.5445262", "0.5438585", "0.5430984", "0.54254085", "0.54249144", "0.541178", "0.54028153", "0.53923357", "0.53917265", "0.5361076", "0.53496283", "0.5347493", "0.5326533", "0.5323118", "0.53187907", "0.53163517", "0.531059", "0.5302454", "0.53001654", "0.5286017", "0.52750295", "0.5241507", "0.5223528", "0.52209556", "0.52176374", "0.5210071", "0.5193128", "0.51851714", "0.5182929", "0.51680803", "0.51678467", "0.5167433", "0.5162985", "0.5132004", "0.51287216", "0.5122826", "0.5122826", "0.51204085", "0.5116976", "0.51167667", "0.5099191", "0.5091619", "0.5089214", "0.50816643", "0.50755", "0.50708544", "0.50654554", "0.50653684", "0.5060143", "0.50487965", "0.5048679", "0.50416267", "0.5034117", "0.5021917", "0.50192106", "0.5018358", "0.5006002", "0.49929616", "0.4987375", "0.49799228", "0.49749085", "0.49578887", "0.49488243", "0.49371094", "0.4936553", "0.49327564", "0.4926295", "0.49173513", "0.49082032", "0.49020994" ]
0.58118516
8
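Several of the negative snippets in the row above share one producer idiom: spawn a goroutine that fills a channel, close the channel when done, and hand the receive-only end back to the caller. A compact, runnable distillation of that idiom follows (all names and the buffer-size choice are illustrative, not taken from any snippet):

```go
package main

import "fmt"

// sourceData distills the producer pattern seen in the snippets above:
// fill a channel from a goroutine, close it when finished, and return
// the receive-only end to the caller.
func sourceData(data ...int) <-chan int {
	ch := make(chan int, len(data)) // buffer sized to the input (illustrative choice)
	go func() {
		defer close(ch) // closing lets consumers range to completion
		for _, v := range data {
			ch <- v
		}
	}()
	return ch
}

func main() {
	for v := range sourceData(1, 2, 3) {
		fmt.Println(v)
	}
}
```

Because the producer closes the channel, the consumer can simply `range` over it instead of tracking a count or a separate done signal.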
process continually returns a literal value with a "" (empty-string) key.
func (p *literalProcessor) start() { go p.run() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (sc *SafeCounter) Value(key string) int {\n\tsc.mux.Lock()\n\t// Lock so only one goroutine at a time can access the map sc.v\n\tdefer sc.mux.Unlock()\n\treturn sc.v[key]\n}", "func (p *intPool) getZero() *big.Int {\n\tif p.pool.len() > 0 {\n\t\treturn p.pool.pop().SetUint64(0)\n\t}\n\treturn new(big.Int)\n}", "func (n *NilProcess) Calculi() string {\n\treturn \"0\"\n}", "func (c *counter) retrieve(key string) int {\n\tc.mux.Lock()\n\tfmt.Println(\"Inside retrieve: locked | current key value: \", c.v[key])\n\t// Lock so only one goroutine at a time can access the map c.v.\n\tdefer c.mux.Unlock() //this will get executed after main completes execution\n\treturn c.v[key]\n}", "func (t systemIntType) Zero() interface{} {\n\treturn int64(0)\n}", "func (t systemIntType) Zero() interface{} {\n\treturn int64(0)\n}", "func (self *CoreWorkload) buildSingleValue(key string) KVMap {\n\tfieldKey := self.fieldNames[self.fieldChooser.NextInt()]\n\tvar data []byte\n\tif self.dataIntegrity {\n\t\tdata = self.buildDeterministicValue(key, fieldKey)\n\t} else {\n\t\t// fill with random data\n\t\tdata = RandomBytes(self.fieldLengthGenerator.NextInt())\n\t}\n\treturn KVMap{\n\t\tfieldKey: data,\n\t}\n}", "func (b *baseCount) process(w word) {\n\tb.Lock()\n\tdefer b.Unlock()\n\n\tif _, ok := b.words[w]; !ok {\n\t\tb.words[w] = 0\n\t}\n\n\tb.words[w] += 1\n}", "func (Executor) ZeroValueResult() interface{} {\n\treturn Result{}\n}", "func MrbProcValue(p RProc) Value { return p.Value() }", "func (c *counter) Value() uint64 {\n\treturn atomic.LoadUint64(&c.val)\n}", "func (c *SafeCounter) Value() int {\n\tc.mu.Lock()\n\t// Lock so only one goroutine at a time can access the variable\n\tdefer c.mu.Unlock()\n\treturn c.v\n}", "func (r propertyTypeRegistry) ZeroValue(module, key string) data.Clonable {\n\tif zeroValue, found := r[module][key]; found {\n\t\tzv, err := zeroValue.Clone()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn zv\n\t}\n\n\tpanic(fmt.Sprintf(\"Missing match for key '%s' in module '%s' and go type! 
Please use PropertyTypeRegistry.Register!\", key, module))\n}", "func resetValue(w http.ResponseWriter, req *http.Request) {\n\treqBody, _ := ioutil.ReadAll(req.Body)\n\tvar counter Value\n\tjson.Unmarshal(reqBody, &counter)\n\tcounter.Value = 0\n\tjson.NewEncoder(w).Encode(counter)\n}", "func (sequence Sequence) Zero(c *Compiler) (expression Expression) {\n\texpression = c.NewExpression()\n\texpression.Type = sequence\n\n\texpression.Go.Write(sequence.Native(c))\n\texpression.Go.WriteString(`{}`)\n\treturn\n}", "func (a *Sequence) Value() uint64 {\n\ta.m.Lock()\n\tdefer a.m.Unlock()\n\treturn a.n\n}", "func (c *Counter) Value() uint64 {\n\treturn atomic.LoadUint64(c.addr())\n}", "func (this *DeployLock) value() int {\n\tthis.mutex.Lock()\n\tdefer this.mutex.Unlock()\n\treturn this.numStarted\n}", "func read(arg string) int {\n\t// we do not consume the key, but in real life the key will be consumed to get the value\n\t// from DB or a filesystem etc.\n\t// We simply return a random number between 0 and 100 (excluded 100).\n\treturn rand.Intn(100)\n}", "func genValue(i int, ev *replication.BinlogEvent) []byte {\n\traw := ev.RawData\n\trowValueIndex := ev.Event.(*replication.RowsEvent).RowValueIndex\n\treturn raw[rowValueIndex[i]:rowValueIndex[i+1]] // 共享内存\n}", "func (b ByteSlice) ValueOrZero() []byte {\n\tif !b.Valid {\n\t\treturn []byte{}\n\t}\n\treturn b.ByteSlice\n}", "func zeroval(ival int) {\n\tival = 0\n}", "func zeroval(ival int) {\n\tival = 0\n}", "func zeroval(ival int) {\n\tival = 0\n}", "func zeroval(ival int) {\n\tival = 0\n}", "func (dc *Int96DictConverter) FillZero(out interface{}) {\n\to := out.([]parquet.Int96)\n\to[0] = dc.zeroVal\n\tfor i := 1; i < len(o); i *= 2 {\n\t\tcopy(o[i:], o[:i])\n\t}\n}", "func (p RProc) Value() Value { return mrbObjValue(unsafe.Pointer(p.p)) }", "func (h Hasher) Zero() []byte {\n\thf := h.New()\n\tif hf == nil {\n\t\treturn nil\n\t}\n\treturn hf.Sum(nil)\n}", "func (NilUGauge) Value() uint64 { return 0 }", "func zeroValueData(s string) (map[string]interface{}, error) {\n\t//Create map\n\td := make(map[string]interface{})\n\n\t//Instantiate new dynamic message type from string name parsed\n\tt, err := NewDynamicMessageType(s)\n\tif err != nil {\n\t\treturn d, errors.Wrap(err, \"Failed to create NewDynamicMessageType \"+s)\n\t}\n\t//Range fields in the dynamic message type\n\tfor _, field := range t.spec.Fields {\n\t\tif field.IsArray {\n\t\t\t//It's an array. 
Create empty Slices\n\t\t\tswitch field.GoType {\n\t\t\tcase \"bool\":\n\t\t\t\td[field.Name] = make([]bool, 0)\n\t\t\tcase \"int8\":\n\t\t\t\td[field.Name] = make([]int8, 0)\n\t\t\tcase \"int16\":\n\t\t\t\td[field.Name] = make([]int16, 0)\n\t\t\tcase \"int32\":\n\t\t\t\td[field.Name] = make([]int32, 0)\n\t\t\tcase \"int64\":\n\t\t\t\td[field.Name] = make([]int64, 0)\n\t\t\tcase \"uint8\":\n\t\t\t\td[field.Name] = make([]uint8, 0)\n\t\t\tcase \"uint16\":\n\t\t\t\td[field.Name] = make([]uint16, 0)\n\t\t\tcase \"uint32\":\n\t\t\t\td[field.Name] = make([]uint32, 0)\n\t\t\tcase \"uint64\":\n\t\t\t\td[field.Name] = make([]uint64, 0)\n\t\t\tcase \"float32\":\n\t\t\t\td[field.Name] = make([]JsonFloat32, 0)\n\t\t\tcase \"float64\":\n\t\t\t\td[field.Name] = make([]JsonFloat64, 0)\n\t\t\tcase \"string\":\n\t\t\t\td[field.Name] = make([]string, 0)\n\t\t\tcase \"ros.Time\":\n\t\t\t\td[field.Name] = make([]Time, 0)\n\t\t\tcase \"ros.Duration\":\n\t\t\t\td[field.Name] = make([]Duration, 0)\n\t\t\tdefault:\n\t\t\t\t// In this case, it will probably be because the go_type is describing another ROS message, so we need to replace that with a nested DynamicMessage.\n\t\t\t\td[field.Name] = make([]Message, 0)\n\t\t\t}\n\t\t\tvar size uint32 = uint32(field.ArrayLen)\n\t\t\t//In the case the array length is static, we iterated through array items\n\t\t\tif field.ArrayLen != -1 {\n\t\t\t\tfor i := 0; i < int(size); i++ {\n\t\t\t\t\tif field.IsBuiltin {\n\t\t\t\t\t\t//Append the goType zeroValues to their arrays\n\t\t\t\t\t\tswitch field.GoType {\n\t\t\t\t\t\tcase \"bool\":\n\t\t\t\t\t\t\td[field.Name] = append(d[field.Name].([]bool), false)\n\t\t\t\t\t\tcase \"int8\":\n\t\t\t\t\t\t\td[field.Name] = append(d[field.Name].([]int8), 0)\n\t\t\t\t\t\tcase \"int16\":\n\t\t\t\t\t\t\td[field.Name] = append(d[field.Name].([]int16), 0)\n\t\t\t\t\t\tcase \"int32\":\n\t\t\t\t\t\t\td[field.Name] = append(d[field.Name].([]int32), 0)\n\t\t\t\t\t\tcase \"int64\":\n\t\t\t\t\t\t\td[field.Name] = append(d[field.Name].([]int64), 0)\n\t\t\t\t\t\tcase \"uint8\":\n\t\t\t\t\t\t\td[field.Name] = append(d[field.Name].([]uint8), 0)\n\t\t\t\t\t\tcase \"uint16\":\n\t\t\t\t\t\t\td[field.Name] = append(d[field.Name].([]uint16), 0)\n\t\t\t\t\t\tcase \"uint32\":\n\t\t\t\t\t\t\td[field.Name] = append(d[field.Name].([]uint32), 0)\n\t\t\t\t\t\tcase \"uint64\":\n\t\t\t\t\t\t\td[field.Name] = append(d[field.Name].([]uint64), 0)\n\t\t\t\t\t\tcase \"float32\":\n\t\t\t\t\t\t\td[field.Name] = append(d[field.Name].([]JsonFloat32), JsonFloat32{F: 0.0})\n\t\t\t\t\t\tcase \"float64\":\n\t\t\t\t\t\t\td[field.Name] = append(d[field.Name].([]JsonFloat64), JsonFloat64{F: 0.0})\n\t\t\t\t\t\tcase \"string\":\n\t\t\t\t\t\t\td[field.Name] = append(d[field.Name].([]string), \"\")\n\t\t\t\t\t\tcase \"ros.Time\":\n\t\t\t\t\t\t\td[field.Name] = append(d[field.Name].([]Time), Time{})\n\t\t\t\t\t\tcase \"ros.Duration\":\n\t\t\t\t\t\t\td[field.Name] = append(d[field.Name].([]Duration), Duration{})\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\t// Something went wrong.\n\t\t\t\t\t\t\treturn d, errors.Wrap(err, \"Builtin field \"+field.GoType+\" not found\")\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// Else it's not a builtin. 
Create a nested message type for values inside\n\t\t\t\t\t\tt2, err := newDynamicMessageTypeNested(field.Type, field.Package)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn d, errors.Wrap(err, \"Failed to create newDynamicMessageTypeNested \"+field.Type)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tmsg := t2.NewMessage()\n\t\t\t\t\t\t//Append nested message map to message type array in main map\n\t\t\t\t\t\td[field.Name] = append(d[field.Name].([]Message), msg)\n\t\t\t\t\t}\n\t\t\t\t\t//Else array is dynamic, by default we do not initialize any values in it\n\t\t\t\t}\n\t\t\t}\n\t\t} else if field.IsBuiltin {\n\t\t\t//If its a built in type\n\t\t\tswitch field.GoType {\n\t\t\tcase \"string\":\n\t\t\t\td[field.Name] = \"\"\n\t\t\tcase \"bool\":\n\t\t\t\td[field.Name] = bool(false)\n\t\t\tcase \"int8\":\n\t\t\t\td[field.Name] = int8(0)\n\t\t\tcase \"int16\":\n\t\t\t\td[field.Name] = int16(0)\n\t\t\tcase \"int32\":\n\t\t\t\td[field.Name] = int32(0)\n\t\t\tcase \"int64\":\n\t\t\t\td[field.Name] = int64(0)\n\t\t\tcase \"uint8\":\n\t\t\t\td[field.Name] = uint8(0)\n\t\t\tcase \"uint16\":\n\t\t\t\td[field.Name] = uint16(0)\n\t\t\tcase \"uint32\":\n\t\t\t\td[field.Name] = uint32(0)\n\t\t\tcase \"uint64\":\n\t\t\t\td[field.Name] = uint64(0)\n\t\t\tcase \"float32\":\n\t\t\t\td[field.Name] = JsonFloat32{F: float32(0.0)}\n\t\t\tcase \"float64\":\n\t\t\t\td[field.Name] = JsonFloat64{F: float64(0.0)}\n\t\t\tcase \"ros.Time\":\n\t\t\t\td[field.Name] = Time{}\n\t\t\tcase \"ros.Duration\":\n\t\t\t\td[field.Name] = Duration{}\n\t\t\tdefault:\n\t\t\t\treturn d, errors.Wrap(err, \"Builtin field \"+field.GoType+\" not found\")\n\t\t\t}\n\t\t\t//Else its a ros message type\n\t\t} else {\n\t\t\t//Create new dynamic message type nested\n\t\t\tt2, err := newDynamicMessageTypeNested(field.Type, field.Package)\n\t\t\tif err != nil {\n\t\t\t\treturn d, errors.Wrap(err, \"Failed to create dewDynamicMessageTypeNested \"+field.Type)\n\t\t\t}\n\t\t\t//Append message as a map item\n\t\t\td[field.Name] = t2.NewMessage()\n\t\t}\n\t}\n\treturn d, err\n}", "func Process(cmd string) (float64, error) {\n\treturn 0, nil\n}", "func (a *AtomicInt) Value() int {\n\ta.mu.Lock()\n\tn := a.n\n\ta.mu.Unlock()\n\treturn n\n}", "func (item *KVItem) Value() []byte {\n\titem.wg.Wait()\n\treturn item.val\n}", "func (self *T) mZERO() {\r\n \r\n \r\n\t\t_type := T_ZERO\r\n\t\t_channel := antlr3rt.DEFAULT_TOKEN_CHANNEL\r\n\t\t// C:/dev/antlr.github/antlr/runtime/Go/antlr/test/T.g:8:5: ( '0' )\r\n\t\t// C:/dev/antlr.github/antlr/runtime/Go/antlr/test/T.g:8:7: '0'\r\n\t\t{\r\n\t\tself.MatchChar('0') \r\n\r\n\r\n\t\t}\r\n\r\n\t\tself.State().SetType( _type )\r\n\t\tself.State().SetChannel( _channel )\r\n}", "func (atomic *_atomic) Value() interface{} {\n\tatomic.mutex.Lock()\n\tv := atomic.value\n\tatomic.mutex.Unlock()\n\treturn v\n}", "func (self *CoreWorkload) buildDeterministicValue(key string, fieldKey string) []byte {\n\tsize := self.fieldLengthGenerator.NextInt()\n\tbuf := bytes.NewBuffer(make([]byte, 0, size))\n\tbuf.WriteString(key)\n\tbuf.WriteString(\":\")\n\tbuf.WriteString(fieldKey)\n\tfor int64(buf.Len()) < size {\n\t\tbuf.WriteString(\":\")\n\t\tbuf.WriteString(fmt.Sprintf(\"%d\", javaStringHashcode(buf.Bytes())))\n\t}\n\tbuf.Truncate(int(size))\n\treturn buf.Bytes()\n}", "func (s *CacheState) Value() CacheState {\n\treturn CacheState(atomic.LoadInt32((*int32)(s)))\n}", "func (m *DeviceHealthAttestationState) GetPcr0()(*string) {\n val, err := m.GetBackingStore().Get(\"pcr0\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return 
val.(*string)\n }\n return nil\n}", "func (c *Counter) Value() uint64 {\n\treturn atomic.LoadUint64(&c.value)\n}", "func (dc *ByteArrayDictConverter) FillZero(out interface{}) {\n\to := out.([]parquet.ByteArray)\n\to[0] = dc.zeroVal\n\tfor i := 1; i < len(o); i *= 2 {\n\t\tcopy(o[i:], o[:i])\n\t}\n}", "func getZero[T any]() T {\n\tvar result T\n\treturn result\n}", "func Test0() map[string]int16 {\n\tvar res0 map[string]int16\n\treturn res0\n}", "func Zero() Vect { return Vect{} }", "func (cache *Cache) Get(seqno uint16, result []byte) uint16 {\n\tcache.mu.Lock()\n\tdefer cache.mu.Unlock()\n\n\tn, _, _ := get(seqno, cache.entries, result)\n\tif n > 0 {\n\t\treturn n\n\t}\n\n\treturn 0\n}", "func (h *RedisHelper) GetNextVal(key string) (id int64, err error) {\n\tsql := \"INCR\"\n\tvar (\n\t\treply interface{}\n\t)\n\treply, err = h.Conn.Do(sql, key)\n\tif err != nil {\n\t\treturn\n\t}\n\tid, err = redis.Int64(reply, err)\n\treturn\n}", "func (dc *Int32DictConverter) FillZero(out interface{}) {\n\to := out.([]int32)\n\to[0] = dc.zeroVal\n\tfor i := 1; i < len(o); i *= 2 {\n\t\tcopy(o[i:], o[:i])\n\t}\n}", "func (c Counter) Value() (r int64) {\n\tc.Lock()\n\tr = c.n\n\tc.Unlock()\n\treturn\n}", "func _getCounter() uint64 {\n\treturn state.ReadUint64(COUNTER_KEY)\n}", "func ZeroValueTx(t *testing.T, tag trinary.Trytes) []trinary.Trytes {\n\n\tvar b bundle.Bundle\n\tentry := bundle.BundleEntry{\n\t\tAddress: utils.RandomKerlHashTrytesInsecure(),\n\t\tValue: 0,\n\t\tTag: tag,\n\t\tTimestamp: uint64(time.Now().UnixNano() / int64(time.Second)),\n\t\tLength: uint64(1),\n\t\tSignatureMessageFragments: []trinary.Trytes{trinary.MustPad(\"\", consts.SignatureMessageFragmentSizeInTrytes)},\n\t}\n\tb, err := bundle.Finalize(bundle.AddEntry(b, entry))\n\trequire.NoError(t, err)\n\n\treturn transaction.MustFinalTransactionTrytes(b)\n}", "func (dc *FixedLenByteArrayDictConverter) FillZero(out interface{}) {\n\to := out.([]parquet.FixedLenByteArray)\n\to[0] = dc.zeroVal\n\tfor i := 1; i < len(o); i *= 2 {\n\t\tcopy(o[i:], o[:i])\n\t}\n}", "func (p *Pet) GetPrimaryKeyZeroValue() interface{} {\n\treturn 0\n}", "func getVal(words map[string]int) int {\n\tf.Println(\"Give a unique positive integer value > 0:\")\n\n\treader := r.NewReader(os.Stdin)\n\ttext, err := reader.ReadString('\\n')\n\tif err != nil {\n\t\tf.Println(err)\n\t\treturn 0\n\t}\n\n\tval, err := scvt.Atoi(ss.Replace(text, \"\\n\", \"\", -1))\n\tif err != nil {\n\t\tf.Println(err)\n\t\treturn 0\n\t}\n\n\tif val <= 0 {\n\t\tf.Println(\"Please pick a number above 0\")\n\t\treturn 0\n\t}\n\n\tfor k, v := range words {\n\t\tif val == v {\n\t\t\tf.Printf(\"%v already found with key %q\\n\", v, k)\n\t\t\treturn 0\n\t\t}\n\t}\n\treturn val\n}", "func (mp *mirmap) access0(k voidptr, v *voidptr) int {\n\tv1 := mp.access1(k)\n\t// when key not exist, v1 will nil\n\tif v1 != nil {\n\t\tmemcpy3(v, v1, mp.valsz)\n\t\treturn 0\n\t}\n\treturn -1\n}", "func (t JsonType) Zero() interface{} {\n\t// MySQL throws an error for INSERT IGNORE, UPDATE IGNORE, etc. 
when bad json is encountered:\n\t// ERROR 3140 (22032): Invalid JSON text: \"Invalid value.\" at position 0 in value for column 'table.column'.\n\treturn nil\n}", "func (l Local) Process() string {\n\treturn fmt.Sprintf(\"This local is %d\\n\", l.values)\n}", "func (c *safeCounter) Value() int {\n\tc.mux.Lock()\n\tdefer c.mux.Unlock()\n\treturn c.cnt\n}", "func (i *BytesIterator) Value() Object {\n\treturn &Int{Value: int64(i.v[i.i-1])}\n}", "func (rc RedisComponent) Process() {\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase msg := <-*rc.msgStream:\n\t\t\t\t// Lookup key in database\n\t\t\t\tval, _ := rc.conn.Do(\"GET\", msg)\n\t\t\t\t// Build event structure\n\t\t\t\te := Event{\n\t\t\t\t\tURI: msg,\n\t\t\t\t\tValue: val,\n\t\t\t\t\tTime: time.Now().UTC(),\n\t\t\t\t}\n\t\t\t\t// Write event to outputStream\n\t\t\t\trc.dataStream <- e\n\t\t\t}\n\t\t}\n\t}()\n}", "func (p *Processor) ValueForNode(k string) (v uint16) {\n\treturn p.Nodes[k].Value(0)\n}", "func (dc *Int64DictConverter) FillZero(out interface{}) {\n\to := out.([]int64)\n\to[0] = dc.zeroVal\n\tfor i := 1; i < len(o); i *= 2 {\n\t\tcopy(o[i:], o[:i])\n\t}\n}", "func (c *Counter) Value() int64 {\n\treturn atomic.LoadInt64(&c.value)\n}", "func TestGetCount_NoValue(t *testing.T) {\n\texpectedCount := 0\n\tactualCount := pncounter.GetTotal()\n\n\tassert.Equal(t, expectedCount, actualCount)\n\n\tpncounter = pncounter.Clear(testNode)\n}", "func (svc *Service) ProcessAmount(ctx context.Context, amount int) *ServiceResponse {\n\tlog := logger.New(ctx)\n\tlog.WithField(\"amount\", amount)\n\tlog.Info(\"Initialize ProcessAmount\")\n\tres := &ServiceResponse{\n\t\tAmount: float64(amount),\n\t\tCoins: make(map[int]int),\n\t}\n\tif coinCount := svc.cache.Get(ctx, amount); coinCount != nil {\n\t\tres.Coins = coinCount\n\t\treturn res\n\t}\n\tfor amount > 0 {\n\t\tamount = setCoin(log, amount, res.Coins)\n\t}\n\tlog.WithField(\"coin_count\", res.Coins)\n\tlog.Info(\"coin Count Result\")\n\tsvc.cache.Set(ctx, int(res.Amount), res.Coins)\n\treturn res\n}", "func Zero() ID {\n\treturn nilID\n}", "func Test0() (res0 []byte) {\n\treturn res0\n}", "func ZeroValue(t JType) interface{} {\n\tif t.IsPrimitive() {\n\t\treturn basicZeroValue(t.Orig.Underlying().(*types.Basic))\n\t} else {\n\t\tv := \"new \" + t.JName() + \"(\"\n\t\tif t.NeedsAddress() {\n\t\t\tv += fmt.Sprint(FakeAddressFor(t.Ident))\n\t\t}\n\t\tv += \")\"\n\t\treturn v\n\t}\n}", "func (v *Version) intOrZero(input string) (value int) {\n\tif input != \"\" {\n\t\tvalue, _ = strconv.Atoi(input)\n\t}\n\treturn value\n}", "func (self *MonoTimer) Value() int64 {\n\tcurrentTime := GetMonoTime()\n\n\treturn currentTime - self.startTime\n}", "func (c *Count) Value() int64 {\n\tc.µ.Lock()\n\tdefer c.µ.Unlock()\n\treturn c.val\n}", "func (t systemSetType) Zero() interface{} {\n\treturn \"\"\n}", "func (c *changeCache) getInitialSequence() (initialSequence uint64) {\n\tc.lock.RLock()\n\tinitialSequence = c.initialSequence\n\tc.lock.RUnlock()\n\treturn initialSequence\n}", "func (stateObj *stateObject) Value() *big.Int {\n\tpanic(\"Value on stateObject should never be called\")\n}", "func (i *Inc) Execute(mem turing.Memory, _ turing.Cache) error {\n\t// borrow slice\n\tbuf, ref := fpack.Borrow(int64Len)\n\tdefer ref.Release()\n\n\t// encode count\n\tbuf = buf[:0]\n\tbuf = strconv.AppendInt(buf, i.Value, 10)\n\n\t// add value\n\terr := mem.Merge(i.Key, buf, Add)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (m *DeviceHealthAttestationState) SetPcr0(value *string)() {\n err 
:= m.GetBackingStore().Set(\"pcr0\", value)\n if err != nil {\n panic(err)\n }\n}", "func Test0() uint {\n\tvar res0 uint\n\treturn res0\n}", "func (c *Int) Value() (v int) {\n\tc.mu.Lock()\n\tv = c.v\n\tc.mu.Unlock()\n\treturn\n}", "func (op *output) Value() uint64 {\n\treturn op.value\n}", "func (op *output) Value() uint64 {\n\treturn op.value\n}", "func (op *output) Value() uint64 {\n\treturn op.value\n}", "func (i NullInt) ValueOrZero() int64 {\n\tif !i.Valid {\n\t\treturn 0\n\t}\n\treturn i.Int64\n}", "func Test0() map[string]uint16 {\n\tvar res0 map[string]uint16\n\treturn res0\n}", "func rvZeroValue(rt reflect.Type, tt *Type) (reflect.Value, error) {\n\t// Easy fastpath; if the type doesn't contain inline typeobject or union, the\n\t// regular Go zero value is sufficient.\n\tif !tt.ContainsKind(WalkInline, kkTypeObjectOrUnion...) {\n\t\treturn reflect.Zero(rt), nil\n\t}\n\t// Handle typeobject, which has the AnyType zero value.\n\tif rt == rtPtrToType {\n\t\treturn rvAnyType, nil\n\t}\n\t// Handle native types by returning the native value filled in with a zero\n\t// value of the wire type.\n\tif ni := nativeInfoFromNative(rt); ni != nil {\n\t\trvWire := reflect.New(ni.WireType).Elem()\n\t\tttWire, err := TypeFromReflect(ni.WireType)\n\t\tif err != nil {\n\t\t\treturn reflect.Value{}, err\n\t\t}\n\t\tswitch zero, err := rvZeroValue(ni.WireType, ttWire); {\n\t\tcase err != nil:\n\t\t\treturn reflect.Value{}, err\n\t\tdefault:\n\t\t\trvWire.Set(zero)\n\t\t}\n\t\trvNativePtr := reflect.New(rt)\n\t\tif err := ni.ToNative(rvWire, rvNativePtr); err != nil {\n\t\t\treturn reflect.Value{}, err\n\t\t}\n\t\treturn rvNativePtr.Elem(), nil\n\t}\n\t// Handle composite types with inline subtypes.\n\trv := reflect.New(rt).Elem()\n\tswitch {\n\tcase tt.Kind() == Union:\n\t\t// Set the union interface with the zero value of the type at index 0.\n\t\tri, _, err := deriveReflectInfo(rt)\n\t\tif err != nil {\n\t\t\treturn reflect.Value{}, err\n\t\t}\n\t\tswitch zero, err := rvZeroValue(ri.UnionFields[0].RepType, tt.Field(0).Type); {\n\t\tcase err != nil:\n\t\t\treturn reflect.Value{}, err\n\t\tdefault:\n\t\t\trv.Set(zero)\n\t\t}\n\tcase rt.Kind() == reflect.Array:\n\t\tfor ix := 0; ix < rt.Len(); ix++ {\n\t\t\tswitch zero, err := rvZeroValue(rt.Elem(), tt.Elem()); {\n\t\t\tcase err != nil:\n\t\t\t\treturn reflect.Value{}, err\n\t\t\tdefault:\n\t\t\t\trv.Index(ix).Set(zero)\n\t\t\t}\n\t\t}\n\tcase rt.Kind() == reflect.Struct:\n\t\tfor ix := 0; ix < tt.NumField(); ix++ {\n\t\t\tfield := tt.Field(ix)\n\t\t\trvField := rv.FieldByName(field.Name)\n\t\t\tswitch zero, err := rvZeroValue(rvField.Type(), field.Type); {\n\t\t\tcase err != nil:\n\t\t\t\treturn reflect.Value{}, err\n\t\t\tdefault:\n\t\t\t\trvField.Set(zero)\n\t\t\t}\n\t\t}\n\tdefault:\n\t\treturn reflect.Value{}, fmt.Errorf(\"vdl: rvZeroValue unhandled rt: %v tt: %v\", rt, tt)\n\t}\n\treturn rv, nil\n}", "func (c nullCache) Inc(k string, v uint64) (int64, error) {\n\treturn 0, nil\n}", "func (res Responder) WriteZero() int {\n\tn, _ := res.b.Write(binZERO)\n\treturn n\n}", "func (self *StateObject) Value() *big.Int {\n\tpanic(\"Value on StateObject should never be called\")\n}", "func (c *connection) process(pushCounter *uint64, inputs input) error {\n\n\treply, err := c.redis.Do(howMap[inputs.how].pop, inputs.source)\n\n\tif reply == nil {\n\t\treturn nil\n\t}\n\n\tpopData, err := redis.Bytes(reply, err)\n\n\tif err != nil {\n\t\tlog.Printf(\"Popped : %v\", string(popData))\n\t\tlog.Printf(\"Error occured while popping data , : %v\", 
err)\n\t\treturn err\n\t}\n\n\tif _, err := redis.Int(c.redis.Do(howMap[inputs.how].push, inputs.destination, popData)); err != nil {\n\t\tlog.Printf(\"Error occured while pushing data, : %v\", err)\n\t\treturn err\n\t}\n\n\t*pushCounter++\n\n\treturn nil\n\n}", "func zeroKey(b []byte) {\n\tfor i := range b {\n\t\tb[i] = 0\n\t}\n}", "func (this *Dcmp0_Chunk_ExtendedBody_DeltaEncoding32BitBody) FirstValue() (v int, err error) {\n\tif (this._f_firstValue) {\n\t\treturn this.firstValue, nil\n\t}\n\ttmp62, err := this.FirstValueRaw.Value()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tthis.firstValue = int(tmp62)\n\tthis._f_firstValue = true\n\treturn this.firstValue, nil\n}", "func (d *decoder) scalar(childKey string, value reflect.Value, def string) error {\n\tglobal := d.getGlobalProvider()\n\tvar val interface{}\n\n\t// For primitive values, just get the value and set it into the field\n\tif v2 := global.Get(childKey); v2.HasValue() {\n\t\tval = v2.Value()\n\t} else if def != \"\" {\n\t\tval = def\n\t}\n\n\treturn convert(childKey, &value, val)\n}", "func (l *Line) Value() (int, error) {\n\tl.mu.Lock()\n\tdefer l.mu.Unlock()\n\tif l.closed {\n\t\treturn 0, ErrClosed\n\t}\n\tif l.abi == 1 {\n\t\thd := uapi.HandleData{}\n\t\terr := uapi.GetLineValues(l.vfd, &hd)\n\t\treturn int(hd[0]), err\n\t}\n\tlv := uapi.LineValues{Mask: 1}\n\terr := uapi.GetLineValuesV2(l.vfd, &lv)\n\treturn lv.Get(0), err\n}", "func (this *Dcmp0_Chunk_ExtendedBody_DeltaEncoding16BitBody) FirstValue() (v int, err error) {\n\tif (this._f_firstValue) {\n\t\treturn this.firstValue, nil\n\t}\n\ttmp56, err := this.FirstValueRaw.Value()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tthis.firstValue = int(tmp56)\n\tthis._f_firstValue = true\n\treturn this.firstValue, nil\n}", "func Count(payload []byte) ([]byte, error) {\n\t// Fetch current value from Database\n\tb, err := wapc.HostCall(\"tarmac\", \"kvstore\", \"get\", []byte(`{\"key\":\"kv_counter_example\"}`))\n\tif err != nil {\n\t\treturn []byte(fmt.Sprintf(`{\"payload\":\"%s\",\"status\":{\"code\":200,\"status\":\"Success\"}}`, base64.StdEncoding.EncodeToString([]byte(\"0\")))), nil\n\t}\n\tj, err := fastjson.ParseBytes(b)\n\tif err != nil {\n\t\treturn []byte(fmt.Sprintf(`{\"status\":{\"code\":500,\"status\":\"Failed to call parse json - %s\"}}`, err)), nil\n\t}\n\n\t// Check if value is missing and return 0 if empty\n\tif j.GetInt(\"status\", \"code\") != 200 {\n\t\treturn []byte(fmt.Sprintf(`{\"payload\":\"%s\",\"status\":{\"code\":200,\"status\":\"Success\"}}`, base64.StdEncoding.EncodeToString([]byte(\"0\")))), nil\n\t}\n\n\t// Return KV Stored data\n\treturn []byte(fmt.Sprintf(`{\"payload\":\"%s\",\"status\":{\"code\":200,\"status\":\"Success\"}}`, j.GetStringBytes(\"data\"))), nil\n}", "func IncCount(payload []byte) ([]byte, error) {\n\tgo func() {\n\t\t// Update custom metric\n\t\t_, err := wapc.HostCall(\"tarmac\", \"metrics\", \"counter\", []byte(`{\"name\":\"kv_counter_inc_called\"}`))\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}()\n\n\ti := 0\n\n\t// Fetch current value from Database\n\tb, err := wapc.HostCall(\"tarmac\", \"kvstore\", \"get\", []byte(`{\"key\":\"kv_counter_example\"}`))\n\tif err == nil {\n\t\tj, err := fastjson.ParseBytes(b)\n\t\tif err != nil {\n\t\t\treturn []byte(fmt.Sprintf(`{\"status\":{\"code\":500,\"status\":\"Failed to call parse json - %s\"}}`, err)), nil\n\t\t}\n\n\t\t// Check if value is missing and return 0 if empty\n\t\tif j.GetInt(\"status\", \"code\") == 200 {\n\t\t\ts, err := 
base64.StdEncoding.DecodeString(string(j.GetStringBytes(\"data\")))\n\t\t\tif err == nil {\n\t\t\t\tn, err := strconv.Atoi(fmt.Sprintf(\"%s\", s))\n\t\t\t\tif err == nil {\n\t\t\t\t\ti = n\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Increment Counter\n\ti += 1\n\ts := strconv.Itoa(i)\n\n\t// Store new Counter value\n\t_, err = wapc.HostCall(\"tarmac\", \"kvstore\", \"set\", []byte(fmt.Sprintf(`{\"key\":\"kv_counter_example\",\"data\":\"%s\"}`, base64.StdEncoding.EncodeToString([]byte(s)))))\n\tif err != nil {\n\t\treturn []byte(fmt.Sprintf(`{\"status\":{\"code\":500,\"status\":\"Failed to call host callback - %s\"}}`, err)), nil\n\t}\n\n\t// Return Counter value to user\n\treturn []byte(fmt.Sprintf(`{\"payload\":\"%s\",\"status\":{\"code\":200,\"status\":\"Success\"}}`, base64.StdEncoding.EncodeToString([]byte(s)))), nil\n}", "func getAtomicCounter() string {\n\tatomic.AddInt32(&counter, 1)\n\tif counter > 58*58*58*58-1 {\n\t\t// Reset the counter if we're beyond what we\n\t\t// can represent with 4 base58 characters\n\t\tatomic.StoreInt32(&counter, 0)\n\t}\n\tcounterBytes := base58.EncodeBig(nil, big.NewInt(int64(counter)))\n\tcounterStr := string(counterBytes)\n\tswitch len(counterStr) {\n\tcase 0:\n\t\treturn \"0000\"\n\tcase 1:\n\t\treturn \"000\" + counterStr\n\tcase 2:\n\t\treturn \"00\" + counterStr\n\tcase 3:\n\t\treturn \"0\" + counterStr\n\tdefault:\n\t\treturn counterStr[0:4]\n\t}\n}", "func (c *PCPCounter) Val() int64 {\n\tc.mutex.RLock()\n\tdefer c.mutex.RUnlock()\n\n\treturn c.val.(int64)\n}", "func (r *Remote) Process() string {\n\treturn fmt.Sprintf(\"This remote is %d\\n\", r.values)\n}", "func (p Polynom) ValueAt(x0 *big.Int) *big.Int {\n\tval := big.NewInt(0)\n\tfor i := len(p.coeff) - 1; i >= 0; i-- {\n\t\tval.Mul(val, x0)\n\t\tval.Add(val, p.coeff[i])\n\t\tval.Mod(val, p.mod)\n\t}\n\treturn val\n}", "func (bl *lfList) bucketValue(b int) int {\n return int(atomic.LoadInt32(&bl.b[b]))\n}", "func (s State) process(metric string) error {\n\tparts := strings.Split(metric, \"|\")\n\n\tif len(parts) < 1 {\n\t\treturn fmt.Errorf(\"invalid metric: \\\"%s\\\"\", metric)\n\t}\n\n\tnameValue := strings.Split(parts[0], \":\")\n\tif len(nameValue) < 2 {\n\t\treturn fmt.Errorf(\"invalid name/value pair: \\\"%s\\\"\", metric)\n\t}\n\n\tname := nameValue[0]\n\tvalue, err := strconv.ParseInt(nameValue[1], 10, 64)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can not convert value to int: \\\"%s\\\", %s\", metric, err)\n\t}\n\n\tif _, ok := s.Metrics[name]; !ok {\n\t\ts.Metrics[name] = 0\n\t}\n\ts.Metrics[name] += int(value)\n\tfmt.Printf(\"%s\\t (total: %d)\\n\", metric, s.Metrics[name])\n\treturn nil\n}", "func (u *UserCalculator) Process(int64) int64 {\n\tif !u.init {\n\t\tpanic(\"init was never called\")\n\t}\n\tif len(u.calc) == 0 {\n\t\treturn -1\n\t}\n\tu.changed = false\n\tfor _, c := range u.calc {\n\t\tif !c.Calc.Changed() {\n\t\t\tcontinue\n\t\t}\n\t\tu.changed = true\n\t\tif c.Calc.Active() {\n\t\t\tu.active = append(u.active, c.ID)\n\t\t} else {\n\t\t\tu.active = omitStr(u.active, c.ID)\n\t\t}\n\t}\n\n\treturn 0\n}", "func cpuperf() (int64, error) {\n\tvar diff time.Duration = 0\n\tvar i int64 = 0\n\tstart := time.Now()\n\n\tfor {\n\t\tif _, err := scrypt.Key(nil, nil, 128, 1, 1, 0); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\ti += 512\n\t\tdiff = time.Since(start)\n\t\tif diff > 10*time.Millisecond {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn i * int64(time.Second) / int64(diff), nil\n}" ]
[ "0.5489842", "0.54571885", "0.5207902", "0.5192827", "0.515538", "0.515538", "0.5059694", "0.50570047", "0.5021366", "0.5017359", "0.50016963", "0.49647808", "0.49498284", "0.4916041", "0.48992616", "0.48711565", "0.4870216", "0.4869424", "0.4860917", "0.48558867", "0.4844697", "0.48411122", "0.48411122", "0.48411122", "0.48411122", "0.4828177", "0.4823968", "0.48194617", "0.48102176", "0.47774458", "0.47741178", "0.47698948", "0.47661954", "0.47621194", "0.4760909", "0.47575727", "0.47375914", "0.47336313", "0.47298175", "0.472649", "0.47195026", "0.47142947", "0.4712093", "0.47043765", "0.46950087", "0.46949297", "0.46914983", "0.46760422", "0.4671243", "0.46648002", "0.4661859", "0.4661552", "0.46583885", "0.4656743", "0.4649135", "0.4649023", "0.46478245", "0.464268", "0.464054", "0.46316138", "0.4618646", "0.4618221", "0.46074334", "0.4591553", "0.4582835", "0.45718223", "0.45648855", "0.45634708", "0.45629835", "0.45521525", "0.4545795", "0.45451733", "0.45348457", "0.45324227", "0.45273867", "0.45207885", "0.45205563", "0.45205563", "0.45205563", "0.45170444", "0.45155457", "0.4509909", "0.45041218", "0.4499971", "0.44951636", "0.44918227", "0.4491422", "0.44867462", "0.44843006", "0.44821155", "0.4477362", "0.44727477", "0.4467229", "0.44626635", "0.44616294", "0.44601256", "0.44583094", "0.44565248", "0.44543445", "0.44522142", "0.4442875" ]
0.0
-1
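The `start` document above and the `run` document in the next record are two halves of the same pattern. Here is a minimal, self-contained sketch of how they fit together; the struct field types and the `newLiteralProcessor`, `stop`, and `main` helpers are assumptions inferred from those two snippets, not taken from the dataset:

```go
package main

import "fmt"

// literalProcessor continually emits a fixed value on its output channel
// until asked to stop. Field names follow the start/run snippets; the
// exact types are assumptions.
type literalProcessor struct {
	val  interface{}
	c    chan map[string]interface{}
	done chan chan struct{}
}

func newLiteralProcessor(val interface{}) *literalProcessor {
	return &literalProcessor{
		val:  val,
		c:    make(chan map[string]interface{}),
		done: make(chan chan struct{}),
	}
}

// start begins the processor loop in its own goroutine.
func (p *literalProcessor) start() { go p.run() }

// run emits the literal under the "" key until a stop request arrives.
func (p *literalProcessor) run() {
	for {
		select {
		case ch := <-p.done:
			close(ch) // acknowledge the stop request
			return
		case p.c <- map[string]interface{}{"": p.val}:
		}
	}
}

// stop asks the loop to exit and waits for its acknowledgement.
func (p *literalProcessor) stop() {
	ch := make(chan struct{})
	p.done <- ch
	<-ch
}

func main() {
	p := newLiteralProcessor(42)
	p.start()
	for i := 0; i < 3; i++ {
		fmt.Println(<-p.c) // prints map[:42]
	}
	p.stop()
}
```

Using `chan chan struct{}` for `done` lets `stop` block until the loop has actually exited, which is why `run` closes the received channel before returning.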
run executes the processor loop.
func (p *literalProcessor) run() {
	for {
		select {
		case ch := <-p.done:
			close(ch)
			return
		case p.c <- map[string]interface{}{"": p.val}:
		}
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (e *Executor) Run() { e.loop() }", "func (r *Runner) run() {\n\tfor {\n\t\ttask := r.rq.Pop()\n\t\tr.process(task)\n\t}\n}", "func (p *SingleLineParser) run() {\n\tfor input := range p.inputChan {\n\t\tp.process(input)\n\t}\n\tp.lineHandler.Stop()\n}", "func (s *Service) run() {\n\n\t// Create a communicator for sending and receiving packets.\n\tcommunicator := comm.NewCommunicator(s.config.PollInterval, s.config.Port)\n\tdefer communicator.Stop()\n\n\t// Create a ticker for sending pings.\n\tpingTicker := time.NewTicker(s.config.PingInterval)\n\tdefer pingTicker.Stop()\n\n\t// Create a ticker for timeout checks.\n\tpeerTicker := time.NewTicker(s.config.PeerTimeout)\n\tdefer peerTicker.Stop()\n\n\t// Create the packet that will be sent to all peers.\n\tpkt := &comm.Packet{\n\t\tID: s.config.ID,\n\t\tUserData: s.config.UserData,\n\t}\n\n\t// Continue processing events until explicitly stopped.\n\tfor {\n\t\tselect {\n\t\tcase p := <-communicator.PacketChan:\n\t\t\ts.processPacket(p)\n\t\tcase <-pingTicker.C:\n\t\t\tcommunicator.Send(pkt)\n\t\tcase <-peerTicker.C:\n\t\t\ts.processPeers()\n\t\tcase <-s.stopChan:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (tfm *trxFlowMonitor) run() {\n\t// make sure we are orchestrated\n\tif tfm.mgr == nil {\n\t\tpanic(fmt.Errorf(\"no svc manager set on %s\", tfm.name()))\n\t}\n\n\t// start go routine for processing\n\ttfm.mgr.started(tfm)\n\tgo tfm.execute()\n}", "func (bf *brainfog) run() {\n\tfor bf.ip < len(bf.program) {\n\t\terr := bf.doInstruction()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tclose(bf.outCh)\n}", "func (conn *Conn) runLoop() {\n\tfor {\n\t\tselect {\n\t\tcase line := <-conn.in:\n\t\t\tconn.ED.Dispatch(line.Cmd, conn, line)\n\t\tcase <-conn.cLoop:\n\t\t\t// strobe on control channel, bail out\n\t\t\treturn\n\t\t}\n\t}\n}", "func (c *Cyclone) run() {\n\nrunloop:\n\tfor {\n\t\tselect {\n\t\tcase <-c.Shutdown:\n\t\t\t// received shutdown, drain input channel which will be\n\t\t\t// closed by main\n\t\t\tgoto drainloop\n\t\tcase msg := <-c.Input:\n\t\t\tif msg == nil {\n\t\t\t\t// this can happen if we read the closed Input channel\n\t\t\t\t// before the closed Shutdown channel\n\t\t\t\tcontinue runloop\n\t\t\t}\n\t\t\tif err := c.process(msg); err != nil {\n\t\t\t\tc.Death <- err\n\t\t\t\t<-c.Shutdown\n\t\t\t\tbreak runloop\n\t\t\t}\n\t\t}\n\t}\n\ndrainloop:\n\tfor {\n\t\tselect {\n\t\tcase msg := <-c.Input:\n\t\t\tif msg == nil {\n\t\t\t\t// channel is closed\n\t\t\t\tbreak drainloop\n\t\t\t}\n\t\t\tc.process(msg)\n\t\t}\n\t}\n}", "func (r *serverOneDotOne) Run() {\n\tr.state = RstateRunning\n\n\t// event handling is a NOP in this model\n\trxcallback := func(ev EventInterface) int {\n\t\tassert(r == ev.GetTarget())\n\t\tlog(LogVV, \"proc-ed\", ev.String())\n\t\treturn 0\n\t}\n\n\tgo func() {\n\t\tfor r.state == RstateRunning {\n\t\t\tr.receiveEnqueue()\n\t\t\ttime.Sleep(time.Microsecond)\n\t\t\tr.processPendingEvents(rxcallback)\n\t\t}\n\t}()\n}", "func (s *scene) run(ctx context.Context, r *sdl.Renderer) chan error {\n\terrc := make(chan error)\n\tgo func() {\n\t\tdefer close(errc)\n\t\tfor range time.Tick(10 * time.Millisecond) {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tif err := s.paint(r); err != nil {\n\t\t\t\t\terrc <- err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\treturn errc\n}", "func (ftm *FtmBridge) run() {\n\tftm.wg.Add(1)\n\tgo ftm.observeBlocks()\n}", "func (s *MetalLBSpeaker) run(ctx context.Context) {\n\tl := log.WithFields(\n\t\tlogrus.Fields{\n\t\t\t\"component\": 
\"MetalLBSpeaker.run\",\n\t\t},\n\t)\n\tfor {\n\t\t// only check ctx here, we'll allow any in-flight\n\t\t// events to be processed completely.\n\t\tif ctx.Err() != nil {\n\t\t\treturn\n\t\t}\n\t\t// previous to this iteration, we processed an event\n\t\t// which indicates the speaker should yield. shut\n\t\t// it down.\n\t\tif s.shutdown.Load() {\n\t\t\tl.Info(\"speaker shutting down.\")\n\t\t\treturn\n\t\t}\n\t\tkey, quit := s.queue.Get()\n\t\tif quit {\n\t\t\treturn\n\t\t}\n\t\tl.Info(\"processing new event.\")\n\t\tst := s.do(key)\n\t\tswitch st {\n\t\tcase types.SyncStateError:\n\t\t\ts.queue.Add(key)\n\t\t\t// done must be called to requeue event after add.\n\t\tcase types.SyncStateSuccess, types.SyncStateReprocessAll:\n\t\t\t// SyncStateReprocessAll is returned in MetalLB when the\n\t\t\t// configuration changes. However, we are not watching for\n\t\t\t// configuration changes because our configuration is static and\n\t\t\t// loaded once at Cilium start time.\n\t\t}\n\t\t// if queue.Add(key) is called previous to this invocation the event\n\t\t// is requeued, else it is discarded from the queue.\n\t\ts.queue.Done(key)\n\t}\n}", "func (m *Monitor) run(ctx context.Context) {\n\tdefer close(m.stopCh)\n\tdefer close(m.events)\n\tdefer m.log.Info(\"event loop stopped\")\n\tf := filters.NewArgs()\n\tf.Add(\"event\", \"start\")\n\tf.Add(\"event\", \"die\")\n\toptions := types.EventsOptions{Filters: f}\n\tfor {\n\t\terr := func() error {\n\t\t\tm.log.Info(\"processing existing containers\")\n\t\t\tif err := m.processContainers(ctx); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tm.log.Info(\"starting event loop\")\n\t\t\tmsgChan, errChan := m.client.Events(ctx, options)\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase msg := <-msgChan:\n\t\t\t\t\tif err := m.processMessage(ctx, msg); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\tcase err := <-errChan:\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\tif err == context.Canceled {\n\t\t\treturn\n\t\t}\n\t\tm.log.Error(err)\n\t\tm.log.Info(\"reconnecting in 30 seconds\")\n\t\tselect {\n\t\tcase <-time.After(30 * time.Second):\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}", "func (this *Connection) run() {\n\tgo this.routineMain()\n}", "func (gb *GameBoy) Run() {\n\t// Number of extra clocks consumed in the last tick.\n\tclockDebt := 0\n\n\tfor {\n\t\tselect {\n\n\t\tcase <-gb.clk.C:\n\t\t\tclockDebt = gb.RunClocks(CPUClock/BaseClock - clockDebt)\n\n\t\tcase event := <-gb.events:\n\t\t\tgb.jp.Handle(event)\n\n\t\tcase frame := <-gb.ppu.F:\n\t\t\tselect {\n\t\t\tcase gb.F <- frame:\n\t\t\tdefault:\n\t\t\t}\n\n\t\t}\n\t}\n}", "func (r *reaper) runLoop() {\n\tfor {\n\t\tselect {\n\t\tcase <-r.sigs:\n\t\t\tprocs, err := ps.Processes()\n\t\t\tif err != nil {\n\t\t\t\tklog.Warningf(\"reaper: failed to get all procs: %v\", err)\n\t\t\t} else {\n\t\t\t\tfor _, p := range procs {\n\t\t\t\t\treaped := waitIfZombieStunnel(p)\n\t\t\t\t\tif reaped {\n\t\t\t\t\t\t// wait for only one process per SIGCHLD received over channel. 
It\n\t\t\t\t\t\t// doesn't have to be the same process that triggered the\n\t\t\t\t\t\t// particular SIGCHLD (there's no way to tell anyway), the\n\t\t\t\t\t\t// intention is to reap zombies as they come.\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-r.stopCh:\n\t\t\tbreak\n\t\t}\n\t}\n}", "func (h *AutoscalersController) RunControllerLoop(stopCh <-chan struct{}) {\n\th.processingLoop(stopCh)\n}", "func (r *RunloopShortCircuiter) Run() {\n\tfor !r.complated {\n\t\tnextEvent := app.NSApplication_nextEventMatchingMask_untilDate_inMode_dequeue(app.NSApp(),\n\t\t\tevent.NSAnyEventMask, // mask\n\t\t\tdate.NSDate_distantFuture(), // expiration.\n\t\t\trunloop.NSDefaultRunLoopMode, // mode.\n\t\t\ttrue, // flag\n\t\t)\n\t\tif nextEvent == 0 {\n\t\t\tbreak\n\t\t}\n\t\tapp.NSApplication_sendEvent(app.NSApp(), nextEvent)\n\t}\n\tr.complated = false\n}", "func (s *Server) loopRun(pc *PeerConn, handler Handler) error {\n\tfor {\n\t\tmsg, err := pc.ReadMsg()\n\t\tswitch err {\n\t\tcase nil:\n\t\tcase io.EOF:\n\t\t\treturn nil\n\t\tdefault:\n\t\t\ts := err.Error()\n\t\t\tif strings.Contains(s, \"closed\") {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"fail to decode the message from '%s': %s\",\n\t\t\t\tpc.RemoteAddr().String(), s)\n\t\t}\n\n\t\tif err = s.Config.HandleMessage(pc, msg, handler); err != nil {\n\t\t\treturn fmt.Errorf(\"fail to handle peer message from '%s': %s\",\n\t\t\t\tpc.RemoteAddr().String(), err)\n\t\t}\n\t}\n}", "func (v *V2RayPoint) RunLoop() {\n\tgo v.pointloop()\n}", "func (ip *ImageProcessor) Run() {\n\tfor {\n\t\tpc := <-ip.Chan\n\t\t// Set R, G, and B\n\t\tip.Image.Pix[(uint32(pc.Y)*800*4)+(uint32(pc.X)*4)] = pc.Red\n\t\tip.Image.Pix[(uint32(pc.Y)*800*4)+(uint32(pc.X)*4)+1] = pc.Green\n\t\tip.Image.Pix[(uint32(pc.Y)*800*4)+(uint32(pc.X)*4)+2] = pc.Blue\n\t\twriteImage(ip.OutFile, &ip.Image)\n\t}\n}", "func (trd *trxDispatcher) run() {\n\t// make sure we are orchestrated\n\tif trd.mgr == nil {\n\t\tpanic(fmt.Errorf(\"no svc manager set on %s\", trd.name()))\n\t}\n\n\t// start the block observer ticker\n\ttrd.bot = time.NewTicker(trxDispatchBlockUpdateTicker)\n\n\t// signal orchestrator we started and go\n\ttrd.mgr.started(trd)\n\tgo trd.execute()\n}", "func (r *reaper) start() {\n\tgo r.runLoop()\n}", "func (ip *Interpreter) Run() {\n\t// initialise stop channel\n\tip.stopch = make(chan struct{})\n\n\t// load sprites\n\tip.loadSprites()\n\n\t// start sound and delay timers\n\tip.stget, ip.stset, ip.ststop = newTimer()\n\tip.dtget, ip.dtset, ip.dtstop = newTimer()\n\n\t// set PC to program start address\n\tip.pc = memoryOffsetProgram\n\n\tcurrentTime := time.Now()\n\tvar accum time.Duration\n\n\tticker := time.NewTicker(TimestepBatch)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase newTime := <-ticker.C:\n\t\t\tframeTime := newTime.Sub(currentTime)\n\t\t\tcurrentTime = newTime\n\t\t\taccum += frameTime\n\t\t\tfor accum >= TimestepSimulation {\n\t\t\t\tip.step()\n\t\t\t\taccum -= TimestepSimulation\n\t\t\t}\n\t\tcase <-ip.stopch:\n\t\t\t// stop delay and sound timers\n\t\t\tip.ststop <- struct{}{}\n\t\t\tip.dtstop <- struct{}{}\n\t\t\tclose(ip.displaych)\n\t\t\treturn\n\t\t}\n\t}\n}", "func (c *csiManager) Run() {\n\tgo c.runLoop()\n}", "func (w *Worker) run(){\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase f := <- w.task:\n\t\t\t\tif f == nil {\n\t\t\t\t\tw.pool.decRunning()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tf()\n\t\t\t\tw.pool.putWorker(w)\n\t\t\tcase args := <- w.args:\n\t\t\t\tif args == nil 
{\n\t\t\t\t\tw.pool.decRunning()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tw.pool.poolFunc(args)\n\t\t\t\tw.pool.putWorker(w)\n\t\t\t}\n\t\t}\n\t}()\n}", "func (m *Manager) run() {\n\tfor i := 0; i < m.workerPool.MaxWorker; i++ {\n\t\twID := i + 1\n\t\t//log.Printf(\"[workerPool] worker %d spawned\", wID)\n\t\tgo func(workerID int) {\n\t\t\tfor task := range m.workerPool.queuedTaskC {\n\t\t\t\tlog.Printf(\"[workerPool] worker %d is processing task\", wID)\n\t\t\t\ttask()\n\t\t\t\tlog.Printf(\"[workerPool] worker %d has finished processing task\", wID)\n\t\t\t}\n\t\t}(wID)\n\t}\n}", "func (conn *Conn) runLoop(ctx context.Context) {\n\tfor {\n\t\tselect {\n\t\tcase line := <-conn.in:\n\t\t\tconn.dispatch(line)\n\t\tcase <-ctx.Done():\n\t\t\t// control channel closed, trigger Cancel() to clean\n\t\t\t// things up properly and bail out\n\n\t\t\t// We can't defer this, because Close() waits for it.\n\t\t\tconn.wg.Done()\n\t\t\tconn.Close()\n\t\t\treturn\n\t\t}\n\t}\n}", "func (p *blockParser) run() {\n\tfor p.state = parseBegin; p.state != nil; {\n\t\tp.state = p.state(p)\n\t}\n\tclose(p.blockChan)\n}", "func (a *actorManager) run() error {\n\t// Continually receive messages\n\tfor {\n\t\t// Get next message\n\t\tvar msg actorMessage\n\t\tif err := a.firefox.remote.recv(&msg); err != nil {\n\t\t\tif a.firefox.runCtx.Err() != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\ta.actorsLock.RLock()\n\t\tactor := a.actors[msg.From]\n\t\ta.actorsLock.RUnlock()\n\t\tif actor != nil {\n\t\t\tactor.onMessage(&msg)\n\t\t}\n\t}\n}", "func (r *Reader) run() {\n\tdefer r.cancel()\n\tdefer close(r.done)\n\tdefer r.stmt.Close()\n\n\tvar err error\n\n\tfor err == nil {\n\t\terr = r.tick()\n\t}\n\n\tif err != context.Canceled {\n\t\tr.done <- err\n\t}\n}", "func (s *Schedule) run() {\n\tticker := time.NewTicker(s.interval)\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\ts.logger.Debug(\"msg\", \"scheduled run started\", \"name\", s.name)\n\t\t\terr := s.fun()\n\t\t\ts.logger.Debug(\"msg\", \"scheduled run finished\", \"name\", s.name, \"error\", err)\n\t\tcase <-s.ctx.Done():\n\t\t\tticker.Stop()\n\t\t}\n\t}\n}", "func (c *Processor) Run() (err error) {\n\tif c.M == nil {\n\t\treturn errNoProgram\n\t}\n\tfor {\n\t\terr = c.Step()\n\t\tif err != nil || c.runState == RunStateStopped {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}", "func (r *serverFour) Run() {\n\tr.state = RstateRunning\n\n\trxcallback := func(ev EventInterface) int {\n\t\ttioevent := ev.(*TimedAnyEvent)\n\t\ttio := tioevent.GetTio()\n\t\ttio.doStage(r)\n\n\t\treturn 0\n\t}\n\n\tgo func() {\n\t\tfor r.state == RstateRunning {\n\t\t\t// recv\n\t\t\tr.receiveEnqueue()\n\t\t\ttime.Sleep(time.Microsecond)\n\t\t\tr.processPendingEvents(rxcallback)\n\t\t}\n\n\t\tr.closeTxChannels()\n\t}()\n}", "func (s *scanner) run() {\n\tfor state := scanMain; state != nil; {\n\t\tstate = state(s)\n\t}\n\tclose(s.items)\n}", "func (h *Hub) run(ctx context.Context) {\n\t// Unsubscribe when finished.\n\tdefer func() {\n\t\terr := h.unsubscribeOnNATSSubects()\n\t\tif err != nil {\n\t\t\th.logger.Warningf(\"could not unsubscribe on NATS server: %w\", err)\n\t\t}\n\t}()\n\n\t// Start event handling loop.\n\th.logger.Infof(\"started NATS event receiver loop\")\n\tdefer h.logger.Infof(\"stopped NATS event receiver loop\")\n\n\tfor {\n\t\tloopCtx := context.Background()\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\th.logger.Infof(\"stopping NATS hub by context done\")\n\n\t\tcase msg := <-h.diceRollCreatedChan:\n\t\t\th.logger.Debugf(\"diceRollCreated NATS event 
received, broadcasting\")\n\t\t\terr := h.handleDiceRollCreatedEvent(loopCtx, msg.Data)\n\t\t\tif err != nil {\n\t\t\t\th.logger.Errorf(\"could not handle diceRollCreated event: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n}", "func (n *NodeDrainer) run(ctx context.Context) {\n\tfor {\n\t\tselect {\n\t\tcase <-n.ctx.Done():\n\t\t\treturn\n\t\tcase nodes := <-n.deadlineNotifier.NextBatch():\n\t\t\tn.handleDeadlinedNodes(nodes)\n\t\tcase req := <-n.jobWatcher.Drain():\n\t\t\tn.handleJobAllocDrain(req)\n\t\tcase allocs := <-n.jobWatcher.Migrated():\n\t\t\tn.handleMigratedAllocs(allocs)\n\t\t}\n\t}\n}", "func (e *EchoTester) Run() {\n\tstart := iclock()\n\tfor e.clt.pongCount < maxCount {\n\t\ttime.Sleep(1 * time.Millisecond)\n\t\te.tickMs()\n\t}\n\te.printResult(start)\n\te.clt.SaveRtt()\n}", "func (p *Processor) Run(ctx context.Context) error {\n\terr := p.runImpl(ctx)\n\n\t// the context is the proper way to close down the Run() loop, so it's not\n\t// an error and doesn't need to be returned.\n\tif ctx.Err() != nil {\n\t\treturn nil\n\t}\n\n\treturn err\n}", "func (p *GaugeCollectionProcess) Run() {\n\tdefer close(p.stopped)\n\n\t// Wait a random amount of time\n\tstopReceived := p.delayStart()\n\tif stopReceived {\n\t\treturn\n\t}\n\n\t// Create a ticker to start each cycle\n\tp.resetTicker()\n\n\t// Loop until we get a signal to stop\n\tfor {\n\t\tselect {\n\t\tcase <-p.ticker.C:\n\t\t\tp.collectAndFilterGauges()\n\t\tcase <-p.stop:\n\t\t\t// Can't use defer because this might\n\t\t\t// not be the original ticker.\n\t\t\tp.ticker.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}", "func (r *Raft) run() {\n\tfor {\n\t\t// Check if we are doing a shutdown\n\t\tselect {\n\t\tcase <-r.shutdownCh:\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\n\t\t// Enter into a sub-FSM\n\t\tswitch r.getState() {\n\t\tcase Follower:\n\t\t\tr.runFollower()\n\t\tcase Candidate:\n\t\t\tr.runCandidate()\n\t\tcase Leader:\n\t\t\tr.runLeader()\n\t\t}\n\t}\n}", "func (p *Probe) loop() {\n\tdefer close(p.stopped)\n\n\t// Do a first probe right away, so that the prober immediately exports results for everything.\n\tp.run()\n\tfor {\n\t\tselect {\n\t\tcase <-p.tick.Chan():\n\t\t\tp.run()\n\t\tcase <-p.ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}", "func (pr *PolicyReflector) run() {\n\tdefer pr.wg.Done()\n\n\tpr.Log.Info(\"Policy reflector is now running\")\n\tpr.k8sPolicyController.Run(pr.stopCh)\n\tpr.Log.Info(\"Stopping Policy reflector\")\n}", "func (a *Actor) run() {\n\tif !a.Path.Equals(a.System.Logger.Path()) && a.System.config.Logging.LogLifecycle {\n\t\ta.System.Logger.Log(a.Path.String(), \"starting\")\n\t}\n\n\tfor {\n\t\tenvelope := a.mailbox.Dequeue() // Blocks until message ready\n\n\t\tif !a.Path.Equals(a.System.Logger.Path()) && a.System.config.Logging.LogReceive {\n\t\t\ta.System.Logger.Logf(\"%s <- %#v\", a.name, envelope)\n\t\t}\n\n\t\tswitch envelope.message.body.(type) {\n\t\tcase StopMessage:\n\t\t\ta.stopping()\n\t\t\treturn\n\t\tdefault:\n\t\t\tif a.receiver != nil {\n\t\t\t\tif envelope.message.sender == nil {\n\t\t\t\t\ta.receiver.Receive(envelope.message.body, a.System.DeadLetters, a)\n\t\t\t\t} else {\n\t\t\t\t\ta.receiver.Receive(envelope.message.body, envelope.message.sender, a)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (m *SGController) runWorker() {\n\tfor m.processNextWorkItem() {\n\t}\n}", "func (w *Worker) run(tasks chan *ReadTaskOp, store chan *WriteStoreOp, service chan *ComplainOp) {\n\tfor {\n\t\ttime.Sleep(time.Duration(config.WorkerSpeed * time.Millisecond))\n\t\tnumber := rand.Int() % 100\n\t\tif number < 
config.WorkerSensitive {\n\t\t\tw.getAndExecute(tasks, store, service)\n\t\t}\n\t}\n}", "func (mgr *manager) run() {\n\tlog(mgr.reportingTo.Name(), \"working\", nil, false)\n\tdefer log(mgr.reportingTo.Name(), \"all done\", nil, false)\n\tstepFn := mgr.step_Accepting\n\tfor {\n\t\tif stepFn == nil {\n\t\t\tbreak\n\t\t}\n\t\tstepFn = stepFn()\n\t}\n}", "func (p *literalProcessor) start() { go p.run() }", "func (task taskStub) run(fetchInterval, fetchDelay time.Duration) {\n\tfor {\n\t\tselect {\n\t\tcase <-task.ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t\thasMore, err := task.f()\n\t\t\tif err != nil {\n\t\t\t\ttask.l.Error(\"task.f task run failed\", log.Error(err))\n\t\t\t\ttime.Sleep(fetchInterval)\n\t\t\t\tcontinue\n\t\t\t} else if !hasMore {\n\t\t\t\ttime.Sleep(fetchInterval)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// have more jobs, wait delay time to fetch next time\n\t\t\ttime.Sleep(fetchDelay)\n\t\t}\n\t}\n}", "func (s *server) loop() {\n\tfor {\n\t\tselect {\n\t\tcase op := <-s.ops:\n\t\t\top()\n\t\t}\n\t}\n}", "func (h *EventHandler) Run() {\n\tticker := time.NewTicker(2 * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif !h.needHandle || h.isDoing {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\th.needHandle = false\n\t\t\th.isDoing = true\n\t\t\t// if has error\n\t\t\tif h.doHandle() {\n\t\t\t\th.needHandle = true\n\t\t\t\ttime.Sleep(2 * time.Second)\n\t\t\t}\n\t\t\th.isDoing = false\n\t\tcase <-h.ctx.Done():\n\t\t\tblog.Infof(\"EventHandler for %s run loop exit\", h.lbID)\n\t\t\treturn\n\t\t}\n\t}\n}", "func (m *mapper) run() {\n\tfor m.itr.NextIterval() {\n\t\tm.fn(m.itr, m)\n\t}\n\tclose(m.c)\n}", "func (pm *PipelineManager) runWorker() {\n\tfor pm.processNextWorkItem() {\n\t}\n}", "func (c *Controller) runWorker() {\n\tfor c.processNextWorkItem() {\n\t}\n}", "func (c *Controller) runWorker() {\n\tfor c.processNextWorkItem() {\n\t}\n}", "func (c *Controller) runWorker() {\n\tfor c.processNextWorkItem() {\n\t}\n}", "func (c *Controller) runWorker() {\n\tfor c.processNextWorkItem() {\n\t}\n}", "func (c *Controller) runWorker() {\n\tfor c.processNextWorkItem() {\n\t}\n}", "func (c *Controller) runWorker() {\n\tfor c.processNextWorkItem() {\n\t}\n}", "func (c *Controller) runWorker() {\n\tfor c.processNextWorkItem() {\n\t}\n}", "func (c *Controller) runWorker() {\n\tfor c.processNextWorkItem() {\n\t}\n}", "func (c *Controller) runWorker() {\n\tfor c.processNextWorkItem() {\n\t}\n}", "func (c *Controller) runWorker() {\n\tfor c.processNextWorkItem() {\n\t}\n}", "func (c *Controller) runWorker() {\n\tfor c.processNextWorkItem() {\n\t}\n}", "func (c *Controller) runWorker() {\n\tfor c.processNextWorkItem() {\n\t}\n}", "func (c *Controller) runWorker() {\n\tfor c.processNextWorkItem() {\n\t}\n}", "func (c *Controller) runWorker() {\n\tfor c.processNextWorkItem() {\n\t}\n}", "func (c *Controller) runWorker() {\n\tfor c.processNextWorkItem() {\n\t}\n}", "func (c *Controller) runWorker() {\n\tfor c.processNextWorkItem() {\n\t}\n}", "func (c *Controller) runWorker() {\n\tfor c.processNextWorkItem() {\n\t}\n}", "func (c *Controller) runWorker() {\n\tfor c.processNextWorkItem() {\n\t}\n}", "func (fm *Fesl) Run(ctx context.Context) {\n\tfor {\n\t\tselect {\n\t\tcase event := <-fm.socket.EventChan:\n\t\t\tfm.Handle(event)\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}", "func (loop *mainLoop) Run() {\n\tloop.running = true\n\n\tfor loop.running {\n\t\tselect {\n\t\t// Send a value over the channel in order to\n\t\t// signal that the loop started.\n\t\tcase loop.initialized <- 
1:\n\n\t\t\t// A request to pause the loop is received.\n\t\tcase <-loop.PauseCh:\n\t\t\t// do something or simply send-back a value to\n\t\t\t// the pause channel.\n\t\t\tloop.PauseCh <- 0\n\n\t\t\t// A request to terminate the loop is received.\n\t\tcase <-loop.TerminateCh:\n\t\t\tloop.running = false\n\t\t\tloop.TerminateCh <- 0\n\n\t\t\t// Receive a tick from the ticker.\n\t\tcase <-loop.ticker.C:\n\t\t\t// Initiate the exit procedure.\n\t\t\tapplication.Exit()\n\n\t\t\t// Receive a duration string and create a proper\n\t\t\t// ticker from it.\n\t\tcase durationStr := <-loop.durationCh:\n\t\t\tduration, err := time.ParseDuration(durationStr)\n\t\t\tif err != nil {\n\t\t\t\tpanic(\"Error parsing a duration string.\")\n\t\t\t}\n\t\t\tloop.ticker = time.NewTicker(duration)\n\t\t\tapplication.Logf(\"A new duration received. Running for %s...\", durationStr)\n\t\t}\n\t}\n}", "func (brw *blockRetrievalWorker) run() {\n\tfor {\n\t\terr := brw.HandleRequest()\n\t\t// Only io.EOF is relevant to the loop; other errors are handled in\n\t\t// FinalizeRequest\n\t\tif err == io.EOF {\n\t\t\treturn\n\t\t}\n\t}\n}", "func (c *ConsensusState) run() {\n\tfor {\n\t\tselect {\n\t\tcase op := <-c.accessOp:\n\t\t\tlogger.Debugf(\"cycle %v execute op\", c.cycleId)\n\t\t\top.Execute()\n\t\t}\n\t}\n}", "func run() {\n\tglobal.gVariables.Load(wConfigFile)\n\n\t// Initialize window\n\tcfg := pixelgl.WindowConfig{\n\t\tTitle: wWindowTitle,\n\t\tBounds: pixel.R(0, 0, global.gVariables.WindowWidth, global.gVariables.WindowHeight),\n\t\tVSync: true,\n\t}\n\tgWin, err := pixelgl.NewWindow(cfg)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tgWin.SetCursorVisible(false)\n\tglobal.gWin = gWin\n\n\tsetup()\n\n\tgameLoop()\n}", "func (ac *asyncCallbacksHandler) run() {\n\tfor {\n\t\tf := <-ac.cbQueue\n\t\tif f == nil {\n\t\t\treturn\n\t\t}\n\t\tf()\n\t}\n}", "func (m *ntpManager) run() {\n\tt := time.NewTicker(m.checkInterval)\n\tdefer func() {\n\t\tt.Stop()\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase <-m.shutdown:\n\t\t\treturn\n\t\tcase <-t.C:\n\t\t\terr := m.processTime()\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorln(log.TimeMgr, err)\n\t\t\t}\n\t\t}\n\t}\n}", "func (bl *LogBuffer) run() {\n\tfor {\n\t\tmsg, err := bl.ringBuffer.Pop()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif err := bl.logger.WriteLogMessage(msg); err != nil {\n\t\t\tlogrus.Debugf(\"failed to write log %v with log driver %s\", msg, bl.logger.Name())\n\t\t}\n\t}\n}", "func (dfdd *dfddImpl) run() {\n\n\tticker := common.NewTimer(stateMachineTickerInterval)\n\n\tfor {\n\t\tselect {\n\t\tcase e := <-dfdd.inputListenerCh:\n\t\t\tdfdd.handleListenerEvent(common.InputServiceName, e)\n\t\tcase e := <-dfdd.storeListenerCh:\n\t\t\tdfdd.handleListenerEvent(common.StoreServiceName, e)\n\t\tcase <-ticker.C:\n\t\t\tdfdd.handleTicker()\n\t\t\tticker.Reset(stateMachineTickerInterval)\n\t\tcase <-dfdd.shutdownC:\n\t\t\tdfdd.shutdownWG.Done()\n\t\t\treturn\n\t\t}\n\t}\n}", "func (t *task) run(ctx context.Context) {\n\tgo func() {\n\t\tresult, err := t.handler(ctx, t.request)\n\t\tt.resultQ <- Response{Result: result, Err: err} // out channel is buffered by 1\n\t\tt.running = false\n\t\tclose(t.resultQ)\n\t}()\n}", "func (b *Builder) run() {\n\tfor {\n\t\ttask := b.bq.Pop()\n\t\tb.process(task)\n\t}\n}", "func (gb *GameBoy) Run() {\n\tif err := gb.APU.Start(); err != nil {\n\t\tpanic(err)\n\t}\n\tdsTick := false\n\tfor {\n\t\tselect {\n\t\tcase _, _ = 
<-gb.exitChan:\n\t\t\tgb.APU.Stop()\n\t\t\treturn\n\t\tdefault:\n\t\t\tgb.Timer.Prepare()\n\t\t\tgb.CPU.Step()\n\t\t\tgb.MMU.Step()\n\t\t\tgb.Timer.Step()\n\t\t\tgb.Serial.Step()\n\t\t\tif !dsTick {\n\t\t\t\tgb.APU.Step()\n\t\t\t\tgb.PPU.Step()\n\t\t\t\tdsTick = gb.CPU.DoubleSpeed()\n\t\t\t} else {\n\t\t\t\tdsTick = false\n\t\t\t}\n\t\t}\n\t}\n}", "func (transport *IRCTransport) Run() {\n\tdefer transport.cleanUp()\n\n\t// Connect to server.\n\tif err := transport.connect(); err != nil {\n\t\ttransport.log.Fatalf(\"Error creating connection: \", err)\n\t}\n\n\t// Receiver loop.\n\tgo transport.receiverLoop()\n\n\t// Semaphore clearing ticker.\n\tticker := time.NewTicker(time.Second * time.Duration(transport.antiFloodDelay))\n\tdefer ticker.Stop()\n\tgo func() {\n\t\tfor range ticker.C {\n\t\t\ttransport.resetFloodSemaphore()\n\t\t}\n\t}()\n\n\t// Main loop.\n\tfor {\n\t\tselect {\n\t\tcase msg, ok := <-transport.messages:\n\t\t\tif ok {\n\t\t\t\t// Are there any handlers registered for this IRC event?\n\t\t\t\tif handlers, exists := transport.ircEventHandlers[msg.Command]; exists {\n\t\t\t\t\tfor _, handler := range handlers {\n\t\t\t\t\t\thandler(transport, msg)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\ttransport.log.Infof(\"IRC transport exiting...\")\n}", "func runProcessor() {\n\t// process callback is invoked for each message delivered from\n\t// \"example-stream\" topic.\n\tcb := func(ctx goka.Context, msg interface{}) {\n\n\t\t// during the second run, this should break (as value should already be in context)\n\t\tif val := ctx.Value(); val != nil {\n\t\t\tpanic(fmt.Sprintf(\"dealing with a value already in context %v\", ctx.Value()))\n\t\t}\n\n\t\t// store received value in context (first run)\n\t\tctx.SetValue(msg.(string))\n\t\tlog.Printf(\"stored to ctx key = %s, msg = %v\", ctx.Key(), msg)\n\t}\n\n\t// Define a new processor group. The group defines all inputs, outputs, and\n\t// serialization formats. 
The group-table topic is \"example-group-table\".\n\tg := goka.DefineGroup(group,\n\t\tgoka.Input(topic, new(codec.String), cb),\n\t\tgoka.Persist(new(codec.String)),\n\t)\n\n\tp, err := goka.NewProcessor(brokers, g)\n\tif err != nil {\n\t\tlog.Fatalf(\"error creating processor: %v\", err)\n\t}\n\tif err = p.Run(context.Background()); err != nil {\n\t\tlog.Fatalf(\"error running processor: %v\", err)\n\t}\n}", "func (r *ride) run(ctx context.Context, outc chan<- pipeline.Event) error {\n\tpositions, errc := pipeline.Generate(ctx, r.positions)\n\tsegments, errc1 := pipeline.Reduce(ctx, positions, r.segments)\n\ttotal, err := r.fare(ctx, segments)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terrm := pipeline.MergeErrors(ctx, errc, errc1)\n\tfor err := range errm {\n\t\tswitch {\n\t\tcase err == ErrLinesEmpty:\n\t\tcase err != nil:\n\t\t\treturn err\n\t\t}\n\t}\n\n\tselect {\n\tcase <-ctx.Done():\n\tcase outc <- total:\n\t}\n\n\treturn nil\n}", "func (c *Controller) runWorker() {\n\tfor c.processNextWorkItem(c.ctx) {\n\t}\n}", "func (lp *loop) Run() (buffer string, err error) {\n\tfor {\n\t\tvar flag redrawFlag\n\t\tif lp.extractRedrawFull() {\n\t\t\tflag |= fullRedraw\n\t\t}\n\t\tlp.redrawCb(flag)\n\t\tselect {\n\t\tcase event := <-lp.inputCh:\n\t\t\t// Consume all events in the channel to minimize redraws.\n\t\tconsumeAllEvents:\n\t\t\tfor {\n\t\t\t\tlp.handleCb(event)\n\t\t\t\tselect {\n\t\t\t\tcase ret := <-lp.returnCh:\n\t\t\t\t\tlp.redrawCb(finalRedraw)\n\t\t\t\t\treturn ret.buffer, ret.err\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t\tselect {\n\t\t\t\tcase event = <-lp.inputCh:\n\t\t\t\t\t// Continue the loop of consuming all events.\n\t\t\t\tdefault:\n\t\t\t\t\tbreak consumeAllEvents\n\t\t\t\t}\n\t\t\t}\n\t\tcase ret := <-lp.returnCh:\n\t\t\tlp.redrawCb(finalRedraw)\n\t\t\treturn ret.buffer, ret.err\n\t\tcase <-lp.redrawCh:\n\t\t}\n\t}\n}", "func (s *Scavenger) run() {\n\tdefer func() {\n\t\ts.emitStats()\n\t\tgo s.Stop()\n\t\ts.stopWG.Done()\n\t}()\n\n\t// Start a task to delete orphaned tasks from the tasks table, if enabled\n\tif s.cleanOrphans() {\n\t\ts.executor.Submit(&orphanExecutorTask{scvg: s})\n\t}\n\n\tvar pageToken []byte\n\tfor {\n\t\tresp, err := s.listTaskList(taskListBatchSize, pageToken)\n\t\tif err != nil {\n\t\t\ts.logger.Error(\"listTaskList error\", tag.Error(err))\n\t\t\treturn\n\t\t}\n\n\t\tfor _, item := range resp.Items {\n\t\t\tatomic.AddInt64(&s.stats.tasklist.nProcessed, 1)\n\t\t\tif !s.executor.Submit(s.newTask(&item)) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tpageToken = resp.NextPageToken\n\t\tif pageToken == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\ts.awaitExecutor()\n}", "func (proc *Processor) Run(shutdownSignal <-chan struct{}) {\n\tproc.wp.Start()\n\t<-shutdownSignal\n\tproc.wp.StopAndWait()\n}", "func (pb *Pubsub) run() {\n\tfor {\n\t\tselect {\n\t\tcase t := <-pb.updateCh.Get():\n\t\t\tpb.updateCh.Load()\n\t\t\tif pb.done.HasFired() {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpb.callCallback(t.(*watcherInfoWithUpdate))\n\t\tcase <-pb.done.Done():\n\t\t\treturn\n\t\t}\n\t}\n}", "func (d *delayedRouteUpdater) run() {\n\terr := wait.PollImmediateInfinite(d.interval, func() (bool, error) {\n\t\td.updateRoutes()\n\t\treturn false, nil\n\t})\n\tif err != nil { // this should never happen, if it does, panic\n\t\tpanic(err)\n\t}\n}", "func Run() {\n\trun()\n}", "func (m *Mailer) run() {\n\tfor event := range m.service.events {\n\t\tm.handleEvent(event)\n\t}\n}", "func (s *SignalMonitor) Run() {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tif s.isOn {\n\t\treturn\n\t}\n\ts.isOn = 
true\n\n\twg := &sync.WaitGroup{}\n\twg.Add(1)\n\n\tgo s.process(wg)\n\n\twg.Wait()\n}", "func (tp *TopicProcessor) RunLoop() error {\n\tconsumerChan := tp.getConsumerMessagesChan()\n\tmetricsTicker := time.NewTicker(tp.config.MetricsUpdateInterval)\n\tbatchTicker := time.NewTicker(tp.config.BatchWaitDuration)\n\n\tbatches := tp.getBatches()\n\tlengths := make(map[int]int)\n\n\ttp.logger.Info(\"Entering run loop\")\n\n\tfor {\n\t\tselect {\n\t\tcase consumerMessage := <-consumerChan:\n\t\t\ttp.logger.Debugf(\"Received: %s\", consumerMessage)\n\t\t\tpartition := int(consumerMessage.Partition)\n\t\t\tbatches[partition][lengths[partition]] = consumerMessage\n\t\t\tlengths[partition]++\n\t\t\tif lengths[partition] == tp.config.BatchSize {\n\t\t\t\ttp.logger.Debugf(\"Processing batch of %d messages...\", tp.config.BatchSize)\n\t\t\t\terr := tp.processConsumerMessages(batches[partition], partition)\n\t\t\t\tif err != nil {\n\t\t\t\t\ttp.onClose(metricsTicker, batchTicker)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tlengths[partition] = 0\n\t\t\t\ttp.logger.Debug(\"Processing of batch complete\")\n\t\t\t}\n\t\tcase <-metricsTicker.C:\n\t\t\ttp.onMetricsTick()\n\t\tcase <-batchTicker.C:\n\t\t\tfor _, partition := range tp.partitions {\n\t\t\t\tif lengths[partition] == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\ttp.logger.Debugf(\"Processing batch of %d messages...\", lengths[partition])\n\t\t\t\terr := tp.processConsumerMessages(batches[partition][0:lengths[partition]], partition)\n\t\t\t\tif err != nil {\n\t\t\t\t\ttp.onClose(metricsTicker, batchTicker)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tlengths[partition] = 0\n\t\t\t\ttp.logger.Debug(\"Processing of batch complete\")\n\t\t\t}\n\t\tcase <-tp.close:\n\t\t\ttp.onClose(metricsTicker, batchTicker)\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func (w *worker) run() {\n\tw.a.V(2).S().P()\n\tdefer w.a.V(2).E().P()\n\n\tfor {\n\t\t// Get() blocks until it can return an item\n\t\titem, shutdown := w.queue.Get()\n\t\tif shutdown {\n\t\t\tw.a.Info(\"shutdown request\")\n\t\t\treturn\n\t\t}\n\n\t\tif err := w.processItem(item); err != nil {\n\t\t\t// Item not processed\n\t\t\t// this code cannot return an error and needs to indicate error has been ignored\n\t\t\tutilruntime.HandleError(err)\n\t\t}\n\n\t\t// Forget indicates that an item is finished being retried. Doesn't matter whether its for perm failing\n\t\t// or for success, we'll stop the rate limiter from tracking it. This only clears the `rateLimiter`, you\n\t\t// still have to call `Done` on the queue.\n\t\tw.queue.Forget(item)\n\n\t\t// Remove item from processing set when processing completed\n\t\tw.queue.Done(item)\n\t}\n}", "func run() {\n\tfor {\n\t\tselect {\n\t\tcase at, more := <-pump:\n\t\t\tlog.WithField(ctx, \"time\", at).Debug(\"sse pump\")\n\n\t\t\tprev := nextTick\n\t\t\tnextTick = make(chan struct{})\n\t\t\t// trigger all listeners by closing the nextTick channel\n\t\t\tclose(prev)\n\n\t\t\tif !more {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\tpump = nil\n\t\t\treturn\n\t\t}\n\t}\n}", "func (e *Engine) Run() {\n\te.running = true\n\tstart := time.Now()\n\n\tfor e.running {\n\t\te.simStepper.Step(e.World, e.World, time.Since(start))\n\t}\n}", "func (dfdd *dfddImpl) run() {\n\tfor {\n\t\tselect {\n\t\tcase e := <-dfdd.inputListenerCh:\n\t\t\tdfdd.handleListenerEvent(inputServiceID, e)\n\t\tcase e := <-dfdd.storeListenerCh:\n\t\t\tdfdd.handleListenerEvent(storeServiceID, e)\n\t\tcase <-dfdd.shutdownC:\n\t\t\tdfdd.shutdownWG.Done()\n\t\t\treturn\n\t\t}\n\t}\n}" ]
[ "0.72341156", "0.7054074", "0.69522285", "0.69447345", "0.6864312", "0.68395716", "0.67777276", "0.6761646", "0.6760093", "0.6743983", "0.66867375", "0.66777253", "0.6674406", "0.66260815", "0.6612342", "0.65981054", "0.65792686", "0.6576595", "0.65697527", "0.65504754", "0.6543386", "0.6539791", "0.6517756", "0.646242", "0.6449229", "0.64491975", "0.6434735", "0.6422709", "0.64213735", "0.64188486", "0.6417204", "0.6392233", "0.63890004", "0.6385682", "0.63734126", "0.6351435", "0.6336524", "0.63284844", "0.63281703", "0.63222206", "0.6321112", "0.62959975", "0.6291934", "0.6273609", "0.62706864", "0.62666035", "0.62653714", "0.62640285", "0.62613", "0.6259508", "0.62592417", "0.624562", "0.6241468", "0.61890674", "0.61890674", "0.61890674", "0.61890674", "0.61890674", "0.61890674", "0.61890674", "0.61890674", "0.61890674", "0.61890674", "0.61890674", "0.61890674", "0.61890674", "0.61890674", "0.61890674", "0.61890674", "0.61890674", "0.61890674", "0.61813", "0.6177898", "0.61648643", "0.6162598", "0.61604965", "0.6154816", "0.6152411", "0.6149005", "0.61455417", "0.61438507", "0.61410123", "0.6138866", "0.6138644", "0.61319315", "0.613147", "0.6124598", "0.61231345", "0.6119422", "0.6119369", "0.6108102", "0.6107305", "0.6106992", "0.6106347", "0.61044854", "0.61013716", "0.6097333", "0.6093406", "0.60919195", "0.609183" ]
0.712248
1
stop stops the processor from sending values.
func (p *literalProcessor) stop() { syncClose(p.done) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *reaper) stop() {\n\tr.stopCh <- struct{}{}\n}", "func (sl *ReceiverLoop) stop() {\n\tsl.cancel()\n\t<-sl.stopped\n}", "func (t *channelTransport) stop() {\n\tt.stopChan <- struct{}{}\n}", "func (d *D) stop() {\n\tclose(d.stopCh)\n}", "func (w *Processor) Stop() {\n\tclose(w.stop)\n}", "func (er *BufferedExchangeReporter) Stop() {\n\n}", "func (i2c I2C) stop() {\n\t// Send stop condition.\n\tavr.TWCR.Set(avr.TWCR_TWEN | avr.TWCR_TWINT | avr.TWCR_TWSTO)\n\n\t// Wait for stop condition to be executed on bus.\n\tfor !avr.TWCR.HasBits(avr.TWCR_TWSTO) {\n\t}\n}", "func (s *schedule) stop() {\n\tif !s.running {\n\t\treturn\n\t}\n\ts.running = false\n\ts.stopCh <- struct{}{}\n}", "func (c *Controller) stop(name types.NamespacedName) {\n\tproc, ok := c.procs[name]\n\tif !ok {\n\t\treturn\n\t}\n\n\tif proc.cancelFunc == nil {\n\t\treturn\n\t}\n\tproc.cancelFunc()\n\t<-proc.doneCh\n\tproc.probeWorker = nil\n\tproc.cancelFunc = nil\n\tproc.doneCh = nil\n}", "func (w *Watch) stop() {\n\tw.done <- struct{}{}\n}", "func (pool *WebSocketPool)stop() {\n\tclose(pool.input)\n}", "func (bc *BotCommand) stop() {\n\tbc.Lock()\n\tdefer bc.Unlock()\n\tbc.running = false\n}", "func (w *worker) stop() {\n\tatomic.StoreInt32(&w.running, 0)\n}", "func (w *worker) stop() {\n\tselect {\n\tcase w.stopCh <- struct{}{}:\n\tdefault: // Non-blocking.\n\t}\n}", "func (hb *heartbeat) stop() {\n\tselect {\n\tcase hb.stopChan <- struct{}{}:\n\tdefault:\n\t}\n}", "func (x *x509Handler) stop() {\n\tclose(x.stopChan)\n}", "func stop(c *cli.Context) error {\n\n\tif !isSystemRunning() {\n\t\treturn nil\n\t}\n\t//readers, writers, _, controllers := getIPAddresses()\n\treaders, writers, _, _ := getIPAddresses()\n\n\tfor _, ipaddr := range readers {\n\t\tfmt.Println(\"reader\", ipaddr, \"StopProcess\")\n\t\tipaddrs := make([]string, 1)\n\t\tipaddrs[0] = ipaddr\n\t\tsendCommandToControllers(ipaddrs, \"StopProcess\", \"\")\n\n\t}\n\tfor _, ipaddr := range writers {\n\t\tfmt.Println(\"writer\", ipaddr, \"StopProcess\")\n\t\tipaddrs := make([]string, 1)\n\t\tipaddrs[0] = ipaddr\n\t\tsendCommandToControllers(ipaddrs, \"StopProcess\", \"\")\n\n\t}\n\n\t//sendCommandToControllers(controllers, \"StopReaders\", \"\")\n\t//sendCommandToControllers(controllers, \"StopWriters\", \"\")\n\t//sendCommandToControllers(controllers, \"StopServers\", \"\")\n\treturn nil\n}", "func (s *ContinuousScanner) Stop() {\n\ts.stop <- struct{}{}\n}", "func (b *batchSender) Stop() {\n\tclose(b.ch)\n\n\t// If the buffer can be finished off with 2 or less calls, remove the sleep interval\n\t// so it processes whatever is left without any sleeping.\n\tif len(b.ch) < (b.maxBatch * 2) { //nolint:gomnd // magic number `2` is explained in comment above.\n\t\tb.interval = 0\n\t}\n}", "func (c *Processor) Stop() (err error) {\n\tc.runState = RunStateStopped\n\treturn\n}", "func (b *BTCVanity) Stop() {\n\tb.stop <- true\n}", "func (b *BTCVanity) Stop() {\n\tb.stop <- true\n}", "func (p *Proposer) Stop() {\n\tp.stop <- struct{}{}\n}", "func (r *RunCommand) stop() {\n\tr.logTail.Stop()\n\tr.pw.Stop()\n}", "func (s *server) stop() {\n\ts.stopMu.Lock()\n\tdefer s.stopMu.Unlock()\n\n\tclose(s.stopCh)\n\ts.stopCh = make(chan struct{})\n}", "func (s *Scanner) Stop() {\n\ts.stop <- struct{}{}\n}", "func (s *Sender) Stop() {\n\tclose(s.inputChan)\n\t<-s.done\n}", "func (nr *namedReceiver) Stop(ctx context.Context, d Dest) error {\n\tmetricRecvTotal.WithLabelValues(d.Type.String(), \"STOP\")\n\treturn nr.Receiver.Stop(ctx, d)\n}", "func (p *Processor) Stop() {\n\tc := 
p.stopChan\n\tp.stopChan = nil\n\tc <- struct{}{}\n\tp.Conn.Close()\n}", "func (w *StatsWriter) Stop() {\n\tw.stop <- struct{}{}\n\t<-w.stop\n\tstopSenders(w.senders)\n}", "func (np *nodeProcess) stop() error {\n\terr := np.node.Stop()\n\tnp.rawClient.Kill()\n\treturn err\n}", "func (i *I2C) stop() {\n\t// Page 9, section 3.1.4 START and STOP conditions\n\ti.scl.Out(gpio.Low)\n\ti.sleepHalfCycle()\n\ti.scl.Out(gpio.High)\n\ti.sleepHalfCycle()\n\ti.sda.Out(gpio.High)\n\t// TODO(maruel): This sleep could be skipped, assuming we wait for the next\n\t// transfer if too quick to happen.\n\ti.sleepHalfCycle()\n}", "func (w *worker) stop() {\n\tw.quitChan <- true\n}", "func (w *Wheel) Stop() {\n\tw.stopper.TrySend()\n}", "func (m *Module) stop() {\n\tm.mux.Lock()\n\tdefer m.mux.Unlock()\n\tif m.started && !m.isFinished() {\n\t\tclose(m.done)\n\t}\n}", "func (v *VpxEncoder) Stop() {\n\tv.release()\n}", "func (a *appsec) stop() {\n\ta.unregisterWAF()\n\ta.limiter.Stop()\n}", "func (r *reducer) stop() {\n\tfor _, m := range r.mappers {\n\t\tm.stop()\n\t}\n\tsyncClose(r.done)\n}", "func (device *SilentStepperBrick) Stop() (err error) {\n\tvar buf bytes.Buffer\n\n\tresultBytes, err := device.device.Set(uint8(FunctionStop), buf.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 8 {\n\t\t\treturn fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 8)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tbytes.NewBuffer(resultBytes[8:])\n\n\t}\n\n\treturn nil\n}", "func (s *status) stopping() error { return s.set(\"stopping\", \"STOPPING=1\") }", "func (a API) Stop(cmd *None) (e error) {\n\tRPCHandlers[\"stop\"].Call <-API{a.Ch, cmd, nil}\n\treturn\n}", "func stopValuePusher(in *Input) {\n\tselect {\n\tcase in.quitChan <- true:\n\tdefault:\n\t\t// wasn't running (is there a race here?)\n\t}\n}", "func Stop() {\n\tstopRunning <- true\n\n}", "func (s *Streamer) Stop() {\n\tclose(s.stopc)\n}", "func (h *ProxyHealth) stop() {\n\tif h.cancel != nil {\n\t\th.cancel <- struct{}{}\n\t\tclose(h.cancel)\n\t\th.cancel = nil\n\t}\n}", "func (m *mapper) stop() { syncClose(m.done) }", "func (c *Counter) Stop() {\n\tc.controlChannel <- controlRecord{\"stop\", \"\"}\n}", "func (o *Outbound) Stop() error {\n\treturn o.once.Stop(o.chooser.Stop)\n}", "func (t *gRPCTransport) stop() {\n\t// Stop Communicate RPC and sendLoop\n\tt.grpcServer.Stop()\n\tfor i := 1; i <= 2; i++ {\n\t\tt.stopChan <- struct{}{}\n\t}\n\t// Close connections to peers.\n\tfor _, p := range t.peers {\n\t\tp.stop()\n\t}\n}", "func (it *messageIterator) stop() {\n\tit.cancel()\n\tit.mu.Lock()\n\tit.checkDrained()\n\tit.mu.Unlock()\n\tit.wg.Wait()\n}", "func (cMap *MyStruct) Stop(){\n\tcMap.stop <- true\n}", "func (f *Processor) Stop() {\n\tclose(f.quit)\n\tf.eventsSemaphore.Terminate()\n\tf.wg.Wait()\n\tf.buffer.Clear()\n}", "func (p *Prober) Stop() {\n\tclose(p.stop)\n}", "func (n *Notary) Stop() {\n\tclose(n.stopCh)\n}", "func (sp *StreamPool) Stop() {\n\t//sw.quitCh <- true\n}", "func (m *Machine) Stop() {\n\tm.stopSign <- struct{}{}\n}", "func (w *Watcher) Stop() { w.streamer.Stop() }", "func (p *Pipeline) Stop() {\n\tC.gstreamer_receive_stop_pipeline(p.Pipeline)\n}", "func (converger *converger) Stop() {\n\tconverger.stop <- struct{}{}\n}", "func (v *vtStopCrawler) stop() {\n\tfor _, worker := range v.workers 
{\n\t\tworker.stop()\n\t}\n\tclose(v.done)\n}", "func (r *RoverDriver) Stop() {\n r.commands <- stop\n}", "func (eis *eventSocket) stop() error {\n\teis.log.Info(\"closing Chain IPC\")\n\terrs := wrappers.Errs{}\n\terrs.Add(eis.unregisterFn(), eis.socket.Close())\n\treturn errs.Err\n}", "func (r *ReceiveFuncState) Stop() {\n\tmu.Lock()\n\tdefer mu.Unlock()\n\tr.running = false\n\tselfCheckLocked()\n}", "func (g *Goer) stop() {\n\t// emitted OnStop callback func.\n\tif g.OnStop != nil {\n\t\tg.OnStop()\n\t}\n\n\t// close client.\n\tg.Connections.Range(func(k, connection interface{}) bool {\n\t\tconnection.(connections.Connection).Close(\"\")\n\t\treturn true\n\t})\n\n\tg.OnMessage, g.OnError, g.OnClose, g.OnBufferDrain, g.OnBufferFull = nil, nil, nil, nil, nil\n}", "func (s *maxEPSSampler) Stop() {\n\ts.reportDone <- true\n\t<-s.reportDone\n\n\ts.rateCounter.Stop()\n}", "func (this *Reporter) Stop() {\n\tthis.Status = REPORT_STATUS_STOP\n}", "func (e *Engine) stop() error {\n\te.booted = false\n\n\t// instruct engine to shutdown\n\tshutdown := \"shutdown\"\n\tcommunication.Publish(\n\t\tnaming.Topic(e.Index, naming.Command),\n\t\tnaming.Publisher(e.Index, naming.Command),\n\t\tshutdown)\n\n\t// stop subscribing to engine's commands and events\n\te.Communication.Teardown()\n\n\t// TODO create graphic for MQTT hierarchy, whos's publishing what to whom and why\n\t// TODO explain MQTT hierarchy\n\treturn nil\n}", "func (b *blocksProviderImpl) Stop() {\n\tatomic.StoreInt32(&b.done, 1)\n\tb.client.CloseSend()\n}", "func (p *Peer) stop() {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\tp.heartbeatTimer.Stop()\n}", "func (p *Peer) stop() {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\tp.heartbeatTimer.Stop()\n}", "func (sc *controller) stopScraping() {\n\tclose(sc.done)\n}", "func (s *samplerBackendRateCounter) Stop() {\n\tclose(s.exit)\n\t<-s.stopped\n}", "func Stop() {\n\ts.Stop()\n}", "func (e *Engine) Stop() {\n\te.running = false\n}", "func (h *hub) stop() {\n\th.Lock()\n\tdefer h.Unlock()\n\n\t// Stop transport\n\tif err := h.transport.stop(); err != nil {\n\t\tlog.Printf(\"[error] error stopping transport: %v\", err)\n\t}\n\n\t// Create list of clients ids to be terminated\n\tids := make([]string, len(h.clients))\n\ti := 0\n\n\t// Close client connections\n\tfor id, client := range h.clients {\n\t\tclose(client.outboundc)\n\t\tclient.sock.Close()\n\t\tids[i] = id\n\t\ti++\n\t}\n\n\t// Remove clients from presence\n\tif err := h.presence.removeMulti(ids); err != nil {\n\t\tlog.Printf(\"[error] error removing clients from presence: %v\", err)\n\t}\n}", "func (s *Sender) Stop() {\n\tif !s.running {\n\t\treturn\n\t}\n\n\tclose(s.quit)\n}", "func (ps *rateLimiter) Stop() { close(ps.exit) }", "func (bt *Metricbeat) Stop() {\n\tclose(bt.done)\n}", "func (e *binaryExprEvaluator) stop() {\n\te.lhs.stop()\n\te.rhs.stop()\n\tsyncClose(e.done)\n}", "func stop() {\n\t// Close the channel to stop the mail daemon.\n\tclose(emailCh)\n}", "func stop() error {\n\tif spammerInstance == nil {\n\t\treturn ErrSpammerDisabled\n\t}\n\n\tspammerLock.Lock()\n\tdefer spammerLock.Unlock()\n\n\tstopWithoutLocking()\n\n\tisRunning = false\n\n\treturn nil\n}", "func (w *Watcher) Stop() {\n\tw.StopChannel <- true\n}", "func (pomo *Pomo) Stop() {\n\n\tif pomo.Status == ON {\n\t\tpomo.Cancel <- true\n\t\tpomo.Time = DEFAULT_DURATION\n\t}\n\n}", "func (f *framework) stop() {\n\tclose(f.epochChan)\n}", "func (room *RoomMessages) stop() {\n\troom.messagesFree()\n\troom.StopPublish()\n}", "func stop() {\n\trobot.RLock()\n\tpr 
:= robot.pluginsRunning\n\tstop := robot.stop\n\trobot.RUnlock()\n\tLog(Debug, fmt.Sprintf(\"stop called with %d plugins running\", pr))\n\trobot.Wait()\n\tbrainQuit()\n\tclose(stop)\n}", "func (a *Attacker) Stop() {\n\tselect {\n\tcase <-a.stopch:\n\t\treturn\n\tdefault:\n\t\tclose(a.stopch)\n\t}\n}", "func (s *streamStrategy) Stop() {\n\tclose(s.inputChan)\n\t<-s.done\n}", "func (dt *discoveryTool) stop() {\n\tclose(dt.done)\n\n\t//Shutdown timer\n\ttimer := time.NewTimer(time.Second * 3)\n\tdefer timer.Stop()\nL:\n\tfor { //Unblock go routine by reading from dt.dataChan\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\tbreak L\n\t\tcase <-dt.dataChan:\n\t\t}\n\t}\n\n\tdt.wg.Wait()\n}", "func (t *Transport) Stop() {\n\tt.cancel()\n\tt.stopper.Stop()\n\tt.paxosRPC.Stop()\n}", "func (a *Acceptor) Stop() {\n\t//TODO(student): Task 3 - distributed implementation\n\ta.stop <- 0\n\n}", "func (g *NOOPTransport) Stop() {\n}", "func (p *pinger) Stop() {\n\tp.stop <- struct{}{}\n}", "func (h *handler) Stop() {\n\tclose(h.stop)\n\t<-h.stopDone\n}", "func (margelet *Margelet) Stop() {\n\tmargelet.running = false\n}", "func (t *Tracer) Stop() {}", "func (mm *BytesMonitor) Stop(ctx context.Context) {\n\tmm.doStop(ctx, true)\n}", "func (app *frame) Stop() {\n\tapp.isStopped = true\n}", "func (rtspService *RTSPService) Stop(msg *wssapi.Msg) (err error) {\n\treturn\n}", "func (o *influxDBLogger) stop() error {\n\treturn nil\n}" ]
[ "0.7351431", "0.7231694", "0.71288663", "0.70204103", "0.69777024", "0.68193096", "0.67930245", "0.6749612", "0.6748719", "0.67416406", "0.67310125", "0.6711041", "0.6673169", "0.6668644", "0.6649103", "0.66150033", "0.6612311", "0.66082686", "0.6555046", "0.65514636", "0.6526342", "0.6526342", "0.65231216", "0.6509506", "0.65089655", "0.65073013", "0.64905715", "0.64842296", "0.64825183", "0.64691806", "0.64512146", "0.64398754", "0.64387625", "0.64343005", "0.6425349", "0.6403836", "0.63935876", "0.6390791", "0.6386251", "0.6380497", "0.6378134", "0.6372466", "0.6365298", "0.63594705", "0.63462734", "0.6337409", "0.63315964", "0.6327937", "0.6324012", "0.6321564", "0.63152313", "0.63136274", "0.6277813", "0.62564814", "0.6256467", "0.62544143", "0.62543213", "0.6247504", "0.6247207", "0.6240775", "0.6230369", "0.62213", "0.62199485", "0.6219562", "0.62166035", "0.621228", "0.62095636", "0.62074167", "0.6190809", "0.6190809", "0.6190592", "0.6187251", "0.6185713", "0.61813414", "0.61790806", "0.61754894", "0.61692244", "0.6166611", "0.6162993", "0.61583066", "0.61573106", "0.6155288", "0.6153345", "0.6150117", "0.614409", "0.6142307", "0.61369365", "0.61363155", "0.61311644", "0.612985", "0.61231863", "0.6118975", "0.61167073", "0.6113777", "0.611198", "0.6106903", "0.6101787", "0.6099458", "0.609705", "0.60950524" ]
0.7386047
0
name returns the source name.
func (p *literalProcessor) name() string { return "" }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *Source) Name() string {\n\treturn s.SourceName\n}", "func (s *Source) Name() string {\n\treturn \"spyse\"\n}", "func (r Source) GetName() string {\n\treturn r.Name\n}", "func (s *Source) Name() string {\n\treturn \"crtsh\"\n}", "func (e *Event) SourceName() collection.Name {\n\tif e.Source != nil {\n\t\treturn e.Source.Name()\n\t}\n\treturn \"\"\n}", "func (s Source) Name() string { return \"rdt\" }", "func (d *DataPacket) SourceName() string {\n\ti := 44 //the ending index for the string, because it is 0 terminated\n\tfor i < 108 && d.data[i] != 0 {\n\t\ti++\n\t}\n\treturn string(d.data[44:i])\n}", "func (s *Source) GetName() string {\n\treturn s.Name\n}", "func (s *Source) Name() string {\n\treturn \"github\"\n}", "func (s *YAMLFileSource) Name() (name string) {\n\treturn fmt.Sprintf(\"yaml file(%s)\", s.path)\n}", "func (o ResourceMetricSourceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ResourceMetricSource) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o ResourceMetricSourceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ResourceMetricSource) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o CloudHealthcareSourceOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v CloudHealthcareSource) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o *ActionDTO) GetSourceName() string {\n\tif o == nil || o.SourceName == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.SourceName\n}", "func (o GetEventSourcesSourceOutput) EventSourceName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetEventSourcesSource) string { return v.EventSourceName }).(pulumi.StringOutput)\n}", "func (o ContainerResourceMetricSourceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ContainerResourceMetricSource) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o ContainerResourceMetricSourceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ContainerResourceMetricSource) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o DataSourceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *DataSource) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (o *BulletinDTO) GetSourceName() string {\n\tif o == nil || o.SourceName == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.SourceName\n}", "func (o *TransactionSplit) GetSourceName() string {\n\tif o == nil || o.SourceName.Get() == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.SourceName.Get()\n}", "func (s *scraper) Source() string {\n\treturn s.name\n}", "func (g componentSourceGenerator) GetName() string {\n\treturn g.Name\n}", "func (o CloudHealthcareSourcePtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *CloudHealthcareSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (s *Structured) GetName() string {\n\treturn s.cloudEvent.Source\n}", "func (s *CommandLineSource) Name() (name string) {\n\treturn \"command-line\"\n}", "func (o ResourceMetricSourcePtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ResourceMetricSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ResourceMetricSourcePtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ResourceMetricSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Name\n\t}).(pulumi.StringPtrOutput)\n}", 
"func (o IopingSpecVolumePersistentVolumeClaimSpecDataSourceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumePersistentVolumeClaimSpecDataSource) string { return v.Name }).(pulumi.StringOutput)\n}", "func (r *reducer) name() string { return r.stmt.Source.(*Measurement).Name }", "func (o ResourceMetricSourcePatchOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ResourceMetricSourcePatch) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o ResourceMetricSourcePatchOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ResourceMetricSourcePatch) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (j *Jsonnet) Name(wantsNameSpaced bool) string {\n\tbase := filepath.Base(j.source)\n\tname := strings.TrimSuffix(base, filepath.Ext(base))\n\tif !wantsNameSpaced {\n\t\treturn name\n\t}\n\n\tif j.module == \"/\" {\n\t\treturn name\n\t}\n\n\treturn path.Join(j.module, name)\n}", "func (o ArgoCDExportSpecStoragePvcDataSourceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ArgoCDExportSpecStoragePvcDataSource) string { return v.Name }).(pulumi.StringOutput)\n}", "func (src *Tracer) Name() string {\n\treturn src.name\n}", "func (o IopingSpecVolumeVolumeSourceProjectedSourcesSecretOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceProjectedSourcesSecret) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (s *Data) Source() string {\n\treturn fmt.Sprintf(\"data:%v\", path.Clean(s.Location))\n}", "func (o FioSpecVolumeVolumeSourceProjectedSourcesSecretOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceProjectedSourcesSecret) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (s *MapSource) Name() (name string) {\n\treturn \"map\"\n}", "func (*Plugin) SourceFileName() string {\n\treturn sourceFileName\n}", "func (*Plugin) SourceFileName() string {\n\treturn sourceFileName\n}", "func (o ApplicationStatusOperationStateSyncResultSourceHelmFileParametersOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusOperationStateSyncResultSourceHelmFileParameters) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o ContainerResourceMetricSourcePatchOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ContainerResourceMetricSourcePatch) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o ContainerResourceMetricSourcePatchOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ContainerResourceMetricSourcePatch) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o IopingSpecVolumeVolumeSourceProjectedSourcesSecretPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceProjectedSourcesSecret) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ContainerResourceMetricSourcePtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ContainerResourceMetricSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ContainerResourceMetricSourcePtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ContainerResourceMetricSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o FioSpecVolumePersistentVolumeClaimSpecDataSourceOutput) Name() pulumi.StringOutput {\n\treturn 
o.ApplyT(func(v FioSpecVolumePersistentVolumeClaimSpecDataSource) string { return v.Name }).(pulumi.StringOutput)\n}", "func (mySource *Source) Source() (param string) {\n\treturn mySource.Sourcevar\n}", "func (o RegistryTaskSourceTriggerOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v RegistryTaskSourceTrigger) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o BuildSpecSourceCredentialsOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BuildSpecSourceCredentials) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o BuildRunStatusBuildSpecSourceCredentialsOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BuildRunStatusBuildSpecSourceCredentials) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o ApplicationSpecSourcePluginEnvOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApplicationSpecSourcePluginEnv) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o ApplicationSpecSourceHelmFileParametersOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationSpecSourceHelmFileParameters) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o FioSpecVolumeVolumeSourceProjectedSourcesSecretPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceProjectedSourcesSecret) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (s *EnvironmentSource) Name() (name string) {\n\treturn \"environment\"\n}", "func (o SourceOutput) DisplayName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Source) pulumi.StringOutput { return v.DisplayName }).(pulumi.StringOutput)\n}", "func (c *auditLog) getName() string {\n\treturn c.name\n}", "func (o CloudHealthcareSourceResponseOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v CloudHealthcareSourceResponse) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o IopingSpecVolumePersistentVolumeClaimSpecDataSourcePtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumePersistentVolumeClaimSpecDataSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (s *SecretsSource) Name() (name string) {\n\treturn \"secrets\"\n}", "func (a Asset) source() string {\n\tsource := fileNameWithoutExt(a.PublicID)\n\n\tif !isURL(source) {\n\t\tvar err error\n\t\tsource, err = url.QueryUnescape(strings.Replace(source, \"%20\", \"+\", -1))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tsource = smartEscape(source)\n\n\tif a.Suffix != \"\" {\n\t\tsource += fmt.Sprintf(\"/%s\", a.Suffix)\n\t}\n\n\tif filepath.Ext(a.PublicID) != \"\" {\n\t\tsource += filepath.Ext(a.PublicID)\n\t}\n\n\treturn source\n}", "func (s *SourceImportAuthor) GetName() string {\n\tif s == nil || s.Name == nil {\n\t\treturn \"\"\n\t}\n\treturn *s.Name\n}", "func (s *Stream) Name() string { return s.file.Name() }", "func (p ProjectInit) Name() string {\n\treturn string(p)\n}", "func (o ApplicationStatusOperationStateSyncResultSourceHelmParametersOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusOperationStateSyncResultSourceHelmParameters) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (ci converterInfo) Source() string {\n\treturn ci.source\n}", "func (o ResourceMetricSourcePatchPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ResourceMetricSourcePatch) *string {\n\t\tif v == nil {\n\t\t\treturn 
nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ResourceMetricSourcePatchPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ResourceMetricSourcePatch) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ApplicationStatusHistorySourceHelmFileParametersOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusHistorySourceHelmFileParameters) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o ApplicationStatusSyncComparedToSourcePluginEnvOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApplicationStatusSyncComparedToSourcePluginEnv) string { return v.Name }).(pulumi.StringOutput)\n}", "func (fi *fileInfo) Name() string { return fi.name }", "func (o BuildRunStatusBuildSpecSourceCredentialsPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *BuildRunStatusBuildSpecSourceCredentials) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o TargetProjectOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *TargetProject) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (this Intro) name() estring {\n\treturn this.n\n}", "func (dt *Targeter) name() string {\n\tvar id string\n\tif dt.IDs != nil {\n\t\tid = \"{id}\"\n\t}\n\treturn fmt.Sprintf(\"%s %s/%s/%s\", dt.Method, dt.BaseURL, dt.Endpoint, id)\n}", "func (fe *fileEntry) Name() string { return fe.name }", "func (o ApplicationSpecSourceHelmParametersOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationSpecSourceHelmParameters) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (e *Event) Source() string {\n\treturn e.conn\n}", "func (o ApplicationOperationSyncSourceHelmFileParametersOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationOperationSyncSourceHelmFileParameters) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o FioSpecVolumeVolumeSourceProjectedSourcesConfigMapOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceProjectedSourcesConfigMap) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o LookupServiceIntegrationResultOutput) SourceServiceName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupServiceIntegrationResult) string { return v.SourceServiceName }).(pulumi.StringOutput)\n}", "func (o ApplicationStatusOperationStateSyncResultSourcePluginEnvOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApplicationStatusOperationStateSyncResultSourcePluginEnv) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o IopingSpecVolumeVolumeSourceProjectedSourcesConfigMapOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceProjectedSourcesConfigMap) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (t Type) Source() string {\n\treturn t.source\n}", "func (o *ActionDTO) SetSourceName(v string) {\n\to.SourceName = &v\n}", "func (g GitHub) Name() string {\n\tif g.local != \"\" {\n\t\treturn g.local\n\t}\n\treturn g.binary\n}", "func (p Packet) Name() (name string) {\n\t// todo: think of ways to make this not a compiled in hack\n\t// todo: collectd 4 uses different patterns for some plugins\n\t// https://collectd.org/wiki/index.php/V4_to_v5_migration_guide\n\tswitch p.Plugin {\n\tcase \"df\":\n\t\tname = fmt.Sprintf(\"df_%s_%s\", p.PluginInstance, p.TypeInstance)\n\tcase 
\"interface\":\n\t\tname = fmt.Sprintf(\"%s_%s\", p.Type, p.PluginInstance)\n\tcase \"load\":\n\t\tname = \"load\"\n\tcase \"memory\":\n\t\tname = fmt.Sprintf(\"memory_%s\", p.TypeInstance)\n\tdefault:\n\t\tname = fmt.Sprintf(\"%s_%s_%s_%s\", p.Plugin, p.PluginInstance, p.Type, p.TypeInstance)\n\t}\n\treturn name\n}", "func (o ApplicationStatusHistorySourcePluginEnvOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApplicationStatusHistorySourcePluginEnv) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o SiaFileInfo) Name() string {\n\treturn o.FileName\n}", "func (o ApplicationStatusSyncComparedToSourceHelmFileParametersOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusSyncComparedToSourceHelmFileParameters) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func source() string {\n\treturn \"I am an evil gopher\"\n}", "func (c *withNameAndCode) Name() string {\n\treturn c.name\n}", "func (o BuildSpecSourceCredentialsPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *BuildSpecSourceCredentials) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ContainerResourceMetricSourcePatchPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ContainerResourceMetricSourcePatch) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ContainerResourceMetricSourcePatchPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ContainerResourceMetricSourcePatch) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o *BulletinDTO) SetSourceName(v string) {\n\to.SourceName = &v\n}", "func GenVolumeSourceName(source string, index int64) string {\n\treturn source + common.NameSeparator + strconv.FormatInt(index, 10)\n}", "func (e *EDNS) Name() string { return name }", "func fileSource(filename string, i int) string {\n\treturn fmt.Sprintf(\"%s:%d\", filename, i)\n}", "func (ds *Datasource) GetName() string {\n\treturn ds.name\n}", "func (e *Entry) Name() string {\n\tif len(e.path) == 0 {\n\t\treturn \"\"\n\t}\n\treturn e.path[len(e.path)-1]\n}" ]
[ "0.78794163", "0.75167346", "0.7478469", "0.7449441", "0.7437304", "0.7387519", "0.73252875", "0.72747105", "0.7236276", "0.7090298", "0.7044449", "0.7044449", "0.6945361", "0.6810892", "0.68048626", "0.6776871", "0.6776871", "0.6724642", "0.6661173", "0.6651805", "0.6625454", "0.6564692", "0.65482056", "0.6521428", "0.6444199", "0.6427358", "0.6427358", "0.64273053", "0.64019334", "0.63985956", "0.63985956", "0.63970643", "0.63568366", "0.63545203", "0.6345645", "0.6311637", "0.6308424", "0.6303546", "0.6260226", "0.6260226", "0.6247084", "0.6245244", "0.6245244", "0.62372345", "0.62371534", "0.62371534", "0.6222376", "0.6209819", "0.62011534", "0.6200014", "0.61755013", "0.6159335", "0.61291414", "0.6119233", "0.6113662", "0.61117154", "0.61012226", "0.6096734", "0.60674095", "0.6056915", "0.60292953", "0.6028448", "0.60220057", "0.6015895", "0.6010574", "0.5983533", "0.5958791", "0.5958791", "0.5958361", "0.59547806", "0.59520304", "0.59423393", "0.5939665", "0.59377843", "0.59375775", "0.59307474", "0.5929412", "0.5926923", "0.5923438", "0.59232825", "0.59212494", "0.5920124", "0.59168303", "0.59166396", "0.5913426", "0.5906614", "0.5902419", "0.58879215", "0.5887569", "0.58861053", "0.5886017", "0.5874572", "0.58686024", "0.58668345", "0.58668345", "0.5858391", "0.5853577", "0.5842946", "0.5840868", "0.58404166", "0.58368796" ]
0.0
-1
syncClose closes a "done" channel and waits for a response.
func syncClose(done chan chan struct{}) {
	ch := make(chan struct{}, 0)
	done <- ch
	<-ch
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *Channel) Close() {\n\tclose(c.done)\n\tc.sharedDone()\n}", "func (o *Switch) CloseAsync() {\n\to.close()\n}", "func (r *SbProxy) closeDone() {\n\tr.doneOnce.Do(func() { close(r.doneCh) })\n}", "func Close() {\n\tcancelCh <- struct{}{}\n\t<-doneCh\n}", "func (a *Async) Close() {\n\ta.quit <- true\n\t// wait for watcher quit\n\t<-a.quit\n}", "func (z *ZMQ4) CloseAsync() {\n\tif atomic.CompareAndSwapInt32(&z.running, 1, 0) {\n\t\tclose(z.closeChan)\n\t}\n}", "func (r *Resource) CloseAsync() {\n}", "func (r *Resource) CloseAsync() {\n}", "func (t *TestProcessor) CloseAsync() {\n\tprintln(\"Closing async\")\n}", "func (ch *Channel) Close() {}", "func TestRTCPeerConnection_Close(t *testing.T) {\n\t// Limit runtime in case of deadlocks\n\tlim := test.TimeOut(time.Second * 20)\n\tdefer lim.Stop()\n\n\treport := test.CheckRoutines(t)\n\tdefer report()\n\n\tpcOffer, pcAnswer, err := newPair()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tawaitSetup := make(chan struct{})\n\tpcAnswer.OnDataChannel(func(d *RTCDataChannel) {\n\t\tclose(awaitSetup)\n\t})\n\n\t_, err = pcOffer.CreateDataChannel(\"data\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = signalPair(pcOffer, pcAnswer)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t<-awaitSetup\n\n\terr = pcOffer.Close()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = pcAnswer.Close()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (srv *Server) WaitClose() {\n\t<-srv.closeChan\n}", "func (m *MQTT) CloseAsync() {\n\tgo func() {\n\t\tm.connMut.Lock()\n\t\tif m.client != nil {\n\t\t\tm.client.Disconnect(0)\n\t\t\tm.client = nil\n\t\t}\n\t\tm.connMut.Unlock()\n\t}()\n}", "func (m mockProc) CloseAsync() {\n\t// Do nothing as our processor doesn't require resource cleanup.\n}", "func DoneChanClosed(oid uint64) bool {\n\tci, ok := GetManagedObject(oid)\n\tif !ok {\n\t\tpanic(\"failed to get the done chan\")\n\t}\n\tc := ci.(<-chan struct{})\n\tselect {\n\tcase <-c:\n\t\treturn true\n\tdefault:\n\t}\n\treturn false\n}", "func (aio *AsyncIO) Close() error {\n\tif aio.ioctx == 0 {\n\t\treturn ErrNotInit\n\t}\n\n\t// send to signal to stop wait\n\taio.close <- struct{}{}\n\t<-aio.close\n\n\tif aio.close != nil {\n\t\tclose(aio.close)\n\t}\n\tif aio.trigger != nil {\n\t\tclose(aio.trigger)\n\t}\n\n\t// destroy async IO context\n\taio.ioctx.Destroy()\n\taio.ioctx = 0\n\n\t// close file descriptor\n\tif err := aio.fd.Close(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (o *Switch) WaitForClose(timeout time.Duration) error {\n\tselect {\n\tcase <-o.closedChan:\n\tcase <-time.After(timeout):\n\t\treturn types.ErrTimeout\n\t}\n\treturn nil\n}", "func (dc *Decompressor) waitForChannelToClose(ctx context.Context, ch <-chan *blockDesc) {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase _, ok := <-ch:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func (s socket) Close() error {\n\ts.done <- true\n\treturn nil\n}", "func (r *Retry) CloseAsync() {\n\tif atomic.CompareAndSwapInt32(&r.running, 1, 0) {\n\t\tclose(r.closeChan)\n\t}\n}", "func (rr *RtcpReceiver) Close() {\n\trr.events <- rtcpReceiverEventTerminate{}\n\t<-rr.done\n}", "func (lc *Closer) Done() {\n\tlc.waiting.Done()\n}", "func (c *Channel) Close() (result *utils.AsyncResult) {\n\tif c.State != channeltype.StateOpened {\n\t\tlog.Warn(fmt.Sprintf(\"try to close channel %s,but it's state is %s\", utils.HPex(c.ChannelIdentifier.ChannelIdentifier), c.State))\n\t}\n\tif c.State == channeltype.StateClosed ||\n\t\tc.State == 
channeltype.StateSettled {\n\t\tresult = utils.NewAsyncResult()\n\t\tresult.Result <- fmt.Errorf(\"channel %s already closed or settled\", utils.HPex(c.ChannelIdentifier.ChannelIdentifier))\n\t\treturn\n\t}\n\t/*\n\t\t在关闭的过程中崩溃了,或者关闭 tx 失败了,这些都可能发生.所以不能因为 state 不对,就不允许 close\n\t\t标记的目的是为了阻止继续接受或者发起交易.\n\t*/\n\tc.State = channeltype.StateClosing\n\tbp := c.PartnerState.BalanceProofState\n\tresult = c.ExternState.Close(bp)\n\treturn\n}", "func closeDependency(ctx context.Context, n int, wg *sync.WaitGroup) {\n\ttime.Sleep((time.Duration(rand.Intn(10) * 25)) * time.Millisecond)\n\tfmt.Printf(\"[%d] closed\\n\", n)\n\twg.Done() // decrements the WaitGroup counter by one, same as wg.Add(-1) // HL\n}", "func (m *MQTT) WaitForClose(timeout time.Duration) error {\n\treturn nil\n}", "func (s *EtcdV3) Close() {\n\tclose(s.done)\n\ts.client.Close()\n}", "func TestRaceBetweenChannelAndConnectionClose(t *testing.T) {\n\tdefer time.AfterFunc(10*time.Second, func() { panic(\"Close deadlock\") }).Stop()\n\n\tconn := integrationConnection(t, \"allocation/shutdown race\")\n\n\tgo conn.Close()\n\tfor i := 0; i < 10; i++ {\n\t\tgo func() {\n\t\t\tch, err := conn.Channel()\n\t\t\tif err == nil {\n\t\t\t\tch.Close()\n\t\t\t}\n\t\t}()\n\t}\n}", "func (t *TestProcessor) WaitForClose(timeout time.Duration) error {\n\tprintln(\"Waiting for close\")\n\treturn nil\n}", "func (r *ReadUntil) CloseAsync() {\n\tr.shutSig.CloseAtLeisure()\n}", "func (_m *AsyncBR) Close(ctx context.Context) {\n\t_m.Called(ctx)\n}", "func (p *AsyncBundleUnacks) CloseAsync() {\n\tp.r.CloseAsync()\n}", "func (client *SyncClient) Close() error {\n\tif client.cClient == nil {\n\t\treturn nil\n\t}\n\n\tfor _, cbId := range client.cCallbacks {\n\t\tcCallbackUnregister(cbId)\n\t}\n\n\tclient.ob.syncClient = nil\n\n\treturn cCall(func() C.obx_err {\n\t\tdefer func() { client.cClient = nil }()\n\t\treturn C.obx_sync_close(client.cClient)\n\t})\n}", "func (c *Client) Done() {\n\tc.doneCh <- true\n}", "func waitForResponse(originalResp *Response, channel chan *rpc.Call) {\n\tresp := <-channel\n\tvar test *Response\n\ttest = resp.Reply.(*Response)\n\ttest.client.Close()\n\toriginalResp.Reply = test.Reply\n\treturn\n}", "func GetDoneChannel() *chan os.Signal {\n\tdone := make(chan os.Signal, 1)\n\tsignal.Notify(done, os.Interrupt)\n\treturn &done\n}", "func (c *Client) Done() {\n\t<-c.done.Done()\n}", "func (z *ZMQ4) WaitForClose(timeout time.Duration) error {\n\tselect {\n\tcase <-z.closedChan:\n\tcase <-time.After(timeout):\n\t\treturn types.ErrTimeout\n\t}\n\treturn nil\n}", "func (w *Watcher) Close() {\n w.done <- true\n close(w.done)\n}", "func (s *Stream) closeTimeout() {\n\t// Close our side forcibly\n\ts.forceClose()\n\n\t// Free the stream from the session map\n\ts.session.closeStream(s.id)\n\n\t// Send a RST so the remote side closes too.\n\ts.sendLock.Lock()\n\tdefer s.sendLock.Unlock()\n\ts.sendHdr.encode(typeWindowUpdate, flagRST, s.id, 0)\n\ts.session.sendNoWait(s.sendHdr)\n}", "func (m mockProc) WaitForClose(timeout time.Duration) error {\n\t// Do nothing as our processor doesn't require resource cleanup.\n\treturn nil\n}", "func chanClose(ch *channel) {\n\tif ch == nil {\n\t\t// Not allowed by the language spec.\n\t\truntimePanic(\"close of nil channel\")\n\t}\n\tswitch ch.state {\n\tcase chanStateClosed:\n\t\t// Not allowed by the language spec.\n\t\truntimePanic(\"close of closed channel\")\n\tcase chanStateSend:\n\t\t// This panic should ideally on the sending side, not in this goroutine.\n\t\t// But when a goroutine tries to send 
while the channel is being closed,\n\t\t// that is clearly invalid: the send should have been completed already\n\t\t// before the close.\n\t\truntimePanic(\"close channel during send\")\n\tcase chanStateRecv:\n\t\t// unblock all receivers with the zero value\n\t\tch.state = chanStateClosed\n\t\tfor ch.blocked != nil {\n\t\t\tch.resumeRX(false)\n\t\t}\n\tcase chanStateEmpty, chanStateBuf:\n\t\t// Easy case. No available sender or receiver.\n\t}\n\tch.state = chanStateClosed\n\tchanDebug(ch)\n}", "func (v *hvsockConn) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {\n\tif err != syscall.ERROR_IO_PENDING {\n\t\tv.wg.Done()\n\t\treturn int(bytes), err\n\t}\n\n\tif v.closing {\n\t\tcancelIoEx(v.fd, &c.o)\n\t}\n\n\tvar timeout timeoutChan\n\tif d != nil {\n\t\td.channelLock.Lock()\n\t\ttimeout = d.channel\n\t\td.channelLock.Unlock()\n\t}\n\n\tvar r ioResult\n\tselect {\n\tcase r = <-c.ch:\n\t\terr = r.err\n\t\tif err == syscall.ERROR_OPERATION_ABORTED {\n\t\t\tif v.closing {\n\t\t\t\terr = ErrSocketClosed\n\t\t\t}\n\t\t}\n\tcase <-timeout:\n\t\tcancelIoEx(v.fd, &c.o)\n\t\tr = <-c.ch\n\t\terr = r.err\n\t\tif err == syscall.ERROR_OPERATION_ABORTED {\n\t\t\terr = ErrTimeout\n\t\t}\n\t}\n\n\t// runtime.KeepAlive is needed, as c is passed via native\n\t// code to ioCompletionProcessor, c must remain alive\n\t// until the channel read is complete.\n\truntime.KeepAlive(c)\n\tv.wg.Done()\n\treturn int(r.bytes), err\n}", "func (p CustomProcessor) WaitForClose(timeout time.Duration) error {\n\t// Do nothing as our processor doesn't require resource cleanup.\n\treturn nil\n}", "func (c *Channel) Close() error {\n\treturn c.exit(false)\n}", "func TestRaceBetweenChannelShutdownAndSend(t *testing.T) {\n\tdefer time.AfterFunc(10*time.Second, func() { panic(\"Close deadlock\") }).Stop()\n\n\tconn := integrationConnection(t, \"channel close/send race\")\n\tdefer conn.Close()\n\n\tch, _ := conn.Channel()\n\n\tgo ch.Close()\n\tfor i := 0; i < 10; i++ {\n\t\tgo func() {\n\t\t\t// ch.Ack calls ch.send() internally.\n\t\t\tch.Ack(42, false)\n\t\t}()\n\t}\n}", "func (a *Adapter) Close() {\n\tclose(a.done)\n\ta.doneWg.Wait()\n\ta.conn.Close()\n}", "func (s *Server) Close() error {\n\ts.done <- true\n\treturn nil\n}", "func (s *ConcurrentServer) Close() {\n\tclose(s.taskCh)\n\ts.wg.Wait()\n}", "func (b *Bloblang) CloseAsync() {\n\tif b.timer != nil {\n\t\tb.timer.Stop()\n\t}\n}", "func (s *Session) doClose(ctx context.Context, name Name, f func(ctx context.Context, conn *grpc.ClientConn, header *headers.RequestHeader) (*headers.ResponseHeader, interface{}, error)) error {\n\treturn s.doPrimitive(ctx, name, f)\n}", "func (c *connection) Close() {\n\tbaseurl := \"http://fritz.box/webservices/homeautoswitch.lua\"\n\tparameters := make(map[string]string)\n\tparameters[\"sid\"] = c.sid\n\tparameters[\"logout\"] = \"logout\"\n\tUrl := prepareRequest(baseurl, parameters)\n\tsendRequest(Url)\n}", "func routeSmartCardClose(w http.ResponseWriter, r *http.Request) {\n\tapi.SmartCardDisconnect()\n\tfmt.Fprintln(w, `{ \"success\" : true}`)\n}", "func (dc DoneChan) Done() <-chan struct{} { return dc }", "func (c *MockedHTTPContext) Done() <-chan struct{} {\n\tif c.MockedDone != nil {\n\t\treturn c.MockedDone()\n\t}\n\treturn nil\n}", "func (s *Connection) CloseWait() error {\n\tcloseErr := s.Close()\n\tif closeErr != nil {\n\t\treturn closeErr\n\t}\n\tshutdownErr, ok := <-s.shutdownChan\n\tif ok {\n\t\treturn shutdownErr\n\t}\n\treturn nil\n}", "func (ch *Channel) close() 
{\n\tch.watcherWg.Wait()\n\n\tif err := ch.pch.Close(); err != nil {\n\t\tch.Error(\"Closing channel\", err)\n\t}\n\tch.status = closed\n}", "func (s *SignalFx) Close() error {\n\ts.ctx.Done()\n\ts.client = nil\n\tclose(s.done)\n\ts.wg.Wait()\n\treturn nil\n}", "func (p *AsyncBundleUnacks) WaitForClose(tout time.Duration) error {\n\treturn p.r.WaitForClose(tout)\n}", "func gracefulShutdown(exitSig <-chan os.Signal, done chan error, timeout time.Duration) {\n\t// start_shutdown1 OMIT\n\tsig := <-exitSig // same as before // HL\n\tfmt.Printf(\"received signal: %q, starting graceful shutdown...\\n\",\n\t\tsig.String())\n\n\t// start timeout countdown // HL\n\tctx, cancel := context.WithTimeout(context.Background(), timeout) // HL\n\tdefer cancel()\n\t// end_shutdown1 OMIT\n\n\t// start_shutdown_timeout OMIT\n\tgo func() {\n\t\t<-ctx.Done() // wait until context timeout or it's cancelled // HL\n\t\tif err := ctx.Err(); err != nil {\n\t\t\tdone <- fmt.Errorf(\"graceful shutdown failed: %w\", err) // HL\n\t\t}\n\t}()\n\t// end_shutdown_timeout OMIT\n\n\t// start_shutdown_wg OMIT\n\t// tl;dr: a WaitGroup is a counter which can increase and decrease. // HL\n\t// Calling WaitGroup.Wait() will block until the counter is ZERO // HL\n\twg := &sync.WaitGroup{}\n\n\twg.Add(4) // adds 4 to the counter // HL\n\tgo closeDependency(ctx, 0, wg)\n\tgo closeDependency(ctx, 1, wg)\n\tgo closeDependency(ctx, 2, wg)\n\tgo closeDependency(ctx, 3, wg)\n\n\twg.Wait() // Wait() will return when WaitGroup counter == 0 // HL\n\t// end_shutdown_wg OMIT\n\tfmt.Println(\"shutdown complete\")\n\tdone <- nil\n}", "func (k *Kafka) CloseAsync() {\n\tif atomic.CompareAndSwapInt32(&k.running, 1, 0) {\n\t\tclose(k.closeChan)\n\t}\n}", "func SafeClose(ch chan MetadataSyncResult) (closed bool) {\n\tif ch == nil {\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif recover() != nil {\n\t\t\tclosed = true\n\t\t}\n\t}()\n\n\tclose(ch)\n\treturn false\n}", "func (p *asyncPipeline) Close() {\n\tclose(p.chDie)\n\t<-p.chExit\n}", "func (s *Sync) Close() {\n\ts.n.Close()\n\ts.s.Close()\n\ts.a.Close()\n}", "func (v *DCHttpResponse) Close() {\n\tif !v.Raw.Close && v.Raw.Body != nil {\n\t\tv.Raw.Body.Close()\n\t}\n}", "func (c *context) Done() <-chan struct{} { return c.c.Done() }", "func (client *Client) Close() {\n\t// Set halting flag and then close our socket to server.\n\t// This will cause the blocked getIO() in readReplies() to return.\n\tclient.Lock()\n\tclient.halting = true\n\tif client.connection.state == CONNECTED {\n\t\tclient.connection.state = INITIAL\n\t\tclient.connection.Close()\n\t}\n\tclient.Unlock()\n\n\t// Wait for the goroutines to return\n\tclient.goroutineWG.Wait()\n\tbucketstats.UnRegister(bucketStatsPkgName, client.GetStatsGroupName())\n}", "func (c *channel) close() {\n\tc.heartbeatLck.Lock()\n\t// nil heartbeatStop indicates that heartbeat is disabled\n\t// due to lost remote and channel is basically in\n\t// \"dangling\" state.\n\tif c.heartbeatInterval != 0 && c.heartbeatStop != nil {\n\t\tc.heartbeatStop <- struct{}{}\n\t\tc.heartbeatTicker.Stop()\n\t\t<-c.heartbeatStop\n\t}\n\n\tc.heartbeatLck.Unlock()\n\n\tc.stateLck.Lock()\n\tdefer c.stateLck.Unlock()\n\n\tc.stopping = true\n\n\tclose(c.queue)\n\tc.errorCh = nil\n}", "func (c *client) Close() error { return c.c.Close() }", "func (t *SyncTransport) Close() error {\n\treturn nil\n}", "func (s *Connection) NotifyClose(c chan<- *Stream, timeout time.Duration) {\n\ts.goAwayTimeout = timeout\n\ts.lastStreamChan = c\n}", "func (p CustomProcessor) CloseAsync() {\n\t// Do 
nothing as our processor doesn't require resource cleanup.\n}", "func (conn *Tunnel) Close() {\n\tconn.once.Do(func() {\n\t\tconn.requestDisc()\n\n\t\tclose(conn.done)\n\t\tconn.wait.Wait()\n\n\t\tconn.sock.Close()\n\t})\n}", "func Close() {\n\tif App.WatchEnabled && !App.CmdMode {\n\t\tchannelExchangeCommands(LevelCritical, command{Action: \"DONE\"})\n\t}\n}", "func (s *Session) Close() error {\n\tif !s.IsReady() {\n\t\treturn errAlreadyClosed\n\t}\n\tif err := s.channel.Close(); err != nil {\n\t\treturn err\n\t}\n\tif err := s.connection.Close(); err != nil {\n\t\treturn err\n\t}\n\tclose(s.done)\n\n\ts.m.Lock()\n\t{\n\t\ts.isReady = false\n\t}\n\ts.m.Unlock()\n\n\treturn nil\n}", "func (s *SQL) CloseAsync() {\n\ts.closeOnce.Do(func() {\n\t\tclose(s.closeChan)\n\t})\n}", "func (c *ConnectionManager) Done() <-chan struct{} {\n\treturn c.done\n}", "func (conn *waitConn) WaitForClose() {\n\t<-conn.ctx.Done()\n}", "func (c *Client) Done() <-chan struct{} {\n\treturn c.errc\n}", "func (tv *TV) teardownResponseChannel(id string) {\n\ttv.resMutex.Lock()\n\tdefer tv.resMutex.Unlock()\n\n\tif ch, ok := tv.res[id]; ok {\n\t\tclose(ch)\n\t\tdelete(tv.res, id)\n\t}\n}", "func (p *Peer) quitSync(po int) {\n\tlive := NewStream(\"SYNC\", FormatSyncBinKey(uint8(po)), true)\n\thistory := getHistoryStream(live)\n\terr := p.streamer.Quit(p.ID(), live)\n\tif err != nil && err != p2p.ErrShuttingDown {\n\t\tlog.Error(\"quit\", \"err\", err, \"peer\", p.ID(), \"stream\", live)\n\t}\n\terr = p.streamer.Quit(p.ID(), history)\n\tif err != nil && err != p2p.ErrShuttingDown {\n\t\tlog.Error(\"quit\", \"err\", err, \"peer\", p.ID(), \"stream\", history)\n\t}\n\n\terr = p.removeServer(live)\n\tif err != nil {\n\t\tlog.Error(\"remove server\", \"err\", err, \"peer\", p.ID(), \"stream\", live)\n\t}\n\terr = p.removeServer(history)\n\tif err != nil {\n\t\tlog.Error(\"remove server\", \"err\", err, \"peer\", p.ID(), \"stream\", live)\n\t}\n}", "func (c *client) close() {\n\tc.leave()\n\tc.Conn.Close()\n\tc.Message <- \"/quit\"\n}", "func (closer *Closer) CloseChannel() chan struct{} {\n\treturn closer.channel\n}", "func TestAcceptClose(t *testing.T) {\n\tdoh := newFakeTransport()\n\tclient, server := makePair()\n\n\t// Start the forwarder running.\n\tgo Accept(doh, server)\n\n\tlbuf := make([]byte, 2)\n\t// Send Query\n\tqueryData := simpleQueryBytes\n\tbinary.BigEndian.PutUint16(lbuf, uint16(len(queryData)))\n\tclient.Write(lbuf)\n\tclient.Write(queryData)\n\n\t// Read query\n\tqueryRead := <-doh.query\n\tif !bytes.Equal(queryRead, queryData) {\n\t\tt.Error(\"Query mismatch\")\n\t}\n\n\t// Close the TCP connection\n\tclient.Close()\n\n\t// Send fake response too late.\n\tresponseData := []byte{1, 2, 8, 9, 10}\n\tdoh.response <- responseData\n}", "func (cha *Channel) close() {\n\t// not care about channel close error, because it's the client action\n\tcha.cha.Close()\n\n\tcha.conn.decrNumOpenedChannel()\n\tcha.cha = nil\n\tcha.conn = nil\n}", "func (r *RemoteSSH) Close() error {\n\tvar err error\n\tfor i := 0; i < r.n; i++ {\n\t\tclient := <-r.pool\n\t\terr = client.SendGoodbye()\n\t}\n\treturn err\n}", "func (fic *FakeClient) Close() {\n}", "func (c *Client) Close() {}", "func (c *Client) Close() {}", "func (c *Conn) closed() {\n\tc.done = true\n}", "func (r *Resource) WaitForClose(timeout time.Duration) error {\n\treturn nil\n}", "func (r *Resource) WaitForClose(timeout time.Duration) error {\n\treturn nil\n}", "func (c *Control) Done() <-chan struct{} {\n\treturn c.doneCh\n}", "func (c *TimeoutChan) Close() 
{\n\tclose(c.in)\n\tc.pushCtrl.Wait()\n\tclose(c.closePush)\n\tc.popCtrl.Wait()\n\tclose(c.out)\n\tclose(c.resumePush)\n\tclose(c.resumePop)\n\tclose(c.reschedule)\n}", "func (p *PipeWrapper) Close() {\n\tp.Done = true\n}", "func (c *client) Close() {\n\tc.exit <- 1\n}", "func Done(ctx context.Context) {\n\tif wait, ok := getWaitChan(ctx); ok {\n\t\tmutex.Lock()\n\t\tdefer mutex.Unlock()\n\n\t\tselect {\n\t\tcase <-wait:\n\t\tdefault:\n\t\t\tclose(wait)\n\t\t}\n\t}\n\n\treturn\n}", "func (s *Server) Done() <-chan struct{} {\n\treturn s.shuttingDown\n}", "func (c *ClientLink) close() error {\n\tc.Do(func() {\n\t\tc.log.Info(\"client is closing\")\n\t\tdefer c.log.Info(\"client has closed\")\n\t\tc.tomb.Kill(nil)\n\t\tc.manager.delClient(c)\n\t})\n\treturn c.tomb.Wait()\n}", "func (rm *REKTManager) Close() {\n\trm.wg.Wait()\n\tclose(rm.respchan)\n\trm.respwg.Wait()\n}", "func DoneResponse() Response {\n\tr := Response{\n\t\t&response{\n\t\t\terr: nil,\n\t\t\tch: make(chan struct{}),\n\t\t\tdone: true,\n\t\t},\n\t}\n\tclose(r.ch)\n\treturn r\n}" ]
[ "0.64019233", "0.6376821", "0.61740726", "0.5934891", "0.58843786", "0.5758393", "0.5754767", "0.5754767", "0.571977", "0.56921095", "0.5534144", "0.5531799", "0.5499394", "0.5489041", "0.5486152", "0.5458242", "0.54383165", "0.5437005", "0.5396349", "0.5362098", "0.5360334", "0.53585714", "0.53469133", "0.5342644", "0.5338041", "0.5294883", "0.52771986", "0.5275077", "0.52618283", "0.52488834", "0.5248501", "0.5231141", "0.522105", "0.5218225", "0.51968384", "0.51918924", "0.5181789", "0.5179935", "0.5173004", "0.51593924", "0.5148629", "0.51436037", "0.51255697", "0.5116255", "0.51106167", "0.51001716", "0.5095922", "0.50926137", "0.50836813", "0.50807893", "0.50782764", "0.5078092", "0.5075449", "0.5067238", "0.50623953", "0.50570834", "0.50479364", "0.5044633", "0.50357586", "0.50337875", "0.5027628", "0.50255156", "0.5015496", "0.5012572", "0.50098413", "0.5009165", "0.5007868", "0.5000022", "0.49922848", "0.49905273", "0.49817982", "0.4968971", "0.49664718", "0.49626964", "0.49621072", "0.4948155", "0.4941855", "0.49413657", "0.49396288", "0.4932942", "0.49159476", "0.49046955", "0.49040222", "0.48995936", "0.48995876", "0.4897589", "0.48975462", "0.48975462", "0.4895956", "0.4895935", "0.4895935", "0.489424", "0.4887257", "0.48793224", "0.48675412", "0.48608634", "0.4857804", "0.48576218", "0.4856615", "0.48553234" ]
0.8075955
0
tagsHash returns a hash of tag key/value pairs.
func (r *Row) tagsHash() uint64 {
	h := fnv.New64a()
	keys := r.tagsKeys()
	for _, k := range keys {
		h.Write([]byte(k))
		h.Write([]byte(r.Tags[k]))
	}
	return h.Sum64()
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (ts *TagSet) Hash() (uint64, uint64) {\n\treturn ts.hashH, ts.hashL\n}", "func hash(ls prometheus.Tags) uint64 {\n\tlbs := make(labels.Labels, 0, len(ls))\n\tfor k, v := range ls {\n\t\tlbs = append(lbs, labels.Label{\n\t\t\tName: k,\n\t\t\tValue: v,\n\t\t})\n\t}\n\n\tsort.Slice(lbs[:], func(i, j int) bool {\n\t\treturn lbs[i].Name < lbs[j].Name\n\t})\n\n\treturn lbs.Hash()\n}", "func Tags(tags tftags.KeyValueTags) map[string]*string {\n\treturn aws.StringMap(tags.Map())\n}", "func (fs *Memory) Tags(_ string) (map[string]string, error) {\n\ttags := make(map[string]string)\n\treturn tags, nil\n}", "func (dtk *DcmTagKey) Hash() uint32 {\n\treturn ((uint32(int(dtk.group)<<16) & 0xffff0000) | (uint32(int(dtk.element) & 0xffff)))\n}", "func (ts *TagSet) HashH() uint64 {\n\treturn ts.hashH\n}", "func calculateAttributesHash(attributes []string) (attrHash string) {\n\n\tkeys := make([]string, len(attributes))\n\n\tfor _, k := range attributes {\n\t\tkeys = append(keys, k)\n\t}\n\n\tsort.Strings(keys)\n\n\tvalues := make([]byte, len(keys))\n\n\tfor _, k := range keys {\n\t\tvb := []byte(k)\n\t\tfor _, bval := range vb {\n\t\t\tvalues = append(values, bval)\n\t\t}\n\t}\n\tattributesHash := primitives.Hash(values)\n\treturn hex.EncodeToString(attributesHash)\n\n}", "func (E ERC20Lock) Tags() kv.Pairs {\n\ttags := make([]kv.Pair, 0)\n\n\ttag := kv.Pair{\n\t\tKey: []byte(\"tx.type\"),\n\t\tValue: []byte(E.Type().String()),\n\t}\n\ttag2 := kv.Pair{\n\t\tKey: []byte(\"tx.locker\"),\n\t\tValue: E.Locker.Bytes(),\n\t}\n\ttag3 := kv.Pair{\n\t\tKey: []byte(\"tx.tracker\"),\n\t\tValue: ethcommon.BytesToHash(E.ETHTxn).Bytes(),\n\t}\n\n\ttags = append(tags, tag, tag2, tag3)\n\treturn tags\n}", "func (addr *Address) Tag() []byte {\n\tvar a = make([]byte, 32)\n\tcopy(a, addr.calcDoubleHash()[32:])\n\treturn a\n}", "func Tags(v interface{}, key string) (map[string]string, error) {\n\treturn New(v).Tags(key)\n}", "func (o BucketLifecycleConfigurationV2RuleFilterAndOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v BucketLifecycleConfigurationV2RuleFilterAnd) map[string]string { return v.Tags }).(pulumi.StringMapOutput)\n}", "func Tags(tags tftags.KeyValueTags) []*inspector.Tag {\n\tresult := make([]*inspector.Tag, 0, len(tags))\n\n\tfor k, v := range tags.Map() {\n\t\ttag := &inspector.Tag{\n\t\t\tKey: aws.String(k),\n\t\t\tValue: aws.String(v),\n\t\t}\n\n\t\tresult = append(result, tag)\n\t}\n\n\treturn result\n}", "func (o DatastoreFileshareOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *DatastoreFileshare) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func tagKeysToMap(tags string) map[string]*string {\n\toutput := make(map[string]*string)\n\n\tfor _, tag := range strings.Split(strings.TrimSpace(tags), \",\") {\n\t\tsplit := strings.SplitN(tag, \"=\", 2)\n\t\tkey := strings.TrimSpace(split[0])\n\t\tvalue := \"\"\n\n\t\tif key == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif len(split) > 1 {\n\t\t\tvalue = strings.TrimSpace(split[1])\n\t\t}\n\n\t\toutput[key] = &value\n\t}\n\n\tif len(output) == 0 {\n\t\treturn nil\n\t}\n\n\treturn output\n}", "func (c *TenetConfig) Hash() string {\n\thash := strings.Join([]string{c.Name, c.Driver, c.Registry, c.Tag}, \",\")\n\tfor k, v := range c.Options {\n\t\thash += k + v.(string)\n\t}\n\treturn hash\n}", "func (o AnalyticsConfigurationFilterOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v AnalyticsConfigurationFilter) map[string]string { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (r 
*Bucket) Tags() pulumi.MapOutput {\n\treturn (pulumi.MapOutput)(r.s.State[\"tags\"])\n}", "func (o BucketLifecycleRuleOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v BucketLifecycleRule) map[string]string { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (spec Spec) DeepHash() string {\n\thash := sha512.New512_224()\n\tspec.DefaultService.hash(hash)\n\tfor _, rule := range spec.Rules {\n\t\trule.hash(hash)\n\t}\n\tsvcs := make([]string, len(spec.AllServices))\n\ti := 0\n\tfor k := range spec.AllServices {\n\t\tsvcs[i] = k\n\t\ti++\n\t}\n\tsort.Strings(svcs)\n\tfor _, svc := range svcs {\n\t\thash.Write([]byte(svc))\n\t\tspec.AllServices[svc].hash(hash)\n\t}\n\tspec.ShardCluster.hash(hash)\n\thash.Write([]byte(spec.VCL))\n\tfor _, auth := range spec.Auths {\n\t\tauth.hash(hash)\n\t}\n\tfor _, acl := range spec.ACLs {\n\t\tacl.hash(hash)\n\t}\n\tfor _, rw := range spec.Rewrites {\n\t\trw.hash(hash)\n\t}\n\tfor _, reqDisp := range spec.Dispositions {\n\t\treqDisp.hash(hash)\n\t}\n\th := new(big.Int)\n\th.SetBytes(hash.Sum(nil))\n\treturn h.Text(62)\n}", "func (l *LocalService) GetTags() map[string]string {\n\treturn map[string]string{}\n}", "func (o BucketLifecycleConfigurationV2RuleFilterAndPtrOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *BucketLifecycleConfigurationV2RuleFilterAnd) map[string]string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Tags\n\t}).(pulumi.StringMapOutput)\n}", "func Hashtags(ctx *context.Context) {\n\n\t// get the LANG-ubn repository name prefix\n\tvar repo_prefix string\n\tif strings.HasSuffix(ctx.Repo.Repository.Name, \"-ubn\") {\n\t\trepo_prefix = ctx.Repo.Repository.Name\n\t} else {\n\t\tchar_index := strings.LastIndex(ctx.Repo.Repository.Name, \"-ubn-\")\n\t\trepo_prefix = ctx.Repo.Repository.Name[0:char_index + 4]\n\t}\n\n\tctx.Data[\"username\"] = ctx.Repo.Repository.Owner.Name\n\tctx.Data[\"reponame\"] = ctx.Repo.Repository.Name\n\tctx.Data[\"RepoLink\"] = ctx.Repo.Repository.Link()\n\tctx.Data[\"Title\"] = ctx.Tr(\"repo.hashtag.all_hashtags\", ctx.Repo.Repository.Owner.Name + \"/\" + repo_prefix)\n\tresults, err := models.GetHashtagSummary(repo_prefix, ctx.Repo.Repository.Owner.ID)\n\n\tif err != nil {\n\t\tlog.Error(4, \"Hashtags: %v\", err)\n\t\tctx.Handle(http.StatusInternalServerError, \"GetHashtagSummary\", err)\n\t\treturn\n\t}\n\tctx.Data[\"Tags\"] = results\n\n\tctx.HTML(200, HASHTAGS)\n}", "func (o AnalyticsConfigurationFilterPtrOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *AnalyticsConfigurationFilter) map[string]string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Tags\n\t}).(pulumi.StringMapOutput)\n}", "func (o BucketMetricFilterOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v BucketMetricFilter) map[string]string { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (m *Measurement) uniqueTagValues(expr influxql.Expr) map[string][]string {\n\t// Track unique value per tag.\n\ttags := make(map[string]map[string]struct{})\n\n\t// Find all tag values referenced in the expression.\n\tinfluxql.WalkFunc(expr, func(n influxql.Node) {\n\t\tswitch n := n.(type) {\n\t\tcase *influxql.BinaryExpr:\n\t\t\t// Ignore operators that are not equality.\n\t\t\tif n.Op != influxql.EQ {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Extract ref and string literal.\n\t\t\tvar key, value string\n\t\t\tswitch lhs := n.LHS.(type) {\n\t\t\tcase *influxql.VarRef:\n\t\t\t\tif rhs, ok := n.RHS.(*influxql.StringLiteral); ok {\n\t\t\t\t\tkey, value = lhs.Val, 
rhs.Val\n\t\t\t\t}\n\t\t\tcase *influxql.StringLiteral:\n\t\t\t\tif rhs, ok := n.RHS.(*influxql.VarRef); ok {\n\t\t\t\t\tkey, value = rhs.Val, lhs.Val\n\t\t\t\t}\n\t\t\t}\n\t\t\tif key == \"\" {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Add value to set.\n\t\t\tif tags[key] == nil {\n\t\t\t\ttags[key] = make(map[string]struct{})\n\t\t\t}\n\t\t\ttags[key][value] = struct{}{}\n\t\t}\n\t})\n\n\t// Convert to map of slices.\n\tout := make(map[string][]string)\n\tfor k, values := range tags {\n\t\tout[k] = make([]string, 0, len(values))\n\t\tfor v := range values {\n\t\t\tout[k] = append(out[k], v)\n\t\t}\n\t\tsort.Strings(out[k])\n\t}\n\treturn out\n}", "func (options *Options) Hash() map[string]interface{} {\n\treturn options.hash\n}", "func (v *View) TagFilter() map[string]string {\n\tfilter := map[string]string{}\n\tfor _, t := range v.tags {\n\t\tp := strings.Split(t, \"=\")\n\t\tif len(p) == 2 {\n\t\t\tfilter[p[0]] = p[1]\n\t\t} else {\n\t\t\tfilter[p[0]] = \"\"\n\t\t}\n\t}\n\treturn filter\n}", "func (o LinuxWebAppOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *LinuxWebApp) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (t *TxPublish) Hash() (out [32]byte) {\n\th := sha256.New()\n\tbinary.Write(h, binary.LittleEndian,\n\t\tuint32(len(t.Name)))\n\th.Write([]byte(t.Name))\n\th.Write(t.MetafileHash)\n\tcopy(out[:], h.Sum(nil))\n\treturn\n}", "func GetHashStrategies() map[string]hash.Hash {\n\thashMap := map[string]hash.Hash{\n\t\t\"sha256\": sha256.New(),\n\t}\n\treturn hashMap\n}", "func (s *Structx) Tags(key string) (map[string]string, error) {\n\tresult := map[string]string{}\n\n\tfs := s.Fields()\n\tfor _, v := range fs {\n\t\tif !v.IsExport() {\n\t\t\tcontinue\n\t\t}\n\t\tresult[v.Name()] = v.Tag(key)\n\t}\n\n\treturn result, nil\n}", "func (o BucketIntelligentTieringConfigurationFilterOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v BucketIntelligentTieringConfigurationFilter) map[string]string { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (o VolumeGroupSapHanaVolumeOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v VolumeGroupSapHanaVolume) map[string]string { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (o BucketOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *Bucket) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (o GetVolumeGroupSapHanaVolumeOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v GetVolumeGroupSapHanaVolume) map[string]string { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (r *Rule) Tags() pulumi.MapOutput {\n\treturn (pulumi.MapOutput)(r.s.State[\"tags\"])\n}", "func (o TrackerOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *Tracker) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func ParseTags(in string) (map[string]string, error) {\n\tout := map[string]string{}\n\n\tfor _, entry := range strings.Split(in, \",\") {\n\t\tentry = strings.TrimSpace(entry)\n\t\tif entry == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tfields := strings.SplitN(entry, \":\", 2)\n\t\tif len(fields) < 2 {\n\t\t\treturn nil, fmt.Errorf(\"Invalid tag: %s\", entry)\n\t\t}\n\n\t\t_, ok := out[fields[0]]\n\t\tif ok {\n\t\t\treturn nil, fmt.Errorf(\"Duplicate tag: %s\", entry)\n\t\t}\n\n\t\tout[fields[0]] = fields[1]\n\t}\n\n\treturn out, nil\n}", "func (m *Measurement) TagKeys() []string {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\tkeys := make([]string, 0, len(m.seriesByTagKeyValue))\n\tfor k := 
range m.seriesByTagKeyValue {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\treturn keys\n}", "func (o BucketMetricFilterPtrOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *BucketMetricFilter) map[string]string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Tags\n\t}).(pulumi.StringMapOutput)\n}", "func (term *Term) Hash() int {\n\treturn term.Value.Hash()\n}", "func (o LookupSharedImageResultOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v LookupSharedImageResult) map[string]string { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (o BucketIntelligentTieringConfigurationFilterPtrOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *BucketIntelligentTieringConfigurationFilter) map[string]string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Tags\n\t}).(pulumi.StringMapOutput)\n}", "func TagsDiff(sqsTags map[string]string, newTags map[string]string) (removed, added map[string]string) {\n\tremoved = map[string]string{}\n\tfor k, v := range sqsTags {\n\t\tif _, ok := newTags[k]; !ok {\n\t\t\tremoved[k] = v\n\t\t}\n\t}\n\n\tadded = map[string]string{}\n\tfor k, newV := range newTags {\n\t\tif oldV, ok := sqsTags[k]; !ok || oldV != newV {\n\t\t\tadded[k] = newV\n\t\t}\n\t}\n\treturn\n}", "func (aiService *AppinsightsMonitorService) getTags(tagType string, name string) map[string]*string {\n\n\ttags := make(map[string]*string)\n\n\tcomponentHiddenlink := fmt.Sprintf(\"hidden-link:/subscriptions/%s/resourceGroups/%s/providers/microsoft.insights/components/%s\", aiService.subscriptionID, aiService.resourceGroup, aiService.name)\n\twebtestHiddenlink := fmt.Sprintf(\"hidden-link:/subscriptions/%s/resourceGroups/%s/providers/microsoft.insights/webtests/%s\", aiService.subscriptionID, aiService.resourceGroup, name)\n\tvalue := \"Resource\"\n\n\tif tagType == \"webtest\" {\n\t\ttags[componentHiddenlink] = &value\n\t}\n\n\tif tagType == \"alert\" {\n\n\t\ttags[componentHiddenlink] = &value\n\t\ttags[webtestHiddenlink] = &value\n\t}\n\n\treturn tags\n}", "func (h *Header) Hash() [32]byte {\n\tvar f []string\n\tif h.Description.Value != \"\" {\n\t\tf = append(f, h.Description.Value)\n\t}\n\tf = append(f, fmt.Sprint(h.Required.Value))\n\tf = append(f, fmt.Sprint(h.Deprecated.Value))\n\tf = append(f, fmt.Sprint(h.AllowEmptyValue.Value))\n\tif h.Style.Value != \"\" {\n\t\tf = append(f, h.Style.Value)\n\t}\n\tf = append(f, fmt.Sprint(h.Explode.Value))\n\tf = append(f, fmt.Sprint(h.AllowReserved.Value))\n\tif h.Schema.Value != nil {\n\t\tf = append(f, low.GenerateHashString(h.Schema.Value))\n\t}\n\tif h.Example.Value != nil {\n\t\tf = append(f, fmt.Sprint(h.Example.Value))\n\t}\n\tif len(h.Examples.Value) > 0 {\n\t\tfor k := range h.Examples.Value {\n\t\t\tf = append(f, fmt.Sprintf(\"%s-%x\", k.Value, h.Examples.Value[k].Value.Hash()))\n\t\t}\n\t}\n\tif len(h.Content.Value) > 0 {\n\t\tfor k := range h.Content.Value {\n\t\t\tf = append(f, fmt.Sprintf(\"%s-%x\", k.Value, h.Content.Value[k].Value.Hash()))\n\t\t}\n\t}\n\tkeys := make([]string, len(h.Extensions))\n\tz := 0\n\tfor k := range h.Extensions {\n\t\tkeys[z] = fmt.Sprintf(\"%s-%x\", k.Value, sha256.Sum256([]byte(fmt.Sprint(h.Extensions[k].Value))))\n\t\tz++\n\t}\n\tsort.Strings(keys)\n\tf = append(f, keys...)\n\treturn sha256.Sum256([]byte(strings.Join(f, \"|\")))\n}", "func (n *SoupNode) Hash() (uint32, error) {\n\treturn hashString(fmt.Sprintf(\"%v\", *n)), nil\n}", "func (t *File) Hash() [32]byte {\n\tvar out [32]byte\n\th := sha256.New()\n\tbinary.Write(h, 
binary.LittleEndian, uint32(len(t.Name)))\n\th.Write([]byte(t.Name))\n\th.Write(t.MetafileHash)\n\tcopy(out[:], h.Sum(nil))\n\treturn out\n}", "func (t *TxPublish) Hash() (out [32]byte) {\n\th := sha256.New()\n\tbinary.Write(h, binary.LittleEndian,\n\t\tuint32(len(t.File.Name)))\n\th.Write([]byte(t.File.Name))\n\th.Write(t.File.MetafileHash)\n\tcopy(out[:], h.Sum(nil))\n\treturn\n}", "func parseTags(tag string) (ret block.Dict) {\n\tif len(tag) > 0 {\n\t\ttags := make(block.Dict)\n\t\tvisitTags(tag, func(k, v string) {\n\t\t\ttags[k] = parseCommas(v)\n\t\t})\n\t\tret = tags\n\t}\n\treturn\n}", "func (o PhpAppLayerOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *PhpAppLayer) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (o FaqOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *Faq) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (o BucketReplicationConfigurationRuleFilterOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v BucketReplicationConfigurationRuleFilter) map[string]string { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (o *FiltersVirtualGateway) GetTagKeys() []string {\n\tif o == nil || o.TagKeys == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn *o.TagKeys\n}", "func (o DataCollectionRuleOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *DataCollectionRule) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (ts *TagSet) HashL() uint64 {\n\treturn ts.hashL\n}", "func (config *Configuration) ContainerisationHash() []byte {\n\th := sha1.New()\n\tencoder := gob.NewEncoder(h)\n\tif err := encoder.Encode(config.Docker); err != nil {\n\t\tpanic(err)\n\t}\n\treturn h.Sum(nil)\n}", "func (r *VpcLink) Tags() pulumi.MapOutput {\n\treturn (pulumi.MapOutput)(r.s.State[\"tags\"])\n}", "func (o AnalyzerOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *Analyzer) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (o AnalyzerOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *Analyzer) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (o RunBookOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *RunBook) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func mapTags(tags []*elb.Tag) map[string]string {\n\ttagMap := make(map[string]string)\n\tfor _, t := range tags {\n\t\ttagMap[*t.Key] = *t.Value\n\t}\n\n\treturn tagMap\n}", "func (r *Trail) Tags() pulumi.MapOutput {\n\treturn (pulumi.MapOutput)(r.s.State[\"tags\"])\n}", "func lbTags(app string, process string) map[string]string {\n\treturn map[string]string{\n\t\t\"AppID\": app,\n\t\t\"ProcessType\": process,\n\t}\n}", "func (o LookupOpenZfsSnapshotResultOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v LookupOpenZfsSnapshotResult) map[string]string { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (h *Hub) Tags() []string {\n\treturn h.tags\n}", "func (d *DiskIOState) GetTags() []string {\n\tvar tags []string\n\tvalueMap := structs.Map(d)\n\tfor k := range valueMap {\n\t\ttags = append(tags, k)\n\t}\n\n\treturn tags\n}", "func (o AppMonitorOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *AppMonitor) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (o BucketReplicationConfigRuleFilterAndOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v BucketReplicationConfigRuleFilterAnd) 
map[string]string { return v.Tags }).(pulumi.StringMapOutput)\n}", "func GetTags(dbOwner, dbFolder, dbName string) (tags map[string]TagEntry, err error) {\n\tdbQuery := `\n\t\tSELECT tag_list\n\t\tFROM sqlite_databases\n\t\tWHERE user_id = (\n\t\t\t\tSELECT user_id\n\t\t\t\tFROM users\n\t\t\t\tWHERE lower(user_name) = lower($1)\n\t\t\t)\n\t\t\tAND folder = $2\n\t\t\tAND db_name = $3`\n\terr = pdb.QueryRow(dbQuery, dbOwner, dbFolder, dbName).Scan(&tags)\n\tif err != nil {\n\t\tlog.Printf(\"Error when retrieving tags for database '%s%s%s': %v\\n\", dbOwner, dbFolder, dbName, err)\n\t\treturn nil, err\n\t}\n\tif tags == nil {\n\t\t// If there aren't any tags yet, return an empty set instead of nil\n\t\ttags = make(map[string]TagEntry)\n\t}\n\treturn tags, nil\n}", "func KeyValueTags(ctx context.Context, tags map[string]*string) tftags.KeyValueTags {\n\treturn tftags.New(ctx, tags)\n}", "func (o BucketOutput) Tags() pulumi.MapOutput {\n\treturn o.ApplyT(func(v *Bucket) pulumi.MapOutput { return v.Tags }).(pulumi.MapOutput)\n}", "func XXHashOfKeyValues(kvs KeyValues) uint64 {\n\ttagKeysLen := len(kvs)\n\tswitch tagKeysLen {\n\tcase 0:\n\t\treturn emptyStringHash\n\tcase 1:\n\t\t// no need to resort when its length is 1\n\tdefault:\n\t\tif !sort.IsSorted(kvs) {\n\t\t\tsort.Sort(kvs)\n\t\t}\n\t}\n\tvar expectLen int\n\t// calculate expected concated string length\n\tfor idx := range kvs {\n\t\texpectLen += len(kvs[idx].Key) + len(kvs[idx].Value) + 1\n\t}\n\texpectLen += tagKeysLen - 1\n\n\tif expectLen <= 256 {\n\t\tvar slice [256]byte\n\t\t// default slice on stack is 256\n\t\treturn xxHashOfSortedKeyValuesOnSlice(slice[:], kvs)\n\t}\n\t// use slice on heap\n\tslice := *getSlice(expectLen)\n\th := xxHashOfSortedKeyValuesOnSlice(slice, kvs)\n\tputSlice(&slice)\n\treturn h\n}", "func hash(m datasource.Metric) uint64 {\n\thash := fnv.New64a()\n\tlabels := m.Labels\n\tsort.Slice(labels, func(i, j int) bool {\n\t\treturn labels[i].Name < labels[j].Name\n\t})\n\tfor _, l := range labels {\n\t\t// drop __name__ to be consistent with Prometheus alerting\n\t\tif l.Name == \"__name__\" {\n\t\t\tcontinue\n\t\t}\n\t\thash.Write([]byte(l.Name))\n\t\thash.Write([]byte(l.Value))\n\t\thash.Write([]byte(\"\\xff\"))\n\t}\n\treturn hash.Sum64()\n}", "func (p PropertyHashList) Hash() string {\n\tglobalSum := sha256.New()\n\tfor _, hash := range p {\n\t\t_, _ = globalSum.Write(hash.Hash)\n\t}\n\n\tsum := globalSum.Sum(nil)\n\n\treturn hex.EncodeToString(sum)\n}", "func (o BucketV2LifecycleRuleOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v BucketV2LifecycleRule) map[string]string { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (sc *SetComprehension) Hash() int {\n\treturn sc.Term.Hash() + sc.Body.Hash()\n}", "func (oc *ObjectComprehension) Hash() int {\n\treturn oc.Key.Hash() + oc.Value.Hash() + oc.Body.Hash()\n}", "func (o FleetOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *Fleet) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (m *Application) GetTags()([]string) {\n return m.tags\n}", "func (o ListenerRuleOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *ListenerRule) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (o RouteFilterOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *RouteFilter) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (o IotHubDeviceUpdateAccountOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v 
*IotHubDeviceUpdateAccount) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (mem *MemberKey) Tag() []byte {\n\treturn mem.a.Marshal()\n}", "func (o TaskDefinitionOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *TaskDefinition) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (is *IfaceStat) GetTags() []string {\n\tvar tags []string\n\tvalueMap := structs.Map(is)\n\tfor k := range valueMap {\n\t\ttags = append(tags, k)\n\t}\n\n\treturn tags\n}", "func (o VirtualGatewayOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *VirtualGateway) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (o OpenZfsVolumeOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *OpenZfsVolume) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (r *PrivateVirtualInterface) Tags() pulumi.MapOutput {\n\treturn (pulumi.MapOutput)(r.s.State[\"tags\"])\n}", "func (cs *ChangeSet) Hash() (string, error) {\n\treturn cs.Added.Hash()\n}", "func (h *Hash) Etag() string {\n\tlength := h.length()\n\n\thash := h.Hash.Sum(nil)\n\n\tvar etagBuf [2 + 64]byte\n\tetag := buffer(2+length, etagBuf[:0])\n\n\tetag[0] = '\"'\n\thex.Encode(etag[1:], hash[:(length+1)/2])\n\tetag[1+length] = '\"'\n\n\treturn string(etag[:2+length])\n}", "func Hash16Tag(name interface{}, value [16]byte) Tag {\n\treturn &tag{\n\t\ttagType: TagHash16,\n\t\tname: name,\n\t\tvalue: value[:],\n\t}\n}", "func Sha256Hash(data []byte) [32]byte {\n\tsum := sha256.Sum256(data)\n\treturn sum\n}", "func (r *Cluster) Tags() pulumi.MapOutput {\n\treturn (pulumi.MapOutput)(r.s.State[\"tags\"])\n}", "func (r *Cluster) Tags() pulumi.MapOutput {\n\treturn (pulumi.MapOutput)(r.s.State[\"tags\"])\n}", "func (ref Ref) Hash() int {\n\treturn termSliceHash(ref)\n}", "func uniqueTags(t1 gostatsd.Tags, t2 gostatsd.Tags) gostatsd.Tags {\n\treturn uniqueTagsWithSeen(map[string]struct{}{}, t1, t2)\n}", "func (hdr RPMHeader) Tag(tagname string) []string {\n\tfor _, tag := range hdr.Tags {\n\t\tif tag.Name == tagname {\n\t\t\treturn tag.Values\n\t\t}\n\t}\n\treturn []string{\"\"}\n}", "func (sm *SpecMore) TagsMap(inclTop, inclOps bool) map[string]int {\n\ttagsMap := map[string]int{}\n\tif inclTop {\n\t\tfor _, tag := range sm.Spec.Tags {\n\t\t\ttagName := strings.TrimSpace(tag.Name)\n\t\t\tif len(tagName) > 0 {\n\t\t\t\tif _, ok := tagsMap[tagName]; !ok {\n\t\t\t\t\ttagsMap[tagName] = 0\n\t\t\t\t}\n\t\t\t\ttagsMap[tagName]++\n\t\t\t}\n\t\t}\n\t}\n\tif inclOps {\n\t\tVisitOperations(sm.Spec, func(skipPath, skipMethod string, op *oas3.Operation) {\n\t\t\tfor _, tagName := range op.Tags {\n\t\t\t\ttagName = strings.TrimSpace(tagName)\n\t\t\t\tif len(tagName) > 0 {\n\t\t\t\t\tif _, ok := tagsMap[tagName]; !ok {\n\t\t\t\t\t\ttagsMap[tagName] = 0\n\t\t\t\t\t}\n\t\t\t\t\ttagsMap[tagName]++\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n\treturn tagsMap\n}", "func (m FieldMap) Tags() []Tag {\n\ttags := make([]Tag, 0, len(m.tagLookup))\n\tfor t := range m.tagLookup {\n\t\ttags = append(tags, t)\n\t}\n\n\treturn tags\n}" ]
[ "0.6286525", "0.6004645", "0.59311426", "0.56797147", "0.56032187", "0.5577137", "0.54982436", "0.53909326", "0.5379334", "0.53297776", "0.53186053", "0.5296071", "0.52821624", "0.52794707", "0.5247144", "0.52414274", "0.5201673", "0.51643705", "0.5162638", "0.515358", "0.51476634", "0.51329476", "0.51328695", "0.5132421", "0.5126367", "0.5108364", "0.5106931", "0.5102131", "0.50774586", "0.5074795", "0.5061431", "0.5057207", "0.5053363", "0.50531805", "0.5030301", "0.50166553", "0.49709898", "0.4967557", "0.49647924", "0.4958717", "0.49564394", "0.49508044", "0.49502766", "0.4948272", "0.4942676", "0.49295714", "0.49272797", "0.4911161", "0.49094343", "0.48994702", "0.48940262", "0.48892167", "0.4883441", "0.4882536", "0.4881069", "0.48806366", "0.4878204", "0.48729536", "0.48692313", "0.48692313", "0.4869155", "0.4867446", "0.48613828", "0.48613754", "0.48589638", "0.4855152", "0.48479712", "0.48364034", "0.4836168", "0.48333228", "0.48330766", "0.48330563", "0.48330373", "0.48306137", "0.48299703", "0.4823822", "0.4820776", "0.48141935", "0.4802012", "0.48001546", "0.4799396", "0.47973028", "0.4789789", "0.47810674", "0.4779265", "0.4774953", "0.47741872", "0.47714847", "0.47627228", "0.47604418", "0.47585294", "0.47570154", "0.4755453", "0.47522926", "0.47522926", "0.47505763", "0.47504672", "0.47495836", "0.47476804", "0.47418493" ]
0.7376118
0
tagsKeys returns a sorted list of tag keys.
func (r *Row) tagsKeys() []string {
	a := make([]string, 0, len(r.Tags))
	for k := range r.Tags {
		a = append(a, k)
	}
	sort.Strings(a)
	return a
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *Measurement) TagKeys() []string {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\tkeys := make([]string, 0, len(m.seriesByTagKeyValue))\n\tfor k := range m.seriesByTagKeyValue {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\treturn keys\n}", "func (target *Target) TagKeys() []string {\n\n\tkeys := make([]string, len(target.Tags))\n\n\tfor i, tag := range target.Tags {\n\t\tkeys[i] = tag.Key\n\t\ti++\n\t}\n\n\treturn keys\n}", "func (o *FiltersVirtualGateway) GetTagKeys() []string {\n\tif o == nil || o.TagKeys == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn *o.TagKeys\n}", "func (o *FiltersVmGroup) GetTagKeys() []string {\n\tif o == nil || o.TagKeys == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn *o.TagKeys\n}", "func (o *FiltersNet) GetTagKeys() []string {\n\tif o == nil || o.TagKeys == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn *o.TagKeys\n}", "func (o *FiltersNatService) GetTagKeys() []string {\n\tif o == nil || o.TagKeys == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn *o.TagKeys\n}", "func (o *FiltersSecurityGroup) GetTagKeys() []string {\n\tif o == nil || o.TagKeys == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn *o.TagKeys\n}", "func (m *logMeasurement) keys() []string {\n\ta := make([]string, 0, len(m.tagSet))\n\tfor k := range m.tagSet {\n\t\ta = append(a, k)\n\t}\n\tsort.Strings(a)\n\treturn a\n}", "func (self *Map) Keys(tagName ...string) []interface{} {\n\treturn Keys(self.MapNative(tagName...))\n}", "func SortedKeys(i interface{}) []string {\n\tvMap := reflect.ValueOf(i)\n\tvKeys := vMap.MapKeys()\n\tkeys := make([]string, len(vKeys), len(vKeys))\n\tidx := 0\n\tfor _,vKey := range vKeys {\n\t\tkeys[idx] = vKey.String()\n\t\tidx++\n\t}\n\tsort.Strings(keys)\n\treturn keys\n}", "func (client *Client) ListTagKeys(request *ListTagKeysRequest) (_result *ListTagKeysResponse, _err error) {\n\truntime := &util.RuntimeOptions{}\n\t_result = &ListTagKeysResponse{}\n\t_body, _err := client.ListTagKeysWithOptions(request, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_result = _body\n\treturn _result, _err\n}", "func (v ComponentVersions) OrderedKeys() []string {\n\tkeys := make([]string, 0, len(v))\n\tfor k := range v {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\treturn keys\n}", "func loadTagKeys(tagKeyBucket *bbolt.Bucket) (tags []tag.Meta) {\n\tcursor := tagKeyBucket.Cursor()\n\tfor k, v := cursor.First(); k != nil; k, v = cursor.Next() {\n\t\ttags = append(tags, tag.Meta{\n\t\t\tKey: string(k),\n\t\t\tID: binary.LittleEndian.Uint32(v),\n\t\t})\n\t}\n\treturn\n}", "func (self *Map) StringKeys(tagName ...string) []string {\n\treturn sliceutil.Stringify(self.Keys(tagName...))\n}", "func (tx *Tx) KeysSorted(rev bool) []string {\n\tkeys := tx.Keys()\n\tif rev {\n\t\tsort.Sort(sort.Reverse(sort.StringSlice(keys)))\n\t} else {\n\t\tsort.Sort(sort.StringSlice(keys))\n\t}\n\treturn keys\n}", "func (gc *GokuyamaClient) GetKeysByTag(tag string) ([]string, error) {\n\n\tfmt.Fprintf(gc.conn, fmt.Sprintf(\"3,%s,False\\n\", base64.StdEncoding.EncodeToString([]byte(tag))))\n\n\tret, err := bufio.NewReader(gc.conn).ReadString('\\n')\n\tif ret == \"\" {\n\t\tfmt.Println(err)\n\t}\n\n\trets := strings.Split(ret, \",\")\n\n\tif rets[1] == \"true\" {\n\n\t\ttags := strings.Split(rets[2], \":\")\n\n\t\tfor i, tag := range tags {\n\t\t\tdata, err := base64.StdEncoding.DecodeString(tag)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t\ttags[i] = 
string(data)\n\t\t}\n\n\t\treturn tags, err\n\t} else {\n\t\treturn nil, nil\n\t}\n}", "func (s *Store) GetKeysSortedByName() (result []string) {\n\tresult = make([]string, 0)\n\n\tfor key := range *s {\n\t\tresult = append(result, key)\n\t}\n\n\tsort.Strings(result)\n\n\treturn result\n}", "func SortedKeys(m map[string]interface{}) []string {\n\tkeys := Keys(m)\n\tsort.Strings(keys)\n\treturn keys\n}", "func (tn *TreeNode) Keys() []string {\n\tkeys := make([]string, len(tn.Children))\n\n\ti := 0\n\tfor k := range tn.Children {\n\t\tkeys[i] = k\n\t\ti++\n\t}\n\tsort.StringSlice.Sort(keys)\n\treturn keys\n}", "func (sf SearchFilter) GetKeys() []string {\n\tret := funk.Keys(sf.Keys).([]string)\n\tsort.Strings(ret)\n\treturn ret\n}", "func getSortedKeys(modules map[string]*TerraformModule) []string {\n\tkeys := []string{}\n\tfor key := range modules {\n\t\tkeys = append(keys, key)\n\t}\n\n\tsort.Strings(keys)\n\n\treturn keys\n}", "func (m Map) orderedKeys() []string {\n\tkeys := []string{}\n\tfor key := range m {\n\t\tkeys = append(keys, key)\n\t}\n\tsort.Strings(keys)\n\treturn keys\n}", "func (m LabelMap) Keys() []string {\n\tresult := make([]string, 0, len(m))\n\tfor label := range m {\n\t\tresult = append(result, label)\n\t}\n\tsort.Strings(result)\n\treturn result\n}", "func (m *OrderedMap) Keys() []string { return m.keys }", "func getSortedKeys(m map[string]string) []string {\n\tkeys := make([]string, 0, len(m))\n\tfor key := range m {\n\t\tkeys = append(keys, key)\n\t}\n\tsort.Strings(keys)\n\treturn keys\n}", "func (o *FiltersVmGroup) SetTagKeys(v []string) {\n\to.TagKeys = &v\n}", "func (mb *metadataBackend) getAllTagKeys(metricID uint32) (tags []tag.Meta, err error) {\n\tvar scratch [4]byte\n\tbinary.LittleEndian.PutUint32(scratch[:], metricID)\n\terr = mb.db.View(func(tx *bbolt.Tx) error {\n\t\tmetricBucket := tx.Bucket(metricBucketName).Bucket(scratch[:])\n\t\tif metricBucket != nil {\n\t\t\ttags = loadTagKeys(metricBucket.Bucket(tagBucketName))\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"%w, metricID: %d\", constants.ErrMetricBucketNotFound, metricID)\n\t})\n\treturn\n}", "func (i StringHashMap[T, V]) Keys() []T {\n\tresult := make([]T, 0, len(i.hashToKey))\n\tfor _, key := range i.hashToKey {\n\t\tresult = append(result, key)\n\t}\n\treturn result\n}", "func getKeysAlphabetically(labels map[string]string) []string {\n\tvar keys []string\n\tfor k := range labels {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\treturn keys\n}", "func (r *staticCollection) Keys() ([]string, error) {\n\tkeys := make([]string, 0, len(r.items))\n\tfor k := range r.items {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\n\treturn keys, nil\n}", "func (m *OrderedIntMap) Keys() []int { return m.keys }", "func (l *Labels) Keys() []string {\n\treturn l.keys\n}", "func (o *FiltersNet) SetTagKeys(v []string) {\n\to.TagKeys = &v\n}", "func (g *Graph) listOfKeys() []string {\n\tkeys := make([]string, len(g.Nodes))\n\ti := 0\n\n\tfor k := range g.Nodes {\n\t\tkeys[i] = k\n\t\ti++\n\t}\n\n\treturn keys\n}", "func (o *FiltersVirtualGateway) SetTagKeys(v []string) {\n\to.TagKeys = &v\n}", "func (m *docsMap) Keys() []string {\n\tvar keys []string\n\tm.Range(func(key string, value *Doc) bool {\n\t\tkeys = append(keys, key)\n\t\treturn true\n\t})\n\tsort.Strings(keys)\n\treturn keys\n}", "func (m *OrderedUintMap) Keys() []uint { return m.keys }", "func (m *Map) Keys() []string {\n\tm.convert()\n\tif len(m.order) > 0 {\n\t\treturn m.order\n\t}\n\n\tresult := make([]string, len(m.items))\n\ti 
:= 0\n\tfor key := range m.items {\n\t\tresult[i] = key\n\t\ti = i + 1\n\t}\n\n\tm.order = result\n\n\treturn result\n}", "func (m Points) Keys() []string {\n\tvar keys []string\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\treturn keys\n}", "func orderedKeys(kv map[string]interface{}) []string {\n\tfirst_keys := make([]string, len(keyOrder))\n\tkeys := make([]string, 0, len(kv))\n\n\tfor key := range kv {\n\t\tif index, found := keyOrder[key]; found {\n\t\t\tfirst_keys[index] = key\n\t\t} else {\n\t\t\tkeys = append(keys, key)\n\t\t}\n\t}\n\tsort.Strings(keys)\n\treturn append(removeEmptyStrings(first_keys, len(kv)), keys...)\n}", "func (o *FiltersSecurityGroup) SetTagKeys(v []string) {\n\to.TagKeys = &v\n}", "func (p *AgentPool) Keys() []string {\n\tp.Lock()\n\tdefer p.Unlock()\n\tresult := make([]string, 0, len(p.data))\n\tfor k := range p.data {\n\t\tresult = append(result, k)\n\t}\n\treturn result\n}", "func orderStackResourceKeys(m map[string]StackResource) []string {\n\tret := make([]string, len(m))\n\ti := 0\n\n\tfor k := range m {\n\t\tret[i] = k\n\t\ti++\n\t}\n\tsort.Sort(sort.Reverse(sort.StringSlice(ret)))\n\treturn ret\n}", "func orderStackCRDKeys(m map[string]apiextensions.CustomResourceDefinition) []string {\n\tret := make([]string, len(m))\n\ti := 0\n\n\tfor k := range m {\n\t\tret[i] = k\n\t\ti++\n\t}\n\tsort.Sort(sort.Reverse(sort.StringSlice(ret)))\n\treturn ret\n}", "func (cache *Cache) Keys () []string {\n keys := make([]string, len(cache.FilePrints))\n n := 0;\n for k := range cache.FilePrints { keys[n] = k; n++ }\n if len(keys) > 1 { sort.Strings(keys) }\n return keys\n}", "func sortedMapKeys(m *jsonschema.Index) []string {\n\tvar keys []string\n\tfor k, _ := range *m {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\treturn keys\n}", "func orderStackIconKeys(m map[string]*v1alpha1.IconSpec) []string {\n\tret := make([]string, len(m))\n\ti := 0\n\n\tfor k := range m {\n\t\tret[i] = k\n\t\ti++\n\t}\n\tsort.Sort(sort.Reverse(sort.StringSlice(ret)))\n\treturn ret\n}", "func (i IntHashMap[T, V]) Keys() []T {\n\tresult := make([]T, 0, len(i.hashToKey))\n\tfor _, key := range i.hashToKey {\n\t\tresult = append(result, key)\n\t}\n\treturn result\n}", "func (m pbMetricMap) Keys() []string {\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\treturn keys\n}", "func SortedMapKeys(m map[string]interface{}) (keyList []string) {\n\tfor key := range m {\n\t\tkeyList = append(keyList, key)\n\t}\n\tsort.Strings(keyList)\n\treturn\n}", "func SortedKeys(m map[string]*Schema) (keys []string) {\n\tunorderedKeys := make([]string, 0)\n\torderedProperties := propertyOrderedKeys{}\n\n\tfor key := range m {\n\t\tif m[key].PropertyOrder == 0 {\n\t\t\tunorderedKeys = append(unorderedKeys, key)\n\t\t} else {\n\t\t\torderedProperties = append(orderedProperties, propertyOrderedKey{\n\t\t\t\tkey: key,\n\t\t\t\tpropertyOrder: m[key].PropertyOrder,\n\t\t\t})\n\t\t}\n\t}\n\n\t// sort unordered keys first\n\tsort.Strings(unorderedKeys)\n\n\t// sort order given properties\n\tsort.Sort(orderedProperties)\n\n\t// conbine them\n\treturn orderedProperties.prependKeysTo(unorderedKeys)\n}", "func (p *Partitions) Keys() []string {\n\tvar result = make([]string, 0)\n\tfor k := range p.index {\n\t\tresult = append(result, k)\n\t}\n\treturn result\n}", "func (i *OrderedItems) StringKeys() []string {\n\tkeys := make([]string, 0, len(i.OrderedItems))\n\tfor k := range i.OrderedItems {\n\t\tif 
reflect.TypeOf(k).Kind() == reflect.String {\n\t\t\tkeys = append(keys, k.(string))\n\t\t}\n\t}\n\treturn keys\n}", "func (i *OrderedItems) Keys() []interface{} {\n\tkeys := make([]interface{}, 0, len(i.OrderedItems))\n\tfor k := range i.OrderedItems {\n\t\tkeys = append(keys, k)\n\t}\n\treturn keys\n}", "func (o *FiltersNatService) SetTagKeys(v []string) {\n\to.TagKeys = &v\n}", "func (t *Tags) NKeys() int { return 0 }", "func (tree *Tree) Keys() []interface{} {\n\tkeys := make([]interface{}, tree.size)\n\tit := tree.Iterator()\n\tfor i := 0; it.Next(); i++ {\n\t\tkeys[i] = it.Key()\n\t}\n\treturn keys\n}", "func orderStackGroupKeys(m map[string]StackGroup) []string {\n\tret := make([]string, len(m))\n\ti := 0\n\n\tfor k := range m {\n\t\tret[i] = k\n\t\ti++\n\t}\n\tsort.Sort(sort.Reverse(sort.StringSlice(ret)))\n\treturn ret\n}", "func (m *jobMap) Keys() []string {\n\tvar keys []string\n\tm.Range(func(key string, value *Job) bool {\n\t\tkeys = append(keys, key)\n\t\treturn true\n\t})\n\tsort.Strings(keys)\n\treturn keys\n}", "func (ilos IndexedListOfStrings) Keys() []uint {\n\tlength := len(ilos)\n\tif length <= 0 {\n\t\treturn []uint{}\n\t}\n\n\tkeys := make([]uint, 0, length)\n\tfor k := range ilos {\n\t\tkeys = append(keys, k)\n\t}\n\treturn keys\n}", "func (ns *Namespace) ListKeys() []string {\n\tkeys := make([]string, len(ns.ns))\n\tidx := 0\n\tfor k, _ := range ns.ns {\n\t\tkeys[idx] = k\n\t\tidx += 1\n\t}\n\treturn keys\n}", "func (tx *Tx) Keys() []string {\n\tkeys := make([]string, 0, len(tx.buckets))\n\tfor n := range tx.buckets {\n\t\tkeys = append(keys, n)\n\t}\n\treturn keys\n}", "func (obj *object) Keys() []*Term {\n\tkeys := make([]*Term, len(obj.keys))\n\n\tfor i, elem := range obj.sortedKeys() {\n\t\tkeys[i] = elem.key\n\t}\n\n\treturn keys\n}", "func sortKeys(m map[byte]*Node) []byte {\n\tkeys := make([]byte, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\n\tsort.Slice(keys, func(i, j int) bool {\n\t\treturn keys[i] < keys[j]\n\t})\n\n\treturn keys\n}", "func (k *KeyValue) GetKeys() (keys []string) {\n\tfor key := range k.Raw {\n\t\tkeys = append(keys, key)\n\t}\n\treturn\n}", "func (s *Store) getKeysSortedByRank() (result []string) {\n\ttype pair struct {\n\t\tid string\n\t\trank int\n\t}\n\tsorting := make([]pair, 0)\n\n\tfor key, entry := range *s {\n\t\tsorting = append(sorting, pair{key, entry.rank()})\n\t}\n\n\tsort.Slice(sorting, func(i, j int) bool {\n\t\treturn sorting[i].rank < sorting[j].rank\n\t})\n\n\tresult = make([]string, 0)\n\tfor _, sortingEntry := range sorting {\n\t\tresult = append(result, sortingEntry.id)\n\t}\n\n\treturn result\n}", "func (p OrderedMap) Keys() []interface{} {\n\treturn p.keys\n}", "func GetSortedKeySlice(m map[string]string) []string {\n\tkeys := make([]string, 0)\n\tfor key := range m {\n\t\tkeys = append(keys, key)\n\t}\n\tsort.Sort(sort.StringSlice(keys))\n\treturn keys\n}", "func (d *DiskIOState) GetTags() []string {\n\tvar tags []string\n\tvalueMap := structs.Map(d)\n\tfor k := range valueMap {\n\t\ttags = append(tags, k)\n\t}\n\n\treturn tags\n}", "func (m Packages) Keys() (keys []string) {\n\tfor name := range m {\n\t\tkeys = append(keys, name)\n\t}\n\n\treturn\n}", "func (m *OrderedMap) Keys() []string {\n\tkeys := []string{}\n\tfor k, _ := range m.Map() {\n\t\tkeys = append(keys, k)\n\t}\n\treturn keys\n}", "func (fs *FileStore) Keys() []string {\n\ts := fs.getDirContents(\"\")\n\tsort.Strings(s)\n\treturn s\n}", "func (mm Uint64Uint64Map) Keys() Uint64List {\n\tif mm == nil {\n\t\treturn 
nil\n\t}\n\n\ts := make(Uint64List, 0, len(mm))\n\tfor k := range mm {\n\t\ts = append(s, k)\n\t}\n\treturn s\n}", "func (s StringSet) Keys() []string {\n\tret := make([]string, 0, len(s))\n\tfor v := range s {\n\t\tret = append(ret, v)\n\t}\n\treturn ret\n}", "func (o *FiltersVirtualGateway) GetTagKeysOk() ([]string, bool) {\n\tif o == nil || o.TagKeys == nil {\n\t\tvar ret []string\n\t\treturn ret, false\n\t}\n\treturn *o.TagKeys, true\n}", "func (d *Descriptor) Keys() []core.Key {\n\treturn d.keys\n}", "func (i *BTreeIndex) Keys(from string, n int) []string {\r\n\ti.RLock()\r\n\tdefer i.RUnlock()\r\n\r\n\tif i.BTree == nil || i.LessFunction == nil {\r\n\t\tpanic(\"uninitialized index\")\r\n\t}\r\n\r\n\tif i.BTree.Len() <= 0 {\r\n\t\treturn []string{}\r\n\t}\r\n\r\n\tbtreeFrom := btreeString{s: from, l: i.LessFunction}\r\n\tskipFirst := true\r\n\tif len(from) <= 0 || !i.BTree.Has(btreeFrom) {\r\n\t\t// no such key, so fabricate an always-smallest item\r\n\t\tbtreeFrom = btreeString{s: \"\", l: func(string, string) bool { return true }}\r\n\t\tskipFirst = false\r\n\t}\r\n\r\n\tkeys := []string{}\r\n\titerator := func(i btree.Item) bool {\r\n\t\tkeys = append(keys, i.(btreeString).s)\r\n\t\treturn len(keys) < n\r\n\t}\r\n\ti.BTree.AscendGreaterOrEqual(btreeFrom, iterator)\r\n\r\n\tif skipFirst && len(keys) > 0 {\r\n\t\tkeys = keys[1:]\r\n\t}\r\n\r\n\treturn keys\r\n}", "func GetKeys(mapping map[string]string) []string {\n\tvar arrayToReturn []string\n\n\tfor key := range mapping {\n\t\twalletBalance := stringToBigInt(tokenLedger[key])\n\n\t\t// Ignore zero balance addresses if the IgnoreZeroBalance value is flagged\n\t\tif !IgnoreZeroBalance {\n\t\t\tarrayToReturn = append(arrayToReturn, key)\n\t\t} else if walletBalance.Cmp(big.NewInt(0)) == 1 {\n\t\t\tarrayToReturn = append(arrayToReturn, key)\n\t\t}\n\t}\n\n\treturn arrayToReturn\n}", "func (object Object) Keys() []string {\n\tvar keys []string\n\tfor key, _ := range object {\n\t\tkeys = append(keys, key)\n\t}\n\tsort.Strings(keys)\n\treturn keys\n}", "func sortKeys(vals map[string]counter) (keys []string) {\n\tfor k := range vals {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\n\treturn keys\n}", "func (m Object) Keys() []string {\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\treturn keys\n}", "func (d *Driver) getKeys(ctx context.Context, prefix string, sortResults bool) ([]string, error) {\n\twatcher, err := d.kv.Watch(prefix, nats.MetaOnly(), nats.IgnoreDeletes(), nats.Context(ctx))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\terr := watcher.Stop()\n\t\tif err != nil {\n\t\t\tlogrus.Warnf(\"failed to stop %s getKeys watcher\", prefix)\n\t\t}\n\t}()\n\n\tvar keys []string\n\t// grab all matching keys immediately\n\tfor entry := range watcher.Updates() {\n\t\tif entry == nil {\n\t\t\tbreak\n\t\t}\n\t\tkeys = append(keys, entry.Key())\n\t}\n\n\tif sortResults {\n\t\tsort.Strings(keys)\n\t}\n\n\treturn keys, nil\n}", "func ListTags() []string {\n\treturn _tags\n}", "func (m QuantileMap) Keys() []float64 {\n\tresult := make([]float64, 0, len(m))\n\tfor q := range m {\n\t\tresult = append(result, q)\n\t}\n\tsort.Float64s(result)\n\treturn result\n}", "func (s StringKeyMap) Keys() []string {\n\tkeys := make([]string, len(s))\n\ti := 0\n\tfor k := range s {\n\t\tkeys[i] = k\n\t\ti++\n\t}\n\treturn keys\n}", "func (t *Set) Keys() [][]byte {\n\tkeys := make([][]byte, 0, t.size)\n\n\t// empty tree?\n\tif t.Empty() {\n\t\treturn 
keys\n\t}\n\n\t// Walk the tree without function recursion\n\tto_visit := make([]*Ref, 1)\n\n\t// Walk the left side of the root\n\tp := &t.root\n\tto_visit[0] = p\n\n\tfor l := len(to_visit); l > 0; l = len(to_visit) {\n\t\t// shift the list to get the first item\n\n\t\tp = to_visit[l-1]\n\t\tto_visit = to_visit[:l-1]\n\n\t\t// leaf?\n\t\tif p.node == nil {\n\t\t\tkeys = append(keys, p.Key)\n\t\t} else {\n\t\t\t// unshift the children and continue\n\t\t\tto_visit = append(to_visit, &p.node.child[1], &p.node.child[0])\n\t\t}\n\t}\n\treturn keys\n}", "func ShowTagKeys() *ShowTagKeysBuilder {\n\treturn &ShowTagKeysBuilder{}\n}", "func (connector *DbConnector) GetTagNames() ([]string, error) {\n\tc := connector.pool.Get()\n\tdefer c.Close()\n\n\ttagNames, err := redis.Strings(c.Do(\"SMEMBERS\", tagsKey))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to retrieve tags: %s\", err.Error())\n\t}\n\treturn tagNames, nil\n}", "func getKeys(data map[string]interface{}) []string {\n\tvar list []string\n\tfor key := range data {\n\t\tlist = append(list, key)\n\t}\n\treturn list\n}", "func newLogTagKeyIterator(a []logTagKey) *logTagKeyIterator {\n\tsort.Sort(logTagKeySlice(a))\n\treturn &logTagKeyIterator{a: a}\n}", "func (dict *Dictionary) GetKeys() []DictKey {\n\tdict.lock.RLock()\n\tdefer dict.lock.RUnlock()\n\tvar dictKeys []DictKey\n\tdictKeys = []DictKey{}\n\tvar key DictKey\n\tfor key = range dict.elements {\n\t\tdictKeys = append(dictKeys, key)\n\t}\n\treturn dictKeys\n}", "func (s *DeleteTagsInput) SetTagKeys(v []*string) *DeleteTagsInput {\n\ts.TagKeys = v\n\treturn s\n}", "func (s *DeleteTagsInput) SetTagKeys(v []*string) *DeleteTagsInput {\n\ts.TagKeys = v\n\treturn s\n}", "func (o MongoIndexKeysOutput) Keys() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v MongoIndexKeys) []string { return v.Keys }).(pulumi.StringArrayOutput)\n}", "func (t Tags) Names() []string {\n\tset := map[string]string{}\n\tfor _, s := range t {\n\t\tset[s.Name] = \"\"\n\t}\n\n\tres := make([]string, 0, len(set))\n\n\tfor k := range set {\n\t\tres = append(res, k)\n\t}\n\n\tsort.Strings(res)\n\n\treturn res\n}", "func (o *FiltersNet) GetTagKeysOk() ([]string, bool) {\n\tif o == nil || o.TagKeys == nil {\n\t\tvar ret []string\n\t\treturn ret, false\n\t}\n\treturn *o.TagKeys, true\n}", "func (s *UntagQueueInput) SetTagKeys(v []*string) *UntagQueueInput {\n\ts.TagKeys = v\n\treturn s\n}", "func (ms *MemStore) Keys() []string {\n\tms.mu.RLock()\n\ts := make([]string, 0, len(ms.data))\n\tfor key := range ms.data {\n\t\ts = append(s, key)\n\t}\n\tms.mu.RUnlock()\n\tsort.Strings(s)\n\treturn s\n}", "func sortedKeys(v any) []any {\n\tkeys := reflect.ValueOf(v).MapKeys()\n\tsort.Slice(keys, func(i, j int) bool {\n\t\ta := keys[i].Convert(u32).Interface().(uint32)\n\t\tb := keys[j].Convert(u32).Interface().(uint32)\n\t\treturn a < b\n\t})\n\tvals := make([]any, len(keys))\n\tfor i, key := range keys {\n\t\tvals[i] = key.Interface()\n\t}\n\treturn vals\n}", "func (t *Map) Keys() []interface{} {\n\tkeys := make([]interface{}, t.keys.Len())\n\tfor e, i := t.keys.Front(), 0; e != nil; e, i = e.Next(), i+1 {\n\t\tkeys[i] = e.Value\n\t}\n\treturn keys\n}" ]
[ "0.809159", "0.77590525", "0.767649", "0.76697975", "0.7639584", "0.75801903", "0.7515929", "0.72973794", "0.68380284", "0.67081636", "0.6610359", "0.6471081", "0.64383274", "0.6395813", "0.62597847", "0.62516946", "0.6238408", "0.6233503", "0.61932665", "0.6141229", "0.6127683", "0.61113447", "0.606008", "0.60588443", "0.60206413", "0.60082674", "0.5987264", "0.5979341", "0.5958314", "0.5957262", "0.5939236", "0.59385973", "0.59307694", "0.59271646", "0.5912006", "0.59097064", "0.588356", "0.5879682", "0.5866378", "0.5866168", "0.5865698", "0.58644235", "0.583444", "0.5830016", "0.5823565", "0.5822271", "0.58144575", "0.58084875", "0.58030504", "0.5802216", "0.57581437", "0.57538354", "0.5753708", "0.57512915", "0.57474405", "0.57363206", "0.57265735", "0.57068026", "0.56966394", "0.56836146", "0.5683382", "0.567621", "0.56748426", "0.56739205", "0.56684864", "0.5658795", "0.5653943", "0.5649971", "0.56494075", "0.5644749", "0.5642343", "0.5623384", "0.5619852", "0.5615889", "0.56030494", "0.5601244", "0.559808", "0.5596189", "0.55954266", "0.5593803", "0.5575967", "0.5565905", "0.5565087", "0.5556603", "0.55535257", "0.55499977", "0.5543333", "0.55339855", "0.55300915", "0.55225354", "0.5521185", "0.5509837", "0.5509035", "0.55076337", "0.5505781", "0.55046964", "0.5498808", "0.549719", "0.54874814", "0.547805" ]
0.77027154
2
marshalStrings encodes an array of strings into a byte slice.
func marshalStrings(a []string) (ret []byte) {
	for _, s := range a {
		// Create a slice for len+data
		b := make([]byte, 2+len(s))
		binary.BigEndian.PutUint16(b[0:2], uint16(len(s)))
		copy(b[2:], s)

		// Append it to the full byte slice.
		ret = append(ret, b...)
	}
	return
}
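For reference, a self-contained usage sketch of the encoder above; the package wrapper, imports, and sample strings are illustrative assumptions, not part of the dataset record:

package main

import (
	"encoding/binary"
	"fmt"
)

// marshalStrings (copied from the record above) writes each string as a
// 2-byte big-endian length prefix followed by the string's raw bytes.
func marshalStrings(a []string) (ret []byte) {
	for _, s := range a {
		b := make([]byte, 2+len(s))
		binary.BigEndian.PutUint16(b[0:2], uint16(len(s)))
		copy(b[2:], s)
		ret = append(ret, b...)
	}
	return
}

func main() {
	enc := marshalStrings([]string{"foo", "quux"})
	// Prints: 00 03 66 6f 6f 00 04 71 75 75 78
	fmt.Printf("% x\n", enc)
}

Note that the uint16 prefix limits each string to 65535 bytes; a longer input would wrap the prefix and desynchronize any decoder reading the stream.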
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func MarshalStringSlice(item Any) []*string {\n\tvar data []*string\n\n\tswitch reflect.TypeOf(item).Kind() {\n\tcase reflect.Slice:\n\t\tval := reflect.ValueOf(item)\n\t\tmax := val.Len()\n\t\tfor i := 0; i < max; i++ {\n\t\t\ts := fmt.Sprint(val.Index(i).Interface())\n\t\t\tdata = append(data, &s)\n\t\t}\n\t}\n\treturn data\n}", "func (e *Encoder) encodeStringArray(v []string) error {\n\t// Special case for a nil array\n\tif v == nil {\n\t\treturn e.encodePrefixed('*', \"-1\")\n\t}\n\n\t// First encode the number of elements\n\tn := len(v)\n\tif err := e.encodePrefixed('*', strconv.Itoa(n)); err != nil {\n\t\treturn err\n\t}\n\n\t// Then encode each value\n\tfor _, el := range v {\n\t\tif err := e.encodeBulkString(BulkString(el)); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func bindStrings(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(StringsABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func EncodeBulkString(s string) []byte {\n\tif len(s) > bulkStringMaxLength {\n\t\tpanic(\"BulkString is over 512 MB\")\n\t}\n\treturn []byte(typeBulkStrings + strconv.Itoa(len(s)) + crlf + s + crlf)\n}", "func (e *Encoder) marshalSlice(val reflect.Value, child bool) (string, bool) {\n\tvar ok bool\n\tif ok = supportedBaseKind(val); !ok {\n\t\treturn \"\", false\n\t}\n\tvar sl, str string\n\t// check the type of slice and handle\n\tfor j := 0; j < val.Len(); j++ {\n\t\tstr = \"\"\n\t\tstr, ok = e.stringify(val.Index(j), child)\n\t\tif !ok {\n\t\t\treturn \"\", false\n\t\t}\n\t\tif j == 0 {\n\t\t\tsl = str\n\t\t\tcontinue\n\t\t}\n\t\tsl = fmt.Sprintf(\"%s,%s\", sl, str)\n\t}\n\tif child {\n\t\tsl = fmt.Sprintf(\"%s%s%s\", e.sepBeg, sl, e.sepEnd)\n\t}\n\treturn sl, true\n}", "func stringToBytes(s string) (bytes []byte) {\n\tstr := (*reflect.StringHeader)(unsafe.Pointer(&s))\n\tslice := (*reflect.SliceHeader)(unsafe.Pointer(&bytes))\n\tslice.Data = str.Data\n\tslice.Len = str.Len\n\treturn bytes\n}", "func bindStrings(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := ParsedABI(K_Strings)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil\n}", "func bindStrings(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := ParsedABI(K_Strings)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil\n}", "func WriteBytesStr(b []byte, w io.Writer, array bool) error {\n\tvar prefix string\n\tif array {\n\t\tprefix = \"[\" + strconv.Itoa(len(b)) + \"]byte{\"\n\t} else {\n\t\tprefix = \"[]byte{\"\n\t}\n\treturn writeBytesStr(b, prefix, w)\n}", "func StringToBytes(s string) []byte {\n\tsh := (*reflect.StringHeader)(unsafe.Pointer(&s))\n\tb := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{\n\t\tData: sh.Data,\n\t\tLen: sh.Len,\n\t\tCap: sh.Len,\n\t}))\n\t// ensure the underlying string doesn't get GC'ed before the assignment happens\n\truntime.KeepAlive(&s)\n\n\treturn b\n}", "func StringToBytes(s string) []byte {\n\tsh := (*reflect.StringHeader)(unsafe.Pointer(&s))\n\tbh := 
reflect.SliceHeader{sh.Data, sh.Len, 0}\n\treturn *(*[]byte)(unsafe.Pointer(&bh))\n}", "func StringToBytes(s string) []byte {\n\tsh := (*reflect.StringHeader)(unsafe.Pointer(&s))\n\tbh := reflect.SliceHeader{sh.Data, sh.Len, 0}\n\treturn *(*[]byte)(unsafe.Pointer(&bh))\n}", "func (s StringSlice) MarshalJSON() ([]byte, error) {\n\tif err := s.IsValueValid(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn json.Marshal(s.value)\n\n}", "func string2bytes(s string) []byte {\n\tstringHeader := (*reflect.StringHeader)(unsafe.Pointer(&s))\n\n\tbh := reflect.SliceHeader{\n\t\tData: stringHeader.Data,\n\t\tLen: stringHeader.Len,\n\t\tCap: stringHeader.Len,\n\t}\n\n\treturn *(*[]byte)(unsafe.Pointer(&bh))\n}", "func encString(b []byte, v string) []byte {\n\tvar (\n\t\tvLen int\n\n\t\tvBuf = *bytes.NewBufferString(v)\n\n\t\tvChunk = func(length int) {\n\t\t\tfor i := 0; i < length; i++ {\n\t\t\t\tif r, s, err := vBuf.ReadRune(); s > 0 && err == nil {\n\t\t\t\t\t// b = append(b, []byte(string(r))...)\n\t\t\t\t\tb = append(b, Slice(string(r))...) // converts it to []byte in memory space of \"r\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t)\n\n\tif v == \"\" {\n\t\treturn encByte(b, BC_STRING_DIRECT)\n\t}\n\n\tfor {\n\t\tvLen = utf8.RuneCount(vBuf.Bytes())\n\t\tif vLen == 0 {\n\t\t\tbreak\n\t\t}\n\t\tif vLen > CHUNK_SIZE {\n\t\t\tb = encByte(b, BC_STRING_CHUNK)\n\t\t\tb = encByte(b, PackUint16(uint16(CHUNK_SIZE))...)\n\t\t\tvChunk(CHUNK_SIZE)\n\t\t} else {\n\t\t\tif vLen <= int(STRING_DIRECT_MAX) {\n\t\t\t\tb = encByte(b, byte(vLen+int(BC_STRING_DIRECT)))\n\t\t\t} else if vLen <= int(STRING_SHORT_MAX) {\n\t\t\t\tb = encByte(b, byte((vLen>>8)+int(BC_STRING_SHORT)), byte(vLen))\n\t\t\t} else {\n\t\t\t\tb = encByte(b, BC_STRING)\n\t\t\t\tb = encByte(b, PackUint16(uint16(vLen))...)\n\t\t\t}\n\t\t\tvChunk(vLen)\n\t\t}\n\t}\n\n\treturn b\n}", "func StringToBytes(s string) []byte {\n\tstrstruct := stringStructOf(&s)\n\treturn *(*[]byte)(unsafe.Pointer(&sliceType2{\n\t\tArray: strstruct.Str,\n\t\tLen: strstruct.Len,\n\t\tCap: strstruct.Len,\n\t}))\n}", "func stringToBytes(in string) []byte {\n\tvar buf bytes.Buffer\n\trunes := []rune(in)\n\n\tbinary.Write(&buf, binary.BigEndian, int16(len(runes)))\n\tfor _, r := range runes {\n\t\tbinary.Write(&buf, binary.BigEndian, uint16(r))\n\t}\n\treturn buf.Bytes()\n}", "func (o Strings) ToBytes() []byte {\n\tbuf := uio.NewBigEndianBuffer(nil)\n\tfor _, uc := range o {\n\t\tbuf.Write8(uint8(len(uc)))\n\t\tbuf.WriteBytes([]byte(uc))\n\t}\n\treturn buf.Data()\n}", "func packSString(v []string, ptr0 **C.char) {\n\tconst m = 0x7fffffff\n\tfor i0 := range v {\n\t\tptr1 := (*(*[m / sizeOfPtr]*C.char)(unsafe.Pointer(ptr0)))[i0]\n\t\tv[i0] = packPCharString(ptr1)\n\t}\n}", "func EncodeBinarySlice(seed []byte) []string {\n\twords := make([]string, len(seed)+1) // Extra word for checksumByte\n\tfor i, b := range seed {\n\t\twords[i] = strconv.FormatInt(int64(b), 2)\n\t}\n\tchecksum := checksumByte(seed)\n\twords[len(words)-1] = strconv.FormatInt(int64(checksum), 2)\n\treturn words\n}", "func (bw *BufWriter) Array(slice []interface{}) {\n\tif bw.Error != nil {\n\t\treturn\n\t}\n\tbw.stringBuf, bw.Error = Array(slice, bw.stringBuf[:0])\n\tif bw.Error != nil {\n\t\treturn\n\t}\n\t_, bw.Error = bw.writer.Write(bw.stringBuf)\n}", "func stringToMultiStringHexBytes(values []string) string {\n\tvalueString := strings.Join(values, \"\\x00\")\n\thex := utf16.Encode([]rune(valueString))\n\thexChars := make([]string, len(hex)*2)\n\tfor i, h := range hex {\n\t\ts := fmt.Sprintf(\"%04x\", 
h)\n\t\thexChars[2*i] = s[2:4]\n\t\thexChars[2*i+1] = s[0:2]\n\t}\n\treturn strings.Join(hexChars, \",\") + \",00,00,00,00\"\n}", "func marshalBinary(b *strings.Builder, bs []byte) error {\n\tif err := b.WriteByte(':'); err != nil {\n\t\treturn err\n\t}\n\n\tbuf := make([]byte, base64.StdEncoding.EncodedLen(len(bs)))\n\tbase64.StdEncoding.Encode(buf, bs)\n\n\tif _, err := b.Write(buf); err != nil {\n\t\treturn err\n\t}\n\n\treturn b.WriteByte(':')\n}", "func StringToBytes(s string) []byte {\n\treturn *(*[]byte)(unsafe.Pointer(&s))\n}", "func (bs ByteSlice) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(bs.String())\n}", "func StringBytes(s string) []byte {\n\treturn []byte(s)\n}", "func StringBytes(s string) []byte {\n\treturn *(*[]byte)(unsafe.Pointer(&s))\n}", "func (e *encoder) marshalArray(t reflect.Type, v reflect.Value, n nestedTypeData) error {\n\telemType := t.Elem()\n\tfor i := 0; i < t.Len(); i++ {\n\t\tif err := e.marshal(elemType, v.Index(i), n); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func escapeStrings(s []string) []string {\n\tescaped := make([]string, len(s))\n\n\tfor i, v := range s {\n\t\tescaped[i] = escapeString(v)\n\t}\n\n\treturn escaped\n}", "func (e *encoder) marshalString(v reflect.Value, n nestedTypeData) error {\n\tif !v.IsValid() {\n\t\te.writeUint(0, 8)\n\t\te.writeUint(noAlloc, 8)\n\t\treturn nil\n\t}\n\ts := v.String()\n\tmax := n.Unnest()\n\tif max != nil && len(s) > *max {\n\t\treturn newExpectError(ErrStringTooLong, *max, len(s))\n\t}\n\te.writeUint(uint64(len(s)), 8)\n\te.writeUint(allocPresent, 8)\n\n\t// Create a new out-of-line object and write bytes of the string.\n\thead := e.newObject(len(s))\n\tfor i := 0; i < len(s); i++ {\n\t\te.buffer[head+i] = s[i]\n\t}\n\treturn nil\n}", "func StringBytes(b []byte) string { return *(*string)(Pointer(&b)) }", "func StringToBytes(s string) []byte {\n\tx := (*[2]uintptr)(unsafe.Pointer(&s))\n\th := [3]uintptr{x[0], x[1], x[1]}\n\treturn *(*[]byte)(unsafe.Pointer(&h))\n}", "func BytesString(b []byte) string {\n\treturn *(*string)(unsafe.Pointer(&b))\n}", "func Strings(values []string) dgo.Array {\n\tcp := make([]dgo.Value, len(values))\n\tfor i := range values {\n\t\tcp[i] = makeHString(values[i])\n\t}\n\treturn &array{slice: cp, frozen: true}\n}", "func MarshalBytes(dst, b []byte) []byte {\n\tdst = MarshalVarUint64(dst, uint64(len(b)))\n\tdst = append(dst, b...)\n\treturn dst\n}", "func string2ByteSlice(str string) (bs []byte) {\n\tstrHdr := (*reflect.StringHeader)(unsafe.Pointer(&str))\n\tsliceHdr := (*reflect.SliceHeader)(unsafe.Pointer(&bs))\n\tsliceHdr.Data = strHdr.Data\n\tsliceHdr.Len = strHdr.Len\n\tsliceHdr.Cap = strHdr.Len\n\t// This KeepAlive line is essential to make the\n\t// String2ByteSlice function be always valid\n\t// when it is provided in other custom packages.\n\truntime.KeepAlive(&str)\n\treturn\n}", "func UnsafeStringBytes(s *string) []byte {\n\treturn *(*[]byte)(unsafe.Pointer((*reflect.SliceHeader)(unsafe.Pointer(s))))\n}", "func (m *mmapData) pushStringSlice(ss []string) **C.char {\n\tarr := unsafe.Pointer(&m.data[m.offset])\n\tnextptr := arr\n\tm.offset += ptrSize * (len(ss) + 1)\n\tfor _, s := range ss {\n\t\tptr := m.pushString(s)\n\t\t*(*unsafe.Pointer)(nextptr) = unsafe.Pointer(ptr)\n\t\tnextptr = unsafe.Pointer(uintptr(nextptr) + uintptr(ptrSize))\n\t}\n\t*(*unsafe.Pointer)(nextptr) = unsafe.Pointer(nil)\n\tm.panicIfOverAllocated()\n\treturn (**C.char)(arr)\n}", "func StringToBytes(s string) []byte {\n\tsp := *(*[2]uintptr)(unsafe.Pointer(&s))\n\tbp := 
[3]uintptr{sp[0], sp[1], sp[1]}\n\treturn *(*[]byte)(unsafe.Pointer(&bp))\n}", "func StringToBytes(s string) []byte {\n\tsp := *(*[2]uintptr)(unsafe.Pointer(&s))\n\tbp := [3]uintptr{sp[0], sp[1], sp[1]}\n\treturn *(*[]byte)(unsafe.Pointer(&bp))\n}", "func StringToBytes(s string) []byte {\n\tsp := *(*[2]uintptr)(unsafe.Pointer(&s))\n\tbp := [3]uintptr{sp[0], sp[1], sp[1]}\n\treturn *(*[]byte)(unsafe.Pointer(&bp))\n}", "func DeployStrings(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *Strings, error) {\n\tparsed, err := abi.JSON(strings.NewReader(StringsABI))\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\n\taddress, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(StringsBin), backend)\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\treturn address, tx, &Strings{StringsCaller: StringsCaller{contract: contract}, StringsTransactor: StringsTransactor{contract: contract}, StringsFilterer: StringsFilterer{contract: contract}}, nil\n}", "func StringToByteSlice(s *string) []byte {\n\tvar bytes []byte\n\tstringHeader := (*reflect.StringHeader)(unsafe.Pointer(s))\n\tbytesHeader := (*reflect.SliceHeader)(unsafe.Pointer(&bytes))\n\tbytesHeader.Data = stringHeader.Data\n\tbytesHeader.Len = stringHeader.Len\n\tbytesHeader.Cap = stringHeader.Len\n\treturn bytes\n}", "func DeployStrings(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *Strings, error) {\n\tparsed, err := ParsedABI(K_Strings)\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\n\tif parsed == nil {\n\t\treturn common.Address{}, nil, nil, errors.New(\"GetABI returned nil\")\n\t}\n\n\taddress, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(StringsBin), backend)\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\treturn address, tx, &Strings{StringsCaller: StringsCaller{contract: contract}, StringsTransactor: StringsTransactor{contract: contract}, StringsFilterer: StringsFilterer{contract: contract}}, nil\n}", "func DeployStrings(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *Strings, error) {\n\tparsed, err := ParsedABI(K_Strings)\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\n\tif parsed == nil {\n\t\treturn common.Address{}, nil, nil, errors.New(\"GetABI returned nil\")\n\t}\n\n\taddress, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(StringsBin), backend)\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\treturn address, tx, &Strings{StringsCaller: StringsCaller{contract: contract}, StringsTransactor: StringsTransactor{contract: contract}, StringsFilterer: StringsFilterer{contract: contract}}, nil\n}", "func Str2bytes(s string) []byte {\n\tstringHeader := *(*[2]int)(unsafe.Pointer(&s))\n\tvar sliceHeader [3]int\n\tsliceHeader[0] = stringHeader[0]\n\tsliceHeader[1] = stringHeader[1]\n\tsliceHeader[2] = stringHeader[1]\n\treturn *(*[]byte)(unsafe.Pointer(&sliceHeader))\n}", "func encodeByteSlice(w io.Writer, bz []byte) (err error) {\n\terr = encodeVarint(w, int64(len(bz)))\n\tif err != nil {\n\t\treturn\n\t}\n\t_, err = w.Write(bz)\n\treturn\n}", "func TestStringToBin(t *testing.T) {\n\ttestStrings := [3]string{\n\t\t\"abcdefghijklmnopqrstuvxyz\",\n\t\t\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\",\n\t\t\"1234567890!@#$%^&*),./?\",\n\t}\n\n\texpectedStrings := 
[3]string{\n\t\t\"01100001011000100110001101100100011001010110011001100111011010000110100101101010011010110110110001101101011011100110111101110000011100010111001001110011011101000111010101110110011110000111100101111010\",\n\t\t\"0100000101000010010000110100010001000101010001100100011101001000010010010100101001001011010011000100110101001110010011110101000001010001010100100101001101010100010101010101011001010111010110000101100101011010\",\n\t\t\"0011000100110010001100110011010000110101001101100011011100111000001110010011000000100001010000000010001100100100001001010101111000100110001010100010100100101100001011100010111100111111\",\n\t}\n\n\tfor i, input := range testStrings {\n\t\toutput := stringToBin(input)\n\t\tif output != expectedStrings[i] {\n\t\t\tt.Fail()\n\t\t}\n\t}\n}", "func unmarshalStrings(b []byte) (ret []string) {\n\tfor {\n\t\t// If there's no more data then exit.\n\t\tif len(b) == 0 {\n\t\t\treturn\n\t\t}\n\n\t\t// Decode size + data.\n\t\tn := binary.BigEndian.Uint16(b[0:2])\n\t\tret = append(ret, string(b[2:n+2]))\n\n\t\t// Move the byte slice forward and retry.\n\t\tb = b[n+2:]\n\t}\n}", "func StringToByteArray(v string) []byte {\n\tvar slcHdr reflect.SliceHeader\n\tsh := *(*reflect.StringHeader)(unsafe.Pointer(&v))\n\tslcHdr.Data = sh.Data\n\tslcHdr.Cap = sh.Len\n\tslcHdr.Len = sh.Len\n\treturn *(*[]byte)(unsafe.Pointer(&slcHdr))\n}", "func (s *StringArray) Serialize() ([]byte, error) {\n\tb := make([]byte, s.Len())\n\tif err := s.SerializeTo(b); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn b, nil\n}", "func MarshalTinySlice(w io.Writer, v interface{}) error {\n\tval := reflect.ValueOf(v)\n\tswitch k := val.Kind(); k {\n\tcase reflect.Slice:\n\t\tl := val.Len()\n\t\tif l > math.MaxUint8 {\n\t\t\treturn fmt.Errorf(\"a tiny slice can have a maximum of %d elements\", math.MaxUint8)\n\t\t}\n\t\t// slices are variable length, so prepend the length\n\t\terr := MarshalUint8(w, uint8(l))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif l == 0 {\n\t\t\t// if length is 0, we are done\n\t\t\treturn nil\n\t\t}\n\t\t// special case for byte slices\n\t\tif val.Type().Elem().Kind() == reflect.Uint8 {\n\t\t\t// if the array is addressable, we can optimize a bit here\n\t\t\tif val.CanAddr() {\n\t\t\t\treturn marshalBytes(w, val.Slice(0, val.Len()).Bytes())\n\t\t\t}\n\t\t\t// otherwise we have to copy into a newly allocated slice\n\t\t\tslice := reflect.MakeSlice(reflect.SliceOf(val.Type().Elem()), val.Len(), val.Len())\n\t\t\treflect.Copy(slice, val)\n\t\t\treturn marshalBytes(w, slice.Bytes())\n\t\t}\n\t\t// create an encoder and encode all slice elements in the regular Sia-encoding way\n\t\te := NewEncoder(w)\n\t\t// normal slices are encoded by sequentially encoding their elements\n\t\tfor i := 0; i < l; i++ {\n\t\t\terr = e.Encode(val.Index(i).Interface())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\n\tcase reflect.String:\n\t\treturn MarshalTinySlice(w, []byte(val.String()))\n\n\tdefault:\n\t\treturn fmt.Errorf(\"MarshalTinySlice: non-slice type %s (kind: %s) is not supported\",\n\t\t\tval.Type().String(), k.String())\n\t}\n}", "func (e *Encoder) encodeBulkString(v BulkString) error {\n\tn := len(v)\n\tdata := strconv.Itoa(n) + \"\\r\\n\" + string(v)\n\treturn e.encodePrefixed('$', data)\n}", "func bytesToBinaryString(slice []byte) string {\n\t// Convert each byte to its bits representation as string\n\tvar strBuff bytes.Buffer\n\tfor _, b := range(slice) {\n\t\tstrBuff.WriteString(fmt.Sprintf(\"%.8b\", b))\n\t}\n\n\treturn 
strBuff.String()\n}", "func StringPack(str string, out []byte) ([]byte, uint32) {\n\tif str == \"\" {\n\t\tout = append(out, 0)\n\t\treturn out, 1\n\t} else {\n\t\tvar length uint32 = uint32(len(str))\n\t\tvar rv uint32\n\t\tout, rv = Uint32Pack(length, out)\n\n\t\tfor _, c := range str {\n\t\t\tout = append(out, byte(c))\n\t\t}\n\n\t\treturn out, rv + length\n\t}\n}", "func runesToBytes(rs []rune) []byte {\n\t// Calculate how large the slice needs to be\n\tsize := 0\n\tfor _, r := range rs {\n\t\tsize += utf8.RuneLen(r)\n\t}\n\n\t// Allocate it\n\tbs := make([]byte, size)\n\n\t// Now do the conversion to a byte slice\n\tcount := 0\n\tfor _, r := range rs {\n\t\tcount += utf8.EncodeRune(bs[count:], r)\n\t}\n\n\treturn bs\n}", "func encode(vs []interface{}) ([]string, error) {\n\tstrings := make([]string, len(vs))\n\n\tfor i, v := range vs {\n\t\t// vs[i] = str\n\t\tswitch t := v.(type) {\n\t\tcase string:\n\t\t\tstrings[i] = t\n\t\tcase int:\n\t\t\tstrings[i] = strconv.Itoa(t)\n\t\tcase int64:\n\t\t\tstrings[i] = strconv.Itoa(int(t))\n\t\tcase float64:\n\t\t\tstrings[i] = strconv.FormatFloat(t, 'f', -1, 64)\n\t\tcase []interface{}:\n\t\t\tif data, err := json.Marshal(t); err == nil {\n\t\t\t\tstrings[i] = string(data)\n\t\t\t}\n\t\tcase map[string]interface{}:\n\t\t\tif data, err := json.Marshal(t); err == nil {\n\t\t\t\tstrings[i] = string(data)\n\t\t\t}\n\t\tcase bool:\n\t\t\tif t {\n\t\t\t\tstrings[i] = \"true\"\n\t\t\t} else {\n\t\t\t\tstrings[i] = \"false\"\n\t\t\t}\n\t\tcase nil:\n\t\t\tstrings[i] = \"\"\n\t\t}\n\t}\n\n\treturn strings, nil\n}", "func (b BulkStringBytes) MarshalRESP(w io.Writer) error {\n\tif b.B == nil && !b.MarshalNotNil {\n\t\t_, err := w.Write(nilBulkString)\n\t\treturn err\n\t}\n\tscratch := bytesutil.GetBytes()\n\t*scratch = append(*scratch, BulkStringPrefix...)\n\t*scratch = strconv.AppendInt(*scratch, int64(len(b.B)), 10)\n\t*scratch = append(*scratch, delim...)\n\t*scratch = append(*scratch, b.B...)\n\t*scratch = append(*scratch, delim...)\n\t_, err := w.Write(*scratch)\n\tbytesutil.PutBytes(scratch)\n\treturn err\n}", "func (b ByteSlice) MarshalJSON() ([]byte, error) {\n\tif !b.Valid {\n\t\treturn []byte(\"null\"), nil\n\t}\n\t// Because we're passing a []byte into json.Marshal, the json package will\n\t// handle any base64 decoding that needs to happen.\n\treturn json.Marshal(b.ByteSlice)\n}", "func encodeByteSequence(v [][]byte) []byte {\n\tvar hexstrings []string\n\tfor _, a := range v {\n\t\thexstrings = append(hexstrings, hexutil.Encode(a))\n\t}\n\treturn []byte(strings.Join(hexstrings, \",\"))\n}", "func convertStringToByte(s string) []byte {\n\tvar b []byte\n\tstrHeader := (*reflect.StringHeader)(unsafe.Pointer(&s))\n\tsliceHeader := (*reflect.SliceHeader)(unsafe.Pointer(&b))\n\tsliceHeader.Data = strHeader.Data\n\tsliceHeader.Len = strHeader.Len\n\tsliceHeader.Cap = strHeader.Len\n\treturn b\n}", "func marshalTags(tags map[string]string) []byte {\n\t// Empty maps marshal to empty bytes.\n\tif len(tags) == 0 {\n\t\treturn nil\n\t}\n\n\t// Extract keys and determine final size.\n\tsz := (len(tags) * 2) - 1 // separators\n\tkeys := make([]string, 0, len(tags))\n\tfor k, v := range tags {\n\t\tkeys = append(keys, k)\n\t\tsz += len(k) + len(v)\n\t}\n\tsort.Strings(keys)\n\n\t// Generate marshaled bytes.\n\tb := make([]byte, sz)\n\tbuf := b\n\tfor _, k := range keys {\n\t\tcopy(buf, k)\n\t\tbuf[len(k)] = '|'\n\t\tbuf = buf[len(k)+1:]\n\t}\n\tfor i, k := range keys {\n\t\tv := tags[k]\n\t\tcopy(buf, v)\n\t\tif i < len(keys)-1 {\n\t\t\tbuf[len(v)] = 
'|'\n\t\t\tbuf = buf[len(v)+1:]\n\t\t}\n\t}\n\treturn b\n}", "func marshalEvents(events []event) ([]byte, error) {\n\tvar b strings.Builder\n\tb.WriteString(\"[\\n\")\n\tfor _, e := range events {\n\t\tb.WriteString(\" \")\n\t\tb.WriteString(formatIso(e.Time))\n\t\tb.WriteString(\": \")\n\t\tb.WriteString(e.Coins.String())\n\t\tb.WriteString(\"\\n\")\n\t}\n\tb.WriteString(\"]\")\n\treturn []byte(b.String()), nil\n}", "func StringToBytes(s string) (b []byte) {\n\treturn *(*[]byte)(unsafe.Pointer(\n\t\t&struct {\n\t\t\tstring\n\t\t\tCap int\n\t\t}{s, len(s)},\n\t))\n}", "func StringToBytes(str string) []byte {\n\t// Empty strings may not allocate a backing array, so we have to check first\n\tif len(str) == 0 {\n\t\t// It makes sense to return a non-nil empty byte slice since we're passing in a non-nil (although empty) string\n\t\treturn []byte{}\n\t}\n\treturn (*[0x7fff0000]byte)(unsafe.Pointer(\n\t\t(*reflect.StringHeader)(unsafe.Pointer(&str)).Data),\n\t)[:len(str):len(str)]\n}", "func Pack(data []interface{}) []byte {\n\tbuf := new(bytes.Buffer)\n\tfor _, v := range data {\n\t\terr := binary.Write(buf, binary.BigEndian, v)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"failed packing bytes:\", err)\n\t\t}\n\t}\n\treturn buf.Bytes()\n}", "func s2b(s string) []byte {\n\tsh := (*reflect.StringHeader)(unsafe.Pointer(&s))\n\tbh := reflect.SliceHeader{\n\t\tData: sh.Data,\n\t\tLen: sh.Len,\n\t\tCap: sh.Len,\n\t}\n\treturn *(*[]byte)(unsafe.Pointer(&bh))\n}", "func bytesToStrings(in []byte) []string {\n\ts := strings.TrimRight(string(in), \"\\n\")\n\tif s == \"\" { // empty (not {\"\"}, len=1)\n\t\treturn []string{}\n\t}\n\treturn strings.Split(s, \"\\n\")\n}", "func (e *Encoder) MarshalSlice(v reflect.Value) (int, error) {\n\tswitch v.Type().Elem().Kind() {\n\tcase reflect.Uint8:\n\t\tn, err := e.Byte(VarBinColumn)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\ti, err := e.Binary(v.Bytes())\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn n + i, nil\n\tdefault:\n\t\tn, err := e.Byte(ArrayColumn)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tl := v.Len()\n\t\ts, err := e.Int16(int16(l))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tsize := n + s\n\t\tfor i := 0; i < l; i++ {\n\t\t\tc, err := e.Marshal(v.Index(i).Interface())\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tsize += c\n\t\t}\n\t\treturn size, nil\n\t}\n}", "func packBytes(bas... 
[]byte) []byte {\n\tvar packed []byte\n\tfor _, ba := range bas {\n\t\tpacked = append(packed, ba...)\n\t}\n\treturn packed\n}", "func (w *RESPWriter) writeBulkStr(s []byte) {\n\tw.buf.WriteRune(respString)\n\tif len(s) > 0 {\n\t\tw.buf.WriteString(strconv.Itoa(len(s)))\n\t\tw.buf.Write(DELIMS)\n\t\tw.buf.Write(s)\n\t\tw.buf.Write(DELIMS)\n\t} else {\n\t\tw.buf.WriteString(\"-1\")\n\t\tw.buf.Write(DELIMS)\n\t\treturn\n\t}\n}", "func (s String) Bytes() []byte {\n\treturn []byte(s)\n}", "func marshalData(s interface{}) (b []byte) {\n\tv := reflect.ValueOf(s)\n\tif v.IsValid() && v.Kind() == reflect.Ptr {\n\t\tv = v.Elem()\n\t}\n\tif !v.IsValid() || v.Kind() != reflect.Struct {\n\t\tpanic(\"source must be a struct\")\n\t}\n\tvar arr32 [4]byte\n\tfor i := 0; i < v.NumField(); i++ {\n\t\tswitch v.Type().Field(i).Type.Kind() {\n\t\tcase reflect.Uint32:\n\t\t\tbinary.BigEndian.PutUint32(arr32[:], uint32(v.Field(i).Uint()))\n\t\t\tb = append(b, arr32[:]...)\n\t\tcase reflect.String:\n\t\t\tbinary.BigEndian.PutUint32(arr32[:], uint32(v.Field(i).Len()))\n\t\t\tb = append(b, arr32[:]...)\n\t\t\tb = append(b, v.Field(i).String()...)\n\t\tdefault:\n\t\t\tpanic(\"invalid field type: \" + v.Type().Field(i).Type.String())\n\t\t}\n\t}\n\treturn b\n\n}", "func EncodeArray(data []string) []byte {\n\tvar buf []byte\n\tbuf = append(buf, arrayPrefix)\n\tbuf = strconv.AppendInt(buf, int64(len(data)), 10)\n\tbuf = append(buf, sep...)\n\tfor i := range data {\n\t\tbuf = append(buf, EncodeStr(data[i])...)\n\t}\n\n\treturn buf\n}", "func FlattenStrings(args ...interface{}) []string {\n\tflattened := flatten(nil, reflect.ValueOf(args))\n\ts := make([]string, 0, len(flattened))\n\tfor _, f := range flattened {\n\t\ts = append(s, f.(string))\n\t}\n\treturn s\n}", "func BinChopStrings(value string, slice []string) (bool, int){\n\tssc := stringSliceComp{slice}\n\treturn BinChop(value, 0, len(slice), ssc)\n}", "func StringToBytes(s string) ([]byte, error) {\n\tbuf := []byte(s)\n\tsize := len(buf)\n\n\tif len(buf) < 16 {\n\t\t// TinyString\n\t\tprefix := []byte{uint8(0x80 + len(buf))}\n\t\treturn bytes.Join([][]byte{\n\t\t\tprefix,\n\t\t\tbuf,\n\t\t}, []byte{}), nil\n\t}\n\n\tprefix := new(bytes.Buffer)\n\tvar data interface{}\n\tif size < 0x1000 {\n\t\tdata = 0xd000 + uint16(size)\n\t} else if size < 0x10000 {\n\t\tdata = 0xd00000 + uint32(size)\n\t} else {\n\t\tdata = 0xd0000000 + uint64(size)\n\t}\n\terr := binary.Write(prefix, binary.BigEndian, data)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\treturn bytes.Join([][]byte{prefix.Bytes(), buf}, []byte{}), nil\n\n}", "func StrToBytes(s string) []byte {\n\tx := (*[2]uintptr)(unsafe.Pointer(&s))\n\th := [3]uintptr{x[0], x[1], x[1]}\n\treturn *(*[]byte)(unsafe.Pointer(&h))\n}", "func StringSliceToN1QLArray(values []string, quote string) string {\n\tif len(values) == 0 {\n\t\treturn \"\"\n\t}\n\tasString := fmt.Sprintf(\"%s%s%s\", quote, values[0], quote)\n\tfor i := 1; i < len(values); i++ {\n\t\tasString = fmt.Sprintf(\"%s,%s%s%s\", asString, quote, values[i], quote)\n\t}\n\treturn asString\n}", "func Strings(i interface{}) []string {\n\treturn xconv.Strings(i)\n}", "func UnsafeStrToBytes(s string) []byte {\n\treturn unsafe.Slice(unsafe.StringData(s), len(s)) // ref https://github.com/golang/go/issues/53003#issuecomment-1140276077\n}", "func UnsafeStrToBytes(s string) []byte {\n\treturn *(*[]byte)(unsafe.Pointer(&s))\n}", "func UnsafeStrToBytes(s string) []byte {\n\treturn *(*[]byte)(unsafe.Pointer(&s))\n}", "func StringSlicePtr(ss []string) []*byte {\n\tbb := 
make([]*byte, len(ss)+1)\n\tfor i := 0; i < len(ss); i++ {\n\t\tbb[i] = StringBytePtr(ss[i])\n\t}\n\tbb[len(ss)] = nil\n\treturn bb\n}", "func byteSlice(ns []uint32) []byte {\n\tb := make([]byte, len(ns))\n\tfor i, n := range ns {\n\t\tb[i] = byte(n)\n\t}\n\treturn b\n}", "func BytesToString(b []byte) string { return *(*string)(unsafe.Pointer(&b)) }", "func B64ArrayToBytes(rawArray []string) []byte {\n\tdecoded, err := b64.StdEncoding.DecodeString(strings.Join(rawArray, \"\"))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn decoded\n}", "func SlicePtrFromStrings(ss []string) ([]*byte, error) {\n\tvar err error\n\tbb := make([]*byte, len(ss)+1)\n\tfor i := 0; i < len(ss); i++ {\n\t\tbb[i], err = BytePtrFromString(ss[i])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tbb[len(ss)] = nil\n\treturn bb, nil\n}", "func (cs CrypticString) MarshalTLS() ([]byte, error) {\n\tcrypticStringMarshalCalls += 1\n\n\tl := byte(len(cs))\n\tb := []byte(cs)\n\tfor i := range b {\n\t\tb[i] ^= l + byte(i) + 1\n\t}\n\treturn append([]byte{l}, b...), nil\n}", "func (c *fakeRedisConn) WriteBulkString(bulk string) { c.rsp = append(c.rsp, bulk) }", "func ByteSlice(s string) []byte {\n\tsh := *(*StringHeader)(unsafe.Pointer(&s))\n\tbh := SliceHeader{\n\t\tData: sh.Data,\n\t\tLen: sh.Len,\n\t\tCap: sh.Len,\n\t}\n\n\treturn *(*[]byte)(unsafe.Pointer(&bh))\n}", "func (a *ArrayBytes) String() string {\n\tx := a.a\n\tformattedBytes := make([]string, len(x))\n\tfor i, v := range x {\n\t\tformattedBytes[i] = v.String()\n\t}\n\treturn strings.Join(formattedBytes, \",\")\n}", "func StringSlice(input interface{}) (output []string, err error) {\n\tvar castError error\n\tswitch castValue := input.(type) {\n\tcase StringSlicer:\n\t\toutput = castValue.StringSlice()\n\t\treturn\n\tcase []string:\n\t\toutput = castValue\n\t\treturn\n\tcase []interface{}:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\tif output[index], castError = String(castValue[index]); castError != nil {\n\t\t\t\t//todo. 
return better error\n\t\t\t\terr = NewCastError(\"Could not convert to string\")\n\t\t\t}\n\t\t}\n\t\treturn\n\tcase []int:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\tcase []int8:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\tcase []int16:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\tcase []int32:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\tcase []int64:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\tcase []uint:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\tcase []uint8:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\tcase []uint16:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\tcase []uint32:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\tcase []uint64:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\tcase []float32:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\tcase []float64:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\tcase []bool:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\n\tdefault:\n\t\terr = NewCastError(\"Could not convert to string\")\n\t}\n\treturn\n}", "func anyToBytes(i interface{}) []byte {\n\tif i == nil {\n\t\treturn nil\n\t}\n\tswitch value := i.(type) {\n\tcase string:\n\t\treturn []byte(value)\n\tcase []byte:\n\t\treturn value\n\tdefault:\n\t\treturn encode(i)\n\t}\n}", "func ToBytes(s string) []byte {\n\tif len(s) == 0 {\n\t\treturn []byte{}\n\t}\n\treturn Pad(append([]byte(s), 0))\n}", "func TestBinToString(t *testing.T) {\n\ttestStrings := [3]string{\n\t\t\"01100001011000100110001101100100011001010110011001100111011010000110100101101010011010110110110001101101011011100110111101110000011100010111001001110011011101000111010101110110011110000111100101111010\",\n\t\t\"0100000101000010010000110100010001000101010001100100011101001000010010010100101001001011010011000100110101001110010011110101000001010001010100100101001101010100010101010101011001010111010110000101100101011010\",\n\t\t\"0011000100110010001100110011010000110101001101100011011100111000001110010011000000100001010000000010001100100100001001010101111000100110001010100010100100101100001011100010111100111111\",\n\t}\n\n\texpectedStrings := 
[3]string{\n\t\t\"abcdefghijklmnopqrstuvxyz\",\n\t\t\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\",\n\t\t\"1234567890!@#$%^&*),./?\",\n\t}\n\n\tfor i, input := range testStrings {\n\t\toutput := binToString(input)\n\t\tif output != expectedStrings[i] {\n\t\t\tt.Fail()\n\t\t}\n\t}\n}", "func IntsBytes(s []int, f func(s int) byte) bytes.Bytes {\n\tm := bytes.Bytes(make([]byte, len(s)))\n\tfor i, v := range s {\n\t\tm[i] = f(v)\n\t}\n\treturn m\n}", "func MarshalAll(v ...interface{}) (b []byte) {\n\tfor i := range v {\n\t\tb = append(b, Marshal(v[i])...)\n\t}\n\treturn\n}", "func (cas CompactAddresses) MarshalBinary() ([]byte, error) {\n\tss := make([]string, len(cas))\n\tfor i, addr := range cas {\n\t\tdata, err := addr.MarshalBinary()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tss[i] = string(data)\n\t}\n\treturn bencode.EncodeBytes(ss)\n}", "func StringSlice(slice Slice) (str string) {\n\tsb := poolStringBuilder.Get().(*StringBuilder)\n\tsb.Grow(ceil32(ByteSizeJSONSlice(slice)))\n\tEncodeJSONSlice(sb, slice)\n\tstr = sb.String()\n\tsb.Reset()\n\tpoolStringBuilder.Put(sb)\n\treturn\n}" ]
[ "0.6619493", "0.62545997", "0.57310075", "0.56902635", "0.56827354", "0.56382436", "0.5633135", "0.5633135", "0.56282467", "0.5622973", "0.5592934", "0.5592934", "0.5557227", "0.55521715", "0.55445236", "0.55348366", "0.5509634", "0.54764235", "0.54658085", "0.5456215", "0.54434323", "0.54348266", "0.54173666", "0.54083514", "0.54067653", "0.5381687", "0.53661245", "0.5362351", "0.5354527", "0.5327918", "0.5303581", "0.5298446", "0.5295953", "0.5295728", "0.5289991", "0.52882016", "0.5278575", "0.5255649", "0.52538496", "0.52538496", "0.52538496", "0.5227309", "0.5219575", "0.5182812", "0.5182812", "0.5171736", "0.5163738", "0.5162624", "0.51623416", "0.51551855", "0.5152335", "0.5152209", "0.5151529", "0.51312906", "0.51303047", "0.51010776", "0.5080758", "0.507864", "0.50781375", "0.50759023", "0.50735503", "0.5071721", "0.5061175", "0.50583667", "0.50429785", "0.5040177", "0.50329274", "0.50090295", "0.500738", "0.49953395", "0.49816045", "0.4981445", "0.49686414", "0.49669528", "0.4963085", "0.4959674", "0.49425602", "0.4935899", "0.49268398", "0.4910846", "0.49032548", "0.4884643", "0.4884643", "0.48746097", "0.48705965", "0.48659346", "0.48622552", "0.48598716", "0.48583704", "0.48576888", "0.48570848", "0.48548543", "0.4841937", "0.4841645", "0.48347834", "0.4834601", "0.48282784", "0.4827087", "0.48240387", "0.48210543" ]
0.78266716
0
unmarshalStrings decodes a byte slice into an array of strings.
func unmarshalStrings(b []byte) (ret []string) {
	for {
		// If there's no more data then exit.
		if len(b) == 0 {
			return
		}

		// Decode size + data.
		n := binary.BigEndian.Uint16(b[0:2])
		ret = append(ret, string(b[2:n+2]))

		// Move the byte slice forward and retry.
		b = b[n+2:]
	}
}
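A round-trip sketch pairing unmarshalStrings with the marshalStrings encoder from the previous record; the main harness and sample values are assumptions for illustration only:

package main

import (
	"encoding/binary"
	"fmt"
	"reflect"
)

// Length-prefixed encoder from the previous record.
func marshalStrings(a []string) (ret []byte) {
	for _, s := range a {
		b := make([]byte, 2+len(s))
		binary.BigEndian.PutUint16(b[0:2], uint16(len(s)))
		copy(b[2:], s)
		ret = append(ret, b...)
	}
	return
}

// Decoder under test: reads a 2-byte big-endian length, then that many
// bytes, repeating until the buffer is exhausted.
func unmarshalStrings(b []byte) (ret []string) {
	for {
		if len(b) == 0 {
			return
		}
		n := binary.BigEndian.Uint16(b[0:2])
		ret = append(ret, string(b[2:n+2]))
		b = b[n+2:]
	}
}

func main() {
	in := []string{"alpha", "", "beta"}
	out := unmarshalStrings(marshalStrings(in))
	fmt.Println(reflect.DeepEqual(in, out)) // true
}

The decoder trusts its length prefixes: a truncated buffer where n+2 exceeds len(b) panics with a slice bounds error, so a caller handling untrusted input would need an explicit bounds check first.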
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func UnmarshalStrippedStringSlice(reader io.Reader, consumer runtime.Consumer) ([]StrippedString, error) {\n\tvar elements []json.RawMessage\n\tif err := consumer.Consume(reader, &elements); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar result []StrippedString\n\tfor _, element := range elements {\n\t\tobj, err := unmarshalStrippedString(element, consumer)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresult = append(result, obj)\n\t}\n\treturn result, nil\n}", "func (s *StringArray) DecodeFromBytes(b []byte) error {\n\ts.ArraySize = int32(binary.LittleEndian.Uint32(b[:4]))\n\tif s.ArraySize <= 0 {\n\t\treturn nil\n\t}\n\n\tvar offset = 4\n\tfor i := 1; i <= int(s.ArraySize); i++ {\n\t\tstr, err := DecodeString(b[offset:])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.Strings = append(s.Strings, str)\n\t\toffset += str.Len()\n\t}\n\n\treturn nil\n}", "func string2ByteSlice(str string) (bs []byte) {\n\tstrHdr := (*reflect.StringHeader)(unsafe.Pointer(&str))\n\tsliceHdr := (*reflect.SliceHeader)(unsafe.Pointer(&bs))\n\tsliceHdr.Data = strHdr.Data\n\tsliceHdr.Len = strHdr.Len\n\tsliceHdr.Cap = strHdr.Len\n\t// This KeepAlive line is essential to make the\n\t// String2ByteSlice function be always valid\n\t// when it is provided in other custom packages.\n\truntime.KeepAlive(&str)\n\treturn\n}", "func decodeByteSlice(s *Stream, val reflect.Value) error {\n\t// b = byte slice contained string content\n\tb, err := s.Bytes()\n\tif err != nil {\n\t\treturn wrapStreamError(err, val.Type())\n\t}\n\tval.SetBytes(b)\n\treturn nil\n}", "func string2bytes(s string) []byte {\n\tstringHeader := (*reflect.StringHeader)(unsafe.Pointer(&s))\n\n\tbh := reflect.SliceHeader{\n\t\tData: stringHeader.Data,\n\t\tLen: stringHeader.Len,\n\t\tCap: stringHeader.Len,\n\t}\n\n\treturn *(*[]byte)(unsafe.Pointer(&bh))\n}", "func bytesToStrings(in []byte) []string {\n\ts := strings.TrimRight(string(in), \"\\n\")\n\tif s == \"\" { // empty (not {\"\"}, len=1)\n\t\treturn []string{}\n\t}\n\treturn strings.Split(s, \"\\n\")\n}", "func marshalStrings(a []string) (ret []byte) {\n\tfor _, s := range a {\n\t\t// Create a slice for len+data\n\t\tb := make([]byte, 2+len(s))\n\t\tbinary.BigEndian.PutUint16(b[0:2], uint16(len(s)))\n\t\tcopy(b[2:], s)\n\n\t\t// Append it to the full byte slice.\n\t\tret = append(ret, b...)\n\t}\n\treturn\n}", "func DecodeStringArray(b []byte) (*StringArray, error) {\n\ts := &StringArray{}\n\tif err := s.DecodeFromBytes(b); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s, nil\n}", "func unmarshalString(v []byte) (string, error) {\n\tvar str string\n\terr := json.Unmarshal(v, &str)\n\tif err != nil {\n\t\treturn str, errors.Wrap(err, \"could not unmarshal bytes to string\")\n\t}\n\treturn str, nil\n}", "func UnmarshalBytes(src []byte) ([]byte, []byte, error) {\n\ttail, n, err := UnmarshalVarUint64(src)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"cannot unmarshal string size: %w\", err)\n\t}\n\tsrc = tail\n\tif uint64(len(src)) < n {\n\t\treturn nil, nil, fmt.Errorf(\"src is too short for reading string with size %d; len(src)=%d\", n, len(src))\n\t}\n\treturn src[n:], src[:n], nil\n}", "func (o *Strings) FromBytes(data []byte) error {\n\tbuf := uio.NewBigEndianBuffer(data)\n\tif buf.Len() == 0 {\n\t\treturn fmt.Errorf(\"Strings DHCP option must always list at least one String\")\n\t}\n\n\t*o = make(Strings, 0)\n\tfor buf.Has(1) {\n\t\tucLen := buf.Read8()\n\t\tif ucLen == 0 {\n\t\t\treturn fmt.Errorf(\"DHCP Strings must have length greater than 0\")\n\t\t}\n\t\t*o = 
append(*o, string(buf.CopyN(int(ucLen))))\n\t}\n\treturn buf.FinError()\n}", "func SolidityUnpackString(data string, types []string) ([]interface{}, error) {\n\n\tif data[0:2] == \"0x\" {\n\t\tdata = data[2:]\n\t}\n\tvar resp = make([]interface{}, len(types))\n\tvar stringCount = 0\n\tfor i := 0; i < len(types); i++ {\n\t\tpartialData := data[i*64 : (i+1)*64]\n\t\tconvertedData, count, err := parseNextValueFromSolidityHexStr(partialData, types[i], data[i*64:], len(types)-i, stringCount)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tstringCount = count\n\t\tresp[i] = convertedData\n\t}\n\treturn resp, nil\n}", "func NewFromByteSlice(bytes []byte) *Stringish {\n\treturn &Stringish{string(bytes)}\n}", "func convertStringToByte(s string) []byte {\n\tvar b []byte\n\tstrHeader := (*reflect.StringHeader)(unsafe.Pointer(&s))\n\tsliceHeader := (*reflect.SliceHeader)(unsafe.Pointer(&b))\n\tsliceHeader.Data = strHeader.Data\n\tsliceHeader.Len = strHeader.Len\n\tsliceHeader.Cap = strHeader.Len\n\treturn b\n}", "func DisassembleStrings(c *cryptos.Crypto, s []byte) ([]string, error) {\n\tr, err := DisassembleString(c, s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn strings.Split(r, \" \"), nil\n}", "func StringToByteSlice(s *string) []byte {\n\tvar bytes []byte\n\tstringHeader := (*reflect.StringHeader)(unsafe.Pointer(s))\n\tbytesHeader := (*reflect.SliceHeader)(unsafe.Pointer(&bytes))\n\tbytesHeader.Data = stringHeader.Data\n\tbytesHeader.Len = stringHeader.Len\n\tbytesHeader.Cap = stringHeader.Len\n\treturn bytes\n}", "func StringBytes(b []byte) string { return *(*string)(Pointer(&b)) }", "func ByteSlice2String(bs []byte) string {\n\treturn *(*string)(unsafe.Pointer(&bs))\n}", "func (d LEByteDecoder) DecodeStringArray(buf *bytes.Reader, size int) ([]string, error) {\n\t// Use minimum string byte size = 4\n\tif err := CheckSize(buf, size*4); err != nil {\n\t\treturn nil, errors.Wrap(err, \"decoding string array\")\n\t}\n\n\t// String format is: [size|string] where size is a u32.\n\tslice := make([]string, size)\n\tfor i := 0; i < size; i++ {\n\t\tvar strSize uint32\n\t\tvar err error\n\t\tif strSize, err = d.DecodeUint32(buf); err != nil {\n\t\t\treturn slice, errors.Wrap(err, \"decoding string array\")\n\t\t}\n\n\t\tvar value []uint8\n\t\tvalue, err = d.DecodeUint8Array(buf, int(strSize))\n\t\tif err != nil {\n\t\t\treturn slice, errors.Wrap(err, \"decoding string array\")\n\t\t}\n\t\tslice[i] = string(value)\n\t}\n\treturn slice, nil\n}", "func UnmarshalBytes(src []byte) (map[string]string, error) {\n\treturn UnmarshalBytesWithLookup(src, nil)\n}", "func s2b(s string) []byte {\n\tsh := (*reflect.StringHeader)(unsafe.Pointer(&s))\n\tbh := reflect.SliceHeader{\n\t\tData: sh.Data,\n\t\tLen: sh.Len,\n\t\tCap: sh.Len,\n\t}\n\treturn *(*[]byte)(unsafe.Pointer(&bh))\n}", "func UnsafeStringBytes(s *string) []byte {\n\treturn *(*[]byte)(unsafe.Pointer((*reflect.SliceHeader)(unsafe.Pointer(s))))\n}", "func decodeBulkString(r BytesReader) (interface{}, error) {\n\t// First comes the length of the bulk string, an integer\n\tcnt, err := decodeInteger(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tswitch {\n\tcase cnt == -1:\n\t\t// Special case to represent a nil bulk string\n\t\treturn nil, nil\n\n\tcase cnt < -1:\n\t\treturn nil, ErrInvalidBulkString\n\n\tdefault:\n\t\t// Then the string is cnt long, and bytes read is cnt+n+2 (for ending CRLF)\n\t\tneed := cnt + 2\n\t\tgot := 0\n\t\tbuf := make([]byte, need)\n\t\tfor {\n\t\t\tnb, err := r.Read(buf[got:])\n\t\t\tif err != nil 
{\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tgot += nb\n\t\t\tif int64(got) == need {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\treturn string(buf[:got-2]), err\n\t}\n}", "func ExampleUnmarshal_slice() {\n\tstr := \"[1,2,3]\"\n\tobj, err := Unmarshal([]byte(str))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tslc, ok := obj.([]interface{})\n\tif ok {\n\t\tfmt.Println(\"Length:\", len(slc))\n\t}\n\t// Output: Length: 3\n}", "func (bs *ByteSlice) LoadString(str string) error {\n\tb, err := hex.DecodeString(str)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*bs = ByteSlice(b)\n\treturn nil\n}", "func (bs *ByteSlice) UnmarshalJSON(b []byte) error {\n\tvar str string\n\tif err := json.Unmarshal(b, &str); err != nil {\n\t\treturn err\n\t}\n\treturn bs.LoadString(str)\n}", "func unpackArgSString(x []string) (unpacked **C.char, allocs *cgoAllocMap) {\n\tif x == nil {\n\t\treturn nil, nil\n\t}\n\tallocs = new(cgoAllocMap)\n\tdefer runtime.SetFinalizer(&unpacked, func(***C.char) {\n\t\tgo allocs.Free()\n\t})\n\n\tlen0 := len(x)\n\tmem0 := allocPCharMemory(len0)\n\tallocs.Add(mem0)\n\th0 := &sliceHeader{\n\t\tData: uintptr(mem0),\n\t\tCap: len0,\n\t\tLen: len0,\n\t}\n\tv0 := *(*[]*C.char)(unsafe.Pointer(h0))\n\tfor i0 := range x {\n\t\tv0[i0], _ = unpackPCharString(x[i0])\n\t}\n\th := (*sliceHeader)(unsafe.Pointer(&v0))\n\tunpacked = (**C.char)(unsafe.Pointer(h.Data))\n\treturn\n}", "func reverseUTF8ByteSlice(s []byte) []byte {\n\ts2 := make([]byte, len(s))\n\tcopy(s2, s)\n\tt := len(s2)\n\tfor i := 0; i < len(s); {\n\t\t_, size := utf8.DecodeRune(s2[i:])\n\t\tt -= size\n\t\tcopy(s[t:t+size], s2[i:i+size])\n\t\ti += size\n\t}\n\treturn s\n}", "func SlicePtrFromStrings(ss []string) ([]*byte, error) {\n\tvar err error\n\tbb := make([]*byte, len(ss)+1)\n\tfor i := 0; i < len(ss); i++ {\n\t\tbb[i], err = BytePtrFromString(ss[i])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tbb[len(ss)] = nil\n\treturn bb, nil\n}", "func MarshalStringSlice(item Any) []*string {\n\tvar data []*string\n\n\tswitch reflect.TypeOf(item).Kind() {\n\tcase reflect.Slice:\n\t\tval := reflect.ValueOf(item)\n\t\tmax := val.Len()\n\t\tfor i := 0; i < max; i++ {\n\t\t\ts := fmt.Sprint(val.Index(i).Interface())\n\t\t\tdata = append(data, &s)\n\t\t}\n\t}\n\treturn data\n}", "func StringToSliceHookFunc(sep string) DecodeHookFunc {\n\treturn func(\n\t\tf reflect.Kind,\n\t\tt reflect.Kind,\n\t\tdata interface{}) (interface{}, error) {\n\t\tif f != reflect.String || t != reflect.Slice {\n\t\t\treturn data, nil\n\t\t}\n\n\t\traw := data.(string)\n\t\tif raw == \"\" {\n\t\t\treturn []string{}, nil\n\t\t}\n\n\t\treturn strings.Split(raw, sep), nil\n\t}\n}", "func StringToBytes(s string) []byte {\n\tsh := (*reflect.StringHeader)(unsafe.Pointer(&s))\n\tb := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{\n\t\tData: sh.Data,\n\t\tLen: sh.Len,\n\t\tCap: sh.Len,\n\t}))\n\t// ensure the underlying string doesn't get GC'ed before the assignment happens\n\truntime.KeepAlive(&s)\n\n\treturn b\n}", "func (d *decoder) unmarshalString(t reflect.Type, v reflect.Value, n nestedTypeData) error {\n\tsize := int64(d.readUint(8))\n\tif size < 0 {\n\t\treturn newExpectError(ErrStringTooLong, math.MaxInt64, size)\n\t}\n\tif ptr := d.readUint(8); ptr == noAlloc {\n\t\tif t.Kind() != reflect.Ptr {\n\t\t\treturn newValueError(ErrUnexpectedNullRef, \"string\")\n\t\t}\n\t\tv.Set(reflect.Zero(t))\n\t\treturn nil\n\t}\n\tmax := n.Unnest()\n\tif max != nil && int(size) > *max {\n\t\treturn newExpectError(ErrStringTooLong, *max, size)\n\t}\n\ts := 
string(d.buffer[d.nextObject : d.nextObject+int(size)])\n\tif t.Kind() == reflect.Ptr {\n\t\tv.Set(reflect.New(t.Elem()))\n\t\tv.Elem().Set(reflect.ValueOf(s))\n\t} else {\n\t\tv.Set(reflect.ValueOf(s))\n\t}\n\td.nextObject += align(int(size), 8)\n\treturn nil\n}", "func (s *String) DecodeFromBytes(b []byte) error {\n\tif len(b) < 4 {\n\t\treturn errors.NewErrTooShortToDecode(s, \"should be longer than 4 bytes\")\n\t}\n\n\ts.Length = int32(binary.LittleEndian.Uint32(b[:4]))\n\tif s.Length <= 0 {\n\t\treturn nil\n\t}\n\ts.Value = b[4 : 4+s.Length]\n\treturn nil\n}", "func BytesString(b []byte) string {\n\treturn *(*string)(unsafe.Pointer(&b))\n}", "func StringSlice(input interface{}) (output []string, err error) {\n\tvar castError error\n\tswitch castValue := input.(type) {\n\tcase StringSlicer:\n\t\toutput = castValue.StringSlice()\n\t\treturn\n\tcase []string:\n\t\toutput = castValue\n\t\treturn\n\tcase []interface{}:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\tif output[index], castError = String(castValue[index]); castError != nil {\n\t\t\t\t//todo. return better error\n\t\t\t\terr = NewCastError(\"Could not convert to string\")\n\t\t\t}\n\t\t}\n\t\treturn\n\tcase []int:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\tcase []int8:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\tcase []int16:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\tcase []int32:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\tcase []int64:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\tcase []uint:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\tcase []uint8:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\tcase []uint16:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\tcase []uint32:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\tcase []uint64:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\tcase []float32:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\tcase []float64:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\tcase []bool:\n\t\toutput = make([]string, len(castValue))\n\t\tfor index := range castValue {\n\t\t\toutput[index], _ = String(castValue[index])\n\t\t}\n\t\treturn\n\n\tdefault:\n\t\terr = NewCastError(\"Could not convert to string\")\n\t}\n\treturn\n}", "func StringSlicePtr(ss []string) []*byte {\n\tbb := 
make([]*byte, len(ss)+1)\n\tfor i := 0; i < len(ss); i++ {\n\t\tbb[i] = StringBytePtr(ss[i])\n\t}\n\tbb[len(ss)] = nil\n\treturn bb\n}", "func Unmarshal([]byte) (WireMessage, error) { return nil, nil }", "func StringToBytes(s string) []byte {\n\tsh := (*reflect.StringHeader)(unsafe.Pointer(&s))\n\tbh := reflect.SliceHeader{sh.Data, sh.Len, 0}\n\treturn *(*[]byte)(unsafe.Pointer(&bh))\n}", "func StringToBytes(s string) []byte {\n\tsh := (*reflect.StringHeader)(unsafe.Pointer(&s))\n\tbh := reflect.SliceHeader{sh.Data, sh.Len, 0}\n\treturn *(*[]byte)(unsafe.Pointer(&bh))\n}", "func stringToBytes(s string) (bytes []byte) {\n\tstr := (*reflect.StringHeader)(unsafe.Pointer(&s))\n\tslice := (*reflect.SliceHeader)(unsafe.Pointer(&bytes))\n\tslice.Data = str.Data\n\tslice.Len = str.Len\n\treturn bytes\n}", "func (b *BulkStringBytes) UnmarshalRESP(br *bufio.Reader) error {\n\tif err := assertBufferedPrefix(br, BulkStringPrefix); err != nil {\n\t\treturn err\n\t}\n\tn, err := bytesutil.BufferedIntDelim(br)\n\tnn := int(n)\n\tif err != nil {\n\t\treturn err\n\t} else if n == -1 {\n\t\tb.B = nil\n\t\treturn nil\n\t} else {\n\t\tb.B = bytesutil.Expand(b.B, nn)\n\t\tif b.B == nil {\n\t\t\tb.B = []byte{}\n\t\t}\n\t}\n\n\tif _, err := io.ReadFull(br, b.B); err != nil {\n\t\treturn err\n\t} else if _, err := bytesutil.BufferedBytesDelim(br); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func UnpackString(ch unsafe.Pointer, chlen int) string {\n\treturn C.GoStringN((*C.char)(ch), C.int(chlen))\n}", "func StringBytes(s string) []byte {\n\treturn *(*[]byte)(unsafe.Pointer(&s))\n}", "func StringToBytes(s string) []byte {\n\tstrstruct := stringStructOf(&s)\n\treturn *(*[]byte)(unsafe.Pointer(&sliceType2{\n\t\tArray: strstruct.Str,\n\t\tLen: strstruct.Len,\n\t\tCap: strstruct.Len,\n\t}))\n}", "func StringToByteArray(v string) []byte {\n\tvar slcHdr reflect.SliceHeader\n\tsh := *(*reflect.StringHeader)(unsafe.Pointer(&v))\n\tslcHdr.Data = sh.Data\n\tslcHdr.Cap = sh.Len\n\tslcHdr.Len = sh.Len\n\treturn *(*[]byte)(unsafe.Pointer(&slcHdr))\n}", "func byteSliceToString(b []byte) string {\n\treturn *(*string)(unsafe.Pointer(&b))\n}", "func decodeByteArray(s *Stream, val reflect.Value) error {\n\t// getting detailed information on encoded data\n\tkind, size, err := s.Kind()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// getting the length of declared ByteArray\n\tvlen := val.Len()\n\n\tswitch kind {\n\t// put a byte in a byte array\n\tcase Byte:\n\t\tif vlen == 0 {\n\t\t\treturn &decodeError{msg: \"input string too long\", typ: val.Type()}\n\t\t}\n\t\tif vlen > 1 {\n\t\t\treturn &decodeError{msg: \"input string too short\", typ: val.Type()}\n\t\t}\n\n\t\t// get the content and stores in the index 0\n\t\tbv, _ := s.Uint()\n\t\tval.Index(0).SetUint(bv)\n\n\t// put string in a byte array\n\tcase String:\n\t\tif uint64(vlen) < size {\n\t\t\treturn &decodeError{msg: \"input string too long\", typ: val.Type()}\n\t\t}\n\t\tif uint64(vlen) > size {\n\t\t\treturn &decodeError{msg: \"input string too short\", typ: val.Type()}\n\t\t}\n\n\t\t// transfer the byte array to byte slice and place string content inside\n\t\tslice := val.Slice(0, vlen).Interface().([]byte)\n\t\tif err := s.readFull(slice); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// reject cases where single byte encoding should have been used\n\t\tif size == 1 && slice[0] < 128 {\n\t\t\treturn wrapStreamError(ErrCanonSize, val.Type())\n\t\t}\n\t// byte array should not contain any list\n\tcase List:\n\t\treturn wrapStreamError(ErrExpectedString, 
val.Type())\n\t}\n\treturn nil\n}", "func StringToBytes(s string) []byte {\n\treturn *(*[]byte)(unsafe.Pointer(&s))\n}", "func ByteSlice(s string) []byte {\n\tsh := *(*StringHeader)(unsafe.Pointer(&s))\n\tbh := SliceHeader{\n\t\tData: sh.Data,\n\t\tLen: sh.Len,\n\t\tCap: sh.Len,\n\t}\n\n\treturn *(*[]byte)(unsafe.Pointer(&bh))\n}", "func FromStringUnsafe(s string) []byte {\n\tvar b []byte\n\tpb := (*reflect.SliceHeader)(unsafe.Pointer(&b))\n\tps := (*reflect.StringHeader)(unsafe.Pointer(&s))\n\tpb.Data = ps.Data\n\tpb.Len = ps.Len\n\tpb.Cap = ps.Len\n\treturn b\n}", "func decode(in []byte) ([][]byte, error) {\n\tif len(in) == 0 {\n\t\treturn nil, nil\n\t}\n\n\toffset, length, typ := decodeLength(in)\n\tend := offset + length\n\n\tif end > uint64(len(in)) {\n\t\treturn nil, fmt.Errorf(\"read length prefix of %d but there is only %d bytes of unconsumed input\",\n\t\t\tlength, uint64(len(in))-offset)\n\t}\n\n\tsuffix, err := decode(in[end:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tswitch typ {\n\tcase reflect.String:\n\t\treturn append([][]byte{in[offset:end]}, suffix...), nil\n\tcase reflect.Slice:\n\t\tprefix, err := decode(in[offset:end])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn append(prefix, suffix...), nil\n\t}\n\n\treturn suffix, nil\n}", "func decodeByteSequence(val []byte) ([][]byte, error) {\n\ts := string(val)\n\tvar res [][]byte\n\tif s == \"\" {\n\t\treturn res, nil\n\t}\n\tfor _, v := range strings.Split(s, \",\") {\n\t\tbs, err := hexutil.Decode(v)\n\t\tif err != nil {\n\t\t\treturn [][]byte{}, err\n\t\t}\n\t\tres = append(res, bs)\n\t}\n\treturn res, nil\n}", "func (r *Decoder) Strings() []string {\n\tres := make([]string, r.Len())\n\tfor i := range res {\n\t\tres[i] = r.String()\n\t}\n\treturn res\n}", "func BytesToString(b []byte) string { return *(*string)(unsafe.Pointer(&b)) }", "func Str2bytes(s string) []byte {\n\tstringHeader := *(*[2]int)(unsafe.Pointer(&s))\n\tvar sliceHeader [3]int\n\tsliceHeader[0] = stringHeader[0]\n\tsliceHeader[1] = stringHeader[1]\n\tsliceHeader[2] = stringHeader[1]\n\treturn *(*[]byte)(unsafe.Pointer(&sliceHeader))\n}", "func ToUTF8StringSlice(v []byte) ([]string, error) {\n\tpacket, _, err := DecodeNodePacket(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !packet.IsSlice() {\n\t\treturn nil, fmt.Errorf(\"v not a slice: %v\", utils.FormatBytes(v))\n\t}\n\tresult := make([]string, 0)\n\tfor _, p := range packet.PrimitivePackets {\n\t\tv, _ := p.ToUTF8String()\n\t\tresult = append(result, v)\n\t}\n\treturn result, nil\n}", "func TestSliceOfCustomByte(t *testing.T) {\n\ttype Uint8 uint8\n\n\ta := []Uint8(\"hello\")\n\n\tdata, err := Marshal(a)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tvar b []Uint8\n\terr = Unmarshal(data, &b)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !reflect.DeepEqual(a, b) {\n\t\tt.Fatalf(\"expected %v == %v\", a, b)\n\t}\n}", "func (this *HandlerBase) getStringSlice(s string) []string {\n\ta := this.get(s)\n\tif m, ok := a.([]interface{}); ok {\n\t\tsl := make([]string, len(m))\n\t\tfor i, v := range m {\n\t\t\tif s, ok := v.(string); ok {\n\t\t\t\tsl[i] = string(s)\n\t\t\t}\n\t\t}\n\t\treturn sl\n\t}\n\treturn nil\n}", "func BytesToStr(in []byte, decoder *encoding.Decoder) (string, error) {\n\ti := bytes.NewReader(in)\n\to := transform.NewReader(i, decoder)\n\td, e := ioutil.ReadAll(o)\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\treturn string(d), nil\n}", "func StringBytes(s string) []byte {\n\treturn []byte(s)\n}", "func UnmarshalTinySlice(r io.Reader, v interface{}) error 
{\n\t// v must be a pointer\n\tpval := reflect.ValueOf(v)\n\tif pval.Kind() != reflect.Ptr || pval.IsNil() {\n\t\treturn errors.New(\"cannot unmarshal tiny slice into invalid pointer\")\n\t}\n\tval := pval.Elem()\n\tswitch k := val.Kind(); k {\n\tcase reflect.Slice:\n\t\t// slices are variable length, have to allocate them first,\n\t\t// for that we need to read the 1 byte length prefix\n\t\tsliceLen, err := UnmarshalUint8(r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// sanity-check the sliceLen, otherwise you can crash a peer by making\n\t\t// them allocate a massive slice\n\t\tif uint64(sliceLen)*uint64(val.Type().Elem().Size()) > MaxSliceSize {\n\t\t\treturn ErrSliceTooLarge\n\t\t}\n\n\t\tif sliceLen == 0 {\n\t\t\tval.Set(reflect.MakeSlice(val.Type(), 0, 0))\n\t\t\treturn nil\n\t\t}\n\t\tval.Set(reflect.MakeSlice(val.Type(), int(sliceLen), int(sliceLen)))\n\n\t\t// special case for byte slices\n\t\tif val.Type().Elem().Kind() == reflect.Uint8 {\n\t\t\t// convert val to a slice and read into it directly\n\t\t\tb := val.Slice(0, val.Len())\n\t\t\t_, err := io.ReadFull(r, b.Bytes()) // n (1st return param) is already checked by io.ReadFull\n\t\t\treturn err\n\t\t}\n\t\t// create regular sia decoder for the last part\n\t\td := NewDecoder(r)\n\t\t// slices are unmarshalled by sequentially unmarshalling their elements\n\t\tfor i := 0; i < val.Len(); i++ {\n\t\t\terr := d.Decode(val.Index(i).Addr().Interface())\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"UnmarshalTinySlice failed to unmarshal element %d: %v\", i, err)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\n\tcase reflect.String:\n\t\tvar b []byte\n\t\terr := UnmarshalTinySlice(r, &b)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tval.SetString(string(b))\n\t\treturn nil\n\n\tdefault:\n\t\treturn fmt.Errorf(\"UnmarshalTinySlice: non-slice type %s (kind: %s) is not supported\",\n\t\t\tval.Type().String(), k.String())\n\t}\n}", "func stringToBytes(in string) []byte {\n\tvar buf bytes.Buffer\n\trunes := []rune(in)\n\n\tbinary.Write(&buf, binary.BigEndian, int16(len(runes)))\n\tfor _, r := range runes {\n\t\tbinary.Write(&buf, binary.BigEndian, uint16(r))\n\t}\n\treturn buf.Bytes()\n}", "func bytesToString(bs []byte) string {\n\treturn *(*string)(unsafe.Pointer(&bs))\n}", "func String(b []byte) (s string) {\n pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))\n pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))\n pstring.Data = pbytes.Data\n pstring.Len = pbytes.Len\n return\n}", "func StringFromBytes(b []byte) String {\n\treturn StringFromString(string(b))\n}", "func BytesToString(b []byte) string {\n\tbh := (*reflect.SliceHeader)(unsafe.Pointer(&b))\n\tsh := reflect.StringHeader{bh.Data, bh.Len}\n\treturn *(*string)(unsafe.Pointer(&sh))\n}", "func BytesToString(b []byte) string {\n\tbh := (*reflect.SliceHeader)(unsafe.Pointer(&b))\n\tsh := reflect.StringHeader{bh.Data, bh.Len}\n\treturn *(*string)(unsafe.Pointer(&sh))\n}", "func convertBytesToString(b []byte) string {\n\treturn *(*string)(unsafe.Pointer(&b))\n}", "func cStringArrayToSlice(array **C.char, length int) [](*C.char) {\n\treturn (*[1 << 30](*C.char))(unsafe.Pointer(array))[:length:length]\n}", "func unmarshalMultiStrJSON(v []byte) ([]string, error) {\n\tdops := [][]string{}\n\tops := []string{}\n\n\terr := json.Unmarshal(v, &dops)\n\tif err != nil {\n\t\treturn ops, errors.Wrap(err, \"could not unmarshal bytes into []string\")\n\t}\n\n\tfor _, i := range dops {\n\t\tops = append(ops, i...)\n\t}\n\treturn ops, nil\n}", "func cleanBytesToString(s []byte) string 
{\n\treturn strings.SplitN(string(s), \"\\000\", 2)[0]\n}", "func BytesToString(b []byte) string {\n\treturn *(*string)(unsafe.Pointer(&b))\n}", "func BytesToString(b []byte) string {\n\treturn *(*string)(unsafe.Pointer(&b))\n}", "func BytesToString(b []byte) string {\n\treturn *(*string)(unsafe.Pointer(&b))\n}", "func BytesToString(b []byte) string {\n\treturn *(*string)(unsafe.Pointer(&b))\n}", "func BytesToString(b []byte) string {\n\treturn *(*string)(unsafe.Pointer(&b))\n}", "func BytesToString(b []byte) string {\n\treturn *(*string)(unsafe.Pointer(&b))\n}", "func BytesToString(b []byte) string {\n\treturn *(*string)(unsafe.Pointer(&b))\n}", "func BytesToString(b []byte) string {\n\treturn *(*string)(unsafe.Pointer(&b))\n}", "func (arg1 *UConverter) FromUnicode(in *[]byte, out *[]uint16, offsets *[]int32, arg5 bool, arg6 *UErrorCode)", "func bytesToString(bytes []byte) (s string) {\n\tslice := (*reflect.SliceHeader)(unsafe.Pointer(&bytes))\n\tstr := (*reflect.StringHeader)(unsafe.Pointer(&s))\n\tstr.Data = slice.Data\n\tstr.Len = slice.Len\n\treturn s\n}", "func bytesToString(bytes []byte) (s string) {\n\tslice := (*reflect.SliceHeader)(unsafe.Pointer(&bytes))\n\tstr := (*reflect.StringHeader)(unsafe.Pointer(&s))\n\tstr.Data = slice.Data\n\tstr.Len = slice.Len\n\treturn s\n}", "func getBytes(raw string) []byte {\n\trawSlice := strings.FieldsFunc(raw, isArray)\n\tresult := make([]byte, len(rawSlice))\n\n\ti := 0\n\tfor _, b := range rawSlice {\n\t\tfmt.Sscan(b, &result[i])\n\t\ti++\n\t}\n\treturn result\n}", "func unmarshal(s *string, v interface{}) error {\n\tif s != nil && len(*s) > 0 {\n\t\terr := json.Unmarshal([]byte(*s), v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func SliceByteToString(b []byte) string {\n\tbh := (*reflect.SliceHeader)(unsafe.Pointer(&b))\n\tsh := reflect.StringHeader{\n\t\tData: bh.Data,\n\t\tLen: bh.Len,\n\t}\n\treturn *(*string)(unsafe.Pointer(&sh))\n}", "func decodeString(b byteReader) (string, error) {\n\tlength, err := binary.ReadVarint(b)\n\tif length < 0 {\n\t\terr = fmt.Errorf(\"found negative string length during decoding: %d\", length)\n\t}\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tbuf := getBuf(int(length))\n\tdefer putBuf(buf)\n\n\tif _, err := io.ReadFull(b, buf); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(buf), nil\n}", "func StringToBytes(s string) []byte {\n\tx := (*[2]uintptr)(unsafe.Pointer(&s))\n\th := [3]uintptr{x[0], x[1], x[1]}\n\treturn *(*[]byte)(unsafe.Pointer(&h))\n}", "func String(b []byte) string {\n\treturn *(*string)(unsafe.Pointer(&b))\n}", "func UnshiftString(array []string, val string) {\n\tnewArray := []string{val}\n\tarray = append(newArray, array...)\n}", "func StringToSliceByte(something string) []byte {\n\tsliceByte := make([]byte, len(something))\n\tcopy(sliceByte, something)\n\treturn sliceByte\n}", "func (b *BulkString) UnmarshalRESP(br *bufio.Reader) error {\n\tif err := assertBufferedPrefix(br, BulkStringPrefix); err != nil {\n\t\treturn err\n\t}\n\tn, err := bytesutil.BufferedIntDelim(br)\n\tif err != nil {\n\t\treturn err\n\t} else if n == -1 {\n\t\tb.S = \"\"\n\t\treturn nil\n\t}\n\n\tscratch := bytesutil.GetBytes()\n\tdefer bytesutil.PutBytes(scratch)\n\t*scratch = bytesutil.Expand(*scratch, int(n))\n\n\tif _, err := io.ReadFull(br, *scratch); err != nil {\n\t\treturn err\n\t} else if _, err := bytesutil.BufferedBytesDelim(br); err != nil {\n\t\treturn err\n\t}\n\n\tb.S = string(*scratch)\n\treturn nil\n}", "func decodeSimpleString(r BytesReader) (interface{}, 
error) {\n\tv, err := r.ReadBytes('\\r')\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Presume next byte was \\n\n\t_, err = r.ReadByte()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn string(v[:len(v)-1]), nil\n}", "func ReadBufAsStringSlice(buf Records) ([]string, error) {\n\tres := make([]string, 0, 10)\n\tvar rdr Reader\n\terr := rdr.Reset(buf, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor {\n\t\tr, err := rdr.Get(nil)\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tres = append(res, string(r))\n\t\trdr.Next(nil)\n\t}\n\treturn res, nil\n}", "func DeserializeString(d []byte) (String, error) {\n\treturn String(d), nil\n}", "func Slice(s string) (b []byte) {\n\tpbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))\n\tpstring := (*reflect.StringHeader)(unsafe.Pointer(&s))\n\tpbytes.Data = pstring.Data\n\tpbytes.Len = pstring.Len\n\tpbytes.Cap = pstring.Len\n\treturn\n}", "func StringToBytes(s string) []byte {\n\tsp := *(*[2]uintptr)(unsafe.Pointer(&s))\n\tbp := [3]uintptr{sp[0], sp[1], sp[1]}\n\treturn *(*[]byte)(unsafe.Pointer(&bp))\n}", "func StringToBytes(s string) []byte {\n\tsp := *(*[2]uintptr)(unsafe.Pointer(&s))\n\tbp := [3]uintptr{sp[0], sp[1], sp[1]}\n\treturn *(*[]byte)(unsafe.Pointer(&bp))\n}", "func StringToBytes(s string) []byte {\n\tsp := *(*[2]uintptr)(unsafe.Pointer(&s))\n\tbp := [3]uintptr{sp[0], sp[1], sp[1]}\n\treturn *(*[]byte)(unsafe.Pointer(&bp))\n}", "func readString(dst, b []byte) ([]byte, []byte, error) {\n\tvar n uint64\n\tvar err error\n\tmustDecode := (b[0]&128 == 128) // huffman encoded\n\tb, n, err = readInt(7, b)\n\tif err == nil && uint64(len(b)) < n {\n\t\terr = fmt.Errorf(\"unexpected size: %d < %d\", len(b), n)\n\t}\n\tif err == nil {\n\t\tif mustDecode {\n\t\t\tdst = HuffmanDecode(dst, b[:n])\n\t\t} else {\n\t\t\tdst = append(dst, b[:n]...)\n\t\t}\n\t\tb = b[n:]\n\t}\n\treturn b, dst, err\n}" ]
[ "0.6354693", "0.621544", "0.6198837", "0.6116478", "0.59895563", "0.59371823", "0.58865935", "0.5883952", "0.58423537", "0.5834272", "0.5758893", "0.5716716", "0.5696275", "0.5684589", "0.5683891", "0.56775504", "0.56536967", "0.5644599", "0.5643686", "0.5639748", "0.56235594", "0.5622889", "0.5614032", "0.56078106", "0.559497", "0.5588304", "0.5586089", "0.5569778", "0.5562483", "0.55302405", "0.5524486", "0.54809654", "0.5477233", "0.5453917", "0.54418224", "0.54225415", "0.53901035", "0.5386258", "0.53729206", "0.53729206", "0.5370713", "0.5369764", "0.53661937", "0.5360221", "0.5343872", "0.53233385", "0.53108734", "0.5281567", "0.5271272", "0.5251743", "0.52412295", "0.52318656", "0.522511", "0.5210429", "0.5205436", "0.5194601", "0.5161854", "0.515782", "0.51396656", "0.51366514", "0.5132995", "0.5129772", "0.5124938", "0.51219463", "0.5121675", "0.51070327", "0.51010203", "0.51010203", "0.509846", "0.5082425", "0.5076642", "0.5071031", "0.50700957", "0.50700957", "0.50700957", "0.50700957", "0.50700957", "0.50700957", "0.50700957", "0.50700957", "0.50694644", "0.5066516", "0.5066516", "0.506125", "0.50505185", "0.5049018", "0.5044772", "0.5032911", "0.50292265", "0.5024079", "0.5022885", "0.5022583", "0.5022301", "0.50094503", "0.5001858", "0.49961683", "0.49956402", "0.49956402", "0.49956402", "0.49939415" ]
0.7369895
0
logging a method calling information
func WithMethodCallingLoggerServerInterceptor(logger *logger.Logger) grpc.UnaryServerInterceptor { return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { l := logger.WithServiceInfo(info.FullMethod) requestID := GetRequestIDFromContext(ctx) l = l.WithRequestID(requestID) l.Println("calling: " + info.FullMethod) if r, ok := req.(fmt.Stringer); ok { l.Println("Body: " + r.String()) } resp, err = handler(ctx, req) if resp != nil { if r, ok := resp.(fmt.Stringer); ok { l.Println("Body: " + r.String()) } } return resp, err } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *B) Log(args ...interface{})", "func (c *T) Log(args ...interface{})", "func Info(msg string) {\n log.Info(msg)\n}", "func (conf Configuration) OnCall(method string, cont Context, argument interface{}) {\n\tlog.Printf(\"%v %v: Called\\nContext=%# v\\nArgument=%# v\\n\\n\", conf.Name, method, pretty.Formatter(cont), pretty.Formatter(argument))\n}", "func (lm *loggingMiddleware) logging(begin time.Time, method string, err error) {\n\t_ = lm.logger.Log(\"method\", method, \"took\", time.Since(begin), \"err\", err)\n}", "func (_m *StdLogger) Info(args ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, args...)\n\t_m.Called(_ca...)\n}", "func (_m *Logger) Info(args ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, args...)\n\t_m.Called(_ca...)\n}", "func (cLogger CustomLogger) Info(args ...interface{}) {\n\tlevel.Info(log.With(loggerInstance, \"caller\", getCallerInfo())).Log(args...)\n}", "func (_m *T) Log(args ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, args...)\n\t_m.Called(_ca...)\n}", "func (c Context) Info(msg string) {\n\tc.Log(30, msg, GetCallingFunction())\n}", "func (l *stubLogger) Infof(format string, args ...interface{}) {}", "func Log() {\n\n}", "func (logger *Logger) Info(message string) {\n\t_, fn, line, _ := runtime.Caller(1)\n\tlog.WithFields(logger.getLogFields(fn, line)).Info(message)\n}", "func (l *Logger) PrintCaller(skip int) {\n\tl.Log(Info, SPrintCaller(skip+2))\n}", "func (*traceLogger) Infof(msg string, args ...any) { log.Infof(msg, args...) }", "func TraceCall(log logr.Logger) {\n\tcallerInfo := GetCaller(MyCaller, true)\n\tlog.V(TraceLevel).Info(\"Entering function\", \"function\", callerInfo.FunctionName, \"source\", callerInfo.SourceFile, \"line\", callerInfo.SourceLine)\n}", "func Info(m ...interface{}) {\n\tpc := make([]uintptr, 15)\n\tn := runtime.Callers(2, pc)\n\tframes := runtime.CallersFrames(pc[:n])\n\tframe, _ := frames.Next()\n\tlog.Printf(\"INFO [%v] %v\", frame.Function, fmt.Sprintln(m...))\n}", "func Logging(ctx context.Context, request interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (response interface{}, err error) {\r\n\tstart := time.Now()\r\n\r\n\tlog.CtxInfof(ctx, \"calling %s, request=%s\", info.FullMethod, marshal(request))\r\n\tresponse, err = handler(ctx, request)\r\n\tlog.CtxInfof(ctx, \"finished %s, cost=%v, response=%v, err=%v\", info.FullMethod, time.Since(start), response, err)\r\n\r\n\treturn response, err\r\n}", "func (px *Paxos) clog(dbg bool, funcname, format string, args ...interface{}) {\n\tif dbg {\n\t\tl1 := fmt.Sprintf(\"[%s] me:%d\\n\", funcname, px.me)\n\t\tl2 := fmt.Sprintf(\"....\"+format, args...)\n\t\tfmt.Println(l1 + l2)\n\t}\n}", "func (l *customLogrus) Info(v ...interface{}) {\n\tl.mu.Lock()\n\tl.Rotate()\n\tl.Instance.WithField(\"file\", l.CallFileInfo()).Info(v)\n\tl.mu.Unlock()\n}", "func (l *Logger) Info(args ...interface{}) {\n\t_, file, line, _ := runtime.Caller(1)\n\tl.Log(InfoLevel, file, line, \"\", args...)\n}", "func Info(args ...interface{}) {\n\tif !rtLogConf.showFileInfo {\n\t\tlogrus.Info(args...)\n\t\treturn\n\t}\n\n\tif pc, file, line, ok := runtime.Caller(1); ok {\n\t\tfileName, funcName := getBaseName(file, runtime.FuncForPC(pc).Name())\n\t\tlogrus.WithField(fileTag, fileName).WithField(lineTag, line).WithField(funcTag, funcName).Info(args...)\n\t} else {\n\t\tlogrus.Info(args...)\n\t}\n}", "func (l *Logger) Info(args ...interface{}) {\n\tif l.IsEnabledFor(InfoLevel) {\n\t\tfile, line := Caller(1)\n\t\tl.Log(InfoLevel, file, 
line, \"\", args...)\n\t}\n}", "func RInfo(msg ...interface{}) {\n\tif !logrus.IsLevelEnabled(logrus.InfoLevel) {\n\t\treturn\n\t}\n\n\tif pc, file, line, ok := runtime.Caller(1); ok {\n\t\tfile = file[strings.LastIndex(file, \"/\")+1:]\n\t\tfuncName := runtime.FuncForPC(pc).Name()\n\t\tlogrus.WithFields(\n\t\t\tlogrus.Fields{\n\t\t\t\t\"pos\": fmt.Sprintf(\"%s:%s:%d\", file, funcName, line),\n\t\t\t}).Info(msg...)\n\t} else {\n\t\tlogrus.Info(msg)\n\t}\n}", "func (logProxy *loggerProxy)Info(msgfmt string, args ...interface{}) {\n var ch loggerProxyChannel\n ch.fnPtr = logProxy.logObj.Info\n ch.msg = logProxy.appendlog(msgfmt, args...)\n logProxy.logChannel <- ch\n}", "func (nl *NullLogger) LogInfo(m ...interface{}) {\n}", "func (l *log)Infof(format string, args ...interface{}) {\n fmt.Printf(format, args...)\n}", "func Info(args ...interface{}) {\n LoggerOf(default_id).Info(args...)\n}", "func TraceCall_(trace interface{}) {}", "func (l *GrpcLog) Info(args ...interface{}) {\n\t// l.SugaredLogger.Info(args...)\n}", "func (session *dummySession) LogMethodEnter() {\n\tassert.Fail(session.t, \"Unexpected call to LogMethodEnter\")\n}", "func CallInfo(lv int) string {\n\tpc, file, line, ok := runtime.Caller(lv)\n\tif !ok {\n\t\treturn \"\"\n\t}\n\tfile = callerShortfile(file)\n\tfuncName := runtime.FuncForPC(pc).Name()\n\tfuncName = callerShortfile(funcName)\n\tfn := callerShortfile(funcName, ')')\n\tif len(fn) < len(funcName) {\n\t\tif len(fn) > 1 && fn[0] == '.' {\n\t\t\tfn = fn[1:]\n\t\t}\n\t\tfuncName = fn\n\t} else {\n\t\tfuncName = callerShortfile(funcName, '.')\n\t}\n\ts := fmt.Sprintf(\"%s:%d(%s)\", file, line, funcName)\n\treturn s\n}", "func (d *DummyLogger) Info(format string) {}", "func logFrom(method, msg string) {\n\tlog.Printf(\"Sender - %s - %s\", msg, method)\n}", "func (lc mockNotifyLogger) Info(msg string, args ...interface{}) {\n}", "func Log(fmt string, args ...interface{}) {}", "func Info(args ...interface{}) {\n\tinfoLog.Output(CallDepth, fmt.Sprint(args...))\n}", "func (l *Lgr) Info(args ...interface{}) {\n l.Logger.Info(args...)\n}", "func info(format string, args ...interface{}) {\n\tlfn := logfn\n\tif lfn == nil {\n\t\treturn\n\t}\n\tlfn(format, args...)\n}", "func (_m *StdLogger) Infof(format string, args ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, format)\n\t_ca = append(_ca, args...)\n\t_m.Called(_ca...)\n}", "func callerInfo(skip int) string {\n\t_, file, line, _ := runtime.Caller(skip)\n\treturn fmt.Sprintf(\"%v:%v\", file, line)\n}", "func (_m *Logger) Infof(fmt string, args ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, fmt)\n\t_ca = append(_ca, args...)\n\t_m.Called(_ca...)\n}", "func Infof(msg string, args ...interface{}) {\n log.Infof(msg, args...)\n}", "func (l LoggerFunc) Info(format string, args ...interface{}) {\n\tl(format, args...)\n}", "func (data *AccountData) LoggingAddDetails() {\n\n}", "func Info(args ...interface{}) {\n\tlog.Println(args...)\n}", "func Log(logger log.Logger, begin time.Time, err error, additionalKVs ...interface{}) {\n\tpc, _, _, _ := runtime.Caller(1)\n\tcaller := strings.Split(runtime.FuncForPC(pc).Name(), \".\")\n\tdefaultKVs := []interface{}{\n\t\t\"method\", caller[len(caller)-2],\n\t\t\"took\", time.Since(begin),\n\t\t\"success\", fmt.Sprint(err == nil),\n\t}\n\n\tif err != nil {\n\t\tdefaultKVs = append(defaultKVs, \"err\")\n\t\tdefaultKVs = append(defaultKVs, err)\n\t\tlevel.Error(logger).Log(defaultKVs...)\n\t} else {\n\t\tlevel.Info(logger).Log(append(defaultKVs, 
additionalKVs...)...)\n\t}\n}", "func LogCachedQuery(method string) {\n\tLogger.WithFields(logrus.Fields{\n\t\t\"method\": method,\n\t}).Info(\"processed a cached query\")\n}", "func logger(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {\n\tlog.Printf(\"---> Unary interceptor: %v\\n\", info.FullMethod)\n\treturn handler(ctx, req)\n}", "func (log *Logger) Info(arg0 interface{}, args ...interface{}) {\n\tconst (\n\t\tlvl = INFO\n\t)\n\tswitch first := arg0.(type) {\n\tcase string:\n\t\t// Use the string as a format string\n\t\tlog.intLogf(lvl, first, args...)\n\tcase func() string:\n\t\t// Log the closure (no other arguments used)\n\t\tlog.intLogc(lvl, first)\n\tdefault:\n\t\t// Build a format string so that it will be similar to Sprint\n\t\tlog.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(\" %v\", len(args)), args...)\n\t}\n}", "func (d *DummyLogger) Infof(format string, args ...interface{}) {}", "func (_Logger *LoggerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error {\n\treturn _Logger.Contract.LoggerCaller.contract.Call(opts, result, method, params...)\n}", "func (_Logger *LoggerCallerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error {\n\treturn _Logger.Contract.contract.Call(opts, result, method, params...)\n}", "func (l *logWrapper) Info(args ...interface{}) {\n\tl.Log(LogInfo, 3, args...)\n}", "func (l *Logger) Infof(msg string, args ...interface{}) {\n\t_, file, line, _ := runtime.Caller(1)\n\tl.Log(InfoLevel, file, line, msg, args...)\n}", "func (_m *Logger) Info(msg string, additionalValues ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, msg)\n\t_ca = append(_ca, additionalValues...)\n\t_m.Called(_ca...)\n}", "func Log(msg string, err error) {\n\n}", "func Info(args ...interface{}) {\n\tlog.Info(args...)\n}", "func Info(args ...interface{}) {\n\tlog.Info(args...)\n}", "func Info(id int64, args ...interface{}) {\n\tif IsOn() {\n\t\tAddTraceEvent(id, 1, &TraceEventDesc{\n\t\t\tDesc: fmt.Sprint(args...),\n\t\t\tSeverity: CtINFO,\n\t\t})\n\t} else {\n\t\tgrpclog.InfoDepth(1, args...)\n\t}\n}", "func (c *B) Logf(format string, args ...interface{})", "func (c *ServiceController) LogInfo(v ...interface{}) {\n\ta := fmt.Sprint(v)\n\tlogger.Info(\"ServiceController: [Inf] \", a[1:len(a)-1])\n}", "func (logger *Logger) Info(args ...interface{}) {\n\tlogger.std.Log(append([]interface{}{\"Info\"}, args...)...)\n}", "func Infof(msg string, args ...interface{}) {\n\tif !rtLogConf.showFileInfo {\n\t\tlogrus.Infof(msg, args...)\n\t\treturn\n\t}\n\n\tif pc, file, line, ok := runtime.Caller(1); ok {\n\t\tfileName, funcName := getBaseName(file, runtime.FuncForPC(pc).Name())\n\t\tlogrus.WithField(fileTag, fileName).WithField(lineTag, line).WithField(funcTag, funcName).Infof(msg, args...)\n\t} else {\n\t\tlogrus.Infof(msg, args...)\n\t}\n}", "func (cLogger CustomLogger) Infof(format string, args ...interface{}) {\n\tlevel.Info(log.With(loggerInstance, \"caller\", getCallerInfo())).Log(\"message\", fmt.Sprintf(format, args...))\n}", "func (z *ZapLogWrapper) Info(args ...interface{}) {\n\tz.l.Info(args...)\n}", "func lInfo(v ...interface{}) {\n\tinfoLogger.Println(v...)\n}", "func (l nullLogger) Info(msg string, ctx ...interface{}) {}", "func (l Logger) Info(s string) {\n\tlog.Infof(fmtSpec, callerInfo(), l, s)\n}", "func (l Mylog) Info(ctx context.Context, msg string, data ...interface{}) {\n\tl.ServiceLog.Info(msg, 
data)\n\t//if l.LogLevel >= Info {\n\t//\tl.Printf(l.infoStr+msg, append([]interface{}{utils.FileWithLineNum()}, data...)...)\n\t//}\n}", "func (cLogger CustomLogger) Log(args ...interface{}) error {\n\treturn log.With(loggerInstance, \"caller\", getCallerInfo()).Log(args...)\n}", "func traceInfo(info string) {\n\tif trace {\n\t\tlog.Println(info)\n\t}\n}", "func LogInfo(context string, module string, info string) {\n log.Info().\n Str(\"Context\", context).\n Str(\"Module\", module).\n Msg(info)\n}", "func (lc mockNotifyLogger) Trace(msg string, args ...interface{}) {\n}", "func (l *Logger) Trace(message string, args ...interface{}) { l.Log(Trace, message, args...) }", "func (l *Logger) Caller(n int) string {\n\t_, f, fl, _ := runtime.Caller(n)\n\tflStr := strconv.Itoa(fl)\n\treturn f + \":\" + flStr\n}", "func (cLogger CustomLogger) Infom(arg interface{}) {\n\tlevel.Info(log.With(loggerInstance, \"caller\", getCallerInfo())).Log(\"message\", arg)\n}", "func logInfo(format string, a ...interface{}) {\n\tfmt.Printf(format, a...)\n}", "func logging() {\n\tfmt.Println(\"Selesai memanggil function\")\n\tfmt.Println(\"\")\n}", "func (t t) Log(args ...interface{}) {\n\tfmt.Println(args...)\n}", "func VoidLogger(format string, args ...interface{}) {\n\n}", "func Info(args ...interface{}) {\n\n}", "func Info(args ...interface{}) {\n\tlogWithFilename().Info(args...)\n}", "func (g GrcpGatewayLogger) Info(msg string) {\n\tif g.logger.silent(InfoLevel) {\n\t\treturn\n\t}\n\te := g.logger.header(InfoLevel)\n\tif g.logger.Caller > 0 {\n\t\t_, file, line, _ := runtime.Caller(g.logger.Caller)\n\t\te.caller(file, line, g.logger.FullpathCaller)\n\t}\n\te.Context(g.context).Msg(msg)\n}", "func PicUsefulLog() {\n\n}", "func (_m *StdLogger) Fatalln(args ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, args...)\n\t_m.Called(_ca...)\n}", "func (session *dummySession) LogMethodParameter(parameters ...interface{}) {\n\tassert.Fail(session.t, \"Unexpected call to LogMethodParameter\")\n}", "func (l dclLogger) Info(args ...interface{}) {\n\tlog.Print(args...)\n}", "func (l dclLogger) Info(args ...interface{}) {\n\tlog.Print(args...)\n}", "func (l *Logger) Infof(msg string, args ...interface{}) {\n\tif l.IsEnabledFor(InfoLevel) {\n\t\tfile, line := Caller(1)\n\t\tl.Log(InfoLevel, file, line, msg, args...)\n\t}\n}", "func (c *CalendarController) LogInfo(v ...interface{}) {\n\ta := fmt.Sprint(v)\n\tlogger.Info(\"CalendarController: [Inf] \", a[1:len(a)-1])\n}", "func writeLog() {\n}", "func Info(args ...interface{}) {\n\tLog.Info(args...)\n}", "func (logProxy *loggerProxy)getCallerName() string{\n pc := make([]uintptr, 1)\n //Skipping the functions that are part of loggerProxy to get right caller.\t\n runtime.Callers(4, pc)\n f := runtime.FuncForPC(pc[0])\n return f.Name()\n}", "func Info(v ...interface{}){\n log.Info(v)\n}", "func (_m *StdLogger) Infoln(args ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, args...)\n\t_m.Called(_ca...)\n}", "func ctrace(name string, args ...interface{}) {\n\t//log.Printf(\"TRACE %s(%v)\", name, args)\n}", "func Info(args ...interface{}) {\r\n\tif *gloged {\r\n\t\tglog.Info(args...)\r\n\t} else {\r\n\t\tlog.Println(args...)\r\n\t}\r\n}", "func (z *Logger) Info(args ...interface{}) {\n\tz.SugaredLogger.Info(args...)\n}", "func logEndpoint(req *http.Request) {\n\tlog.Println(req.Method + \": \" + req.URL.RequestURI())\n}", "func doLog(ctx context.Context) {\n\t_, file, line, _ := runtime.Caller(0)\n\tlogPos.File, logPos.Line = file, 
line+2\n\tdlog.Infof(ctx, \"grep for this\")\n}" ]
[ "0.67740566", "0.664917", "0.6611569", "0.6609859", "0.6541663", "0.6528878", "0.6526598", "0.6499431", "0.6489033", "0.64533347", "0.63796335", "0.63693124", "0.63527495", "0.6341607", "0.63408357", "0.63373005", "0.6324299", "0.63202775", "0.6287711", "0.62851906", "0.6247795", "0.62334317", "0.6218927", "0.6215439", "0.6213273", "0.62096924", "0.62062144", "0.61897874", "0.6185655", "0.6153003", "0.6150578", "0.61391103", "0.6134278", "0.6126167", "0.6112137", "0.6111919", "0.61051965", "0.6097194", "0.60837007", "0.606093", "0.601689", "0.60141796", "0.600709", "0.6002429", "0.598559", "0.5983039", "0.5980611", "0.59776676", "0.59672016", "0.59567195", "0.59560126", "0.59435266", "0.5940871", "0.59347904", "0.59310836", "0.5927256", "0.59212023", "0.5914996", "0.5914996", "0.59123236", "0.59095937", "0.59094363", "0.59092337", "0.59040064", "0.58988374", "0.58976865", "0.5883741", "0.5874172", "0.58675134", "0.5867098", "0.586374", "0.5862248", "0.5860145", "0.5859665", "0.58550805", "0.5852766", "0.58498794", "0.5845001", "0.5841515", "0.5835993", "0.5831945", "0.5829557", "0.58279914", "0.5827258", "0.58210546", "0.58186996", "0.5814892", "0.58119357", "0.58119357", "0.5808029", "0.5799561", "0.5796609", "0.57904", "0.57882464", "0.5781681", "0.57807434", "0.5770257", "0.57694834", "0.5763472", "0.57624286", "0.57605666" ]
0.0
-1
parse exactly one level
func (server *Server) parse(ctx context.Context, r *types.Reference) (*types.Package, error) { node, err := server.api.Unixfs().Get(ctx, r.Path()) if err != nil { return nil, err } file := files.ToFile(node) opts := ld.NewJsonLdOptions(r.Resource) input, err := ld.NewJsonLdProcessor().FromRDF(file, opts) if err != nil { return nil, err } pkg, err := server.framePackage(r.Resource, input) pkg.ID = r.ID return pkg, err }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func parse(ts *TokenScanner) (*Tree, error) {\n\tvar tree *Tree\n\n\t// Insert dummy head to enable simplified recursion logic\n\ttree, _ = InitTree(Symbol{Dummy, dummyName})\n\n\tfor ts.hasNext() {\n\t\ttoken := ts.next()\n\n\t\tif chatty {\n\t\t\tfmt.Println(\"» parsing token =\", token)\n\t\t}\n\n\t\terr := ingest(ts, tree, token)\n\t\tif err != nil {\n\t\t\treturn nil, e(\"ingest in parse failed → \", err)\n\t\t}\n\t}\n\n\tif len(tree.Children) <= 0 {\n\t\treturn nil, e(\"no complete expression provided (empty AST)\")\n\t}\n\n\t// Remove the dummy head\n\ttree = tree.Children[0]\n\n\treturn tree, nil\n}", "func (t *Tree) parse() {\n\tt.Root = t.newStatement(t.peek().pos, nil)\n\top := t.peekNonSpace()\n\tif itemOperatorsStart <= op.typ && op.typ <= itemOperatorsEnd {\n\t\tt.Root.Operator = t.operator()\n\t}\n\ttok := t.nextNonSpace()\n\tif tok.typ != itemEOF {\n\t\tt.errorf(\"unexpected token after operator: %q\", tok)\n\t}\n}", "func (p *parser) parse() {\n\tp.Root = p.program()\n\tp.expect(TokenEOF)\n\t//if err := t.Root.Check(); err != nil {\n\t//\tt.error(err)\n\t//}\n}", "func parse(t *tokens) (Node, error) {\n\tcurrentToken := t.peek(0)\n\tif currentToken == leftSqBracket {\n\t\treturn parseArray(t)\n\t} else if currentToken == leftCurlBracket {\n\t\treturn parseObject(t)\n\t} else {\n\t\treturn TokenAsNode(t.consume())\n\t}\n}", "func (t *Tree) startParse(lex *lexer) {\n\tt.Root = nil\n\tt.lex = lex\n}", "func parseLevel(lvl string) (Level, error) {\n\tswitch strings.ToLower(lvl) {\n\tcase \"info\":\n\t\treturn InfoLevel, nil\n\tcase \"error\":\n\t\treturn ErrorLevel, nil\n\tcase \"debug\":\n\t\treturn DebugLevel, nil\n\t}\n\n\tvar l Level\n\treturn l, fmt.Errorf(\"not a valid Level: %q\", lvl)\n}", "func parseRecursively(tokens []token, idx int) (PipelineNode, int) {\n\tret, idx := parseCommand(tokens, idx)\n\n\tif idx < len(tokens) {\n\t\tswitch tokens[idx].tokenType {\n\t\tcase plus_token:\n\t\t\tret, idx = parsePlus(tokens, idx, ret)\n\t\t\treturn ret, idx\n\t\tcase minus_token:\n\t\t\tret, idx = parseMinus(tokens, idx, ret)\n\t\t\treturn ret, idx\n\t\tcase pipe_token:\n\t\t\tidx += 1\n\t\t\treturn ret, idx\n\t\tcase identifier_token:\n\t\t\tpanic(\"Unexpected identifier_token\")\n\t\tcase string_literal_token:\n\t\t\tpanic(\"Unexpected string_literal_token\")\n\t\t}\n\t} else {\n\t\treturn ret, idx\n\t}\n\n\tpanic(\"Unreachable\")\n}", "func parseExpansionLevel(raw []string) *ExpansionLevel {\n\tsort.Strings(raw)\n\n\tlevel := &ExpansionLevel{expansions: make(map[string]*ExpansionLevel)}\n\tgroups := make(map[string][]string)\n\n\tfor _, expansion := range raw {\n\t\tparts := strings.Split(expansion, \".\")\n\t\tif len(parts) == 1 {\n\t\t\tif parts[0] == \"*\" {\n\t\t\t\tlevel.wildcard = true\n\t\t\t} else {\n\t\t\t\tlevel.expansions[parts[0]] =\n\t\t\t\t\t&ExpansionLevel{expansions: make(map[string]*ExpansionLevel)}\n\t\t\t}\n\t\t} else {\n\t\t\tgroups[parts[0]] = append(groups[parts[0]], strings.Join(parts[1:], \".\"))\n\t\t}\n\t}\n\n\tfor key, subexpansions := range groups {\n\t\tlevel.expansions[key] = parseExpansionLevel(subexpansions)\n\t}\n\n\treturn level\n}", "func dive(tokenizer *html.Tokenizer) *Node {\n\tvar parent *Node\n\tvar root *Node\n\n\tfor {\n\t\ttokenType := tokenizer.Next()\n\t\tif tokenType == html.ErrorToken {\n\t\t\treturn root\n\t\t}\n\t\tcurrent := tokenizer.Token()\n\t\tswitch tokenType {\n\t\tcase html.StartTagToken:\n\t\t\tnode := newNode(current, parent)\n\t\t\tif root == nil {\n\t\t\t\troot = node\n\t\t\t} else {\n\t\t\t\tparent.Children = 
append(parent.Children, node)\n\t\t\t}\n\t\t\tparent = node\n\t\tcase html.SelfClosingTagToken:\n\t\t\tnode := newNode(current, parent)\n\t\t\tparent.Children = append(parent.Children, node)\n\t\tcase html.EndTagToken:\n\t\t\ttag := parent.FindTagReverse(current.Data)\n\t\t\tif tag != nil {\n\t\t\t\tparent = tag.Parent\n\t\t\t}\n\t\t}\n\t}\n}", "func parseRecurse(tree []node, l *lexer, end *token, depth int) ([]node, error) {\n\tdepth++\n\tif depth > depthLimit {\n\t\treturn nil, fmt.Errorf(\"depth limit %v\", depthLimit)\n\t}\n\tfor {\n\t\tt, err := l.Next()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif t == nil {\n\t\t\tif end != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"unclosed scope %v\", end)\n\t\t\t}\n\t\t\treturn tree, nil\n\t\t}\n\t\tswitch t.tt {\n\t\tcase ttArray:\n\t\t\tnodes, err := parseRecurse(make([]node, 0), l, t, depth)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ttree = append(tree, &nodeArray{name: t.val, nodes: nodes})\n\t\tcase ttEnd:\n\t\t\tif end == nil {\n\t\t\t\treturn nil, fmt.Errorf(\"unopened scope %v\", t)\n\t\t\t}\n\t\t\tif t.val != end.val {\n\t\t\t\treturn nil, fmt.Errorf(\"unmatched tag %v\", t)\n\t\t\t}\n\t\t\treturn tree, nil\n\t\tcase ttIfdef:\n\t\t\tnodes, err := parseRecurse(make([]node, 0), l, t, depth)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ttree = append(tree, &nodeIfdef{name: t.val, nodes: nodes})\n\t\tcase ttIfndef:\n\t\t\tnodes, err := parseRecurse(make([]node, 0), l, t, depth)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ttree = append(tree, &nodeIfndef{name: t.val, nodes: nodes})\n\t\tcase ttInclude:\n\t\t\ttree = append(tree, &nodeInclude{name: t.val})\n\t\tcase ttObject:\n\t\t\tnodes, err := parseRecurse(make([]node, 0), l, t, depth)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ttree = append(tree, &nodeObject{name: t.val, nodes: nodes})\n\t\tcase ttPrint:\n\t\t\ttree = append(tree, &nodePrint{name: t.val})\n\t\tcase ttString:\n\t\t\ttree = append(tree, &nodeString{val: t.val})\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"unknown type %q, programmer error\", t.tt))\n\t\t}\n\t}\n}", "func parseGeneral(input []byte, tokens []token, start int, endtokens []string, mi *markupInfo) (string, int) {\n\tmi.depth++\n\tdefer func() {\n\t\tmi.depth--\n\t}()\n\tlistType := \"\"\n\ti := start\n\tresults := []string{}\n\tfor {\n\t\tif i >= len(tokens) {\n\t\t\tbreak\n\t\t}\n\t\tif tokens[i].IsToken {\n\t\t\tif len(endtokens) > 0 && (i+len(endtokens)) <= len(tokens) {\n\t\t\t\tdoret := true\n\t\t\t\tvar j int\n\t\t\t\tfor j = 0; j < len(endtokens); j++ {\n\t\t\t\t\tif tokens[i+j].Val != endtokens[j] {\n\t\t\t\t\t\tdoret = false\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif doret {\n\t\t\t\t\treturn strings.Join(results, \"\"), i + len(endtokens) - 1\n\t\t\t\t}\n\t\t\t}\n\t\t\tswitch {\n\t\t\tcase tokens[i].Val == \"\\n\":\n\t\t\t\tif listType != \"\" {\n\t\t\t\t\tresults = append(results, fmt.Sprintf(\"</%s>\", listType))\n\t\t\t\t\tlistType = \"\"\n\t\t\t\t}\n\t\t\t\tif (i+1) < len(tokens) && tokens[i+1].IsToken && tokens[i+1].Val == \"\\n\" {\n\t\t\t\t\tif mi.inCode {\n\t\t\t\t\t\tresults = append(results, \"\\n\\n\")\n\t\t\t\t\t} else {\n\t\t\t\t\t\tresults = append(results, \"\\n<br />\\n<br />\")\n\t\t\t\t\t}\n\t\t\t\t\t// Skip over successive newlines.\n\t\t\t\t\tfor i++; i < len(tokens) && tokens[i].Val == \"\\n\"; i++ {\n\t\t\t\t\t}\n\t\t\t\t\ti--\n\t\t\t\t} else {\n\t\t\t\t\tresults = append(results, \"\\n\")\n\t\t\t\t}\n\t\t\tcase tokens[i].Val == 
\"\\n*\":\n\t\t\t\tif !mi.inCode && listType != \"ul\" && doesListContinue(tokens, \"\\n*\", i) {\n\t\t\t\t\tresults = append(results, \"<ul>\")\n\t\t\t\t\tlistType = \"ul\"\n\t\t\t\t}\n\t\t\t\tif listType != \"\" {\n\t\t\t\t\tresults = append(results, \"<li>\")\n\t\t\t\t} else {\n\t\t\t\t\tresults = append(results, tokens[i].Val)\n\t\t\t\t}\n\t\t\tcase tokens[i].Val == \"\\n#\":\n\t\t\t\tif !mi.inCode && listType != \"ol\" && doesListContinue(tokens, \"\\n#\", i) {\n\t\t\t\t\tresults = append(results, \"<ol>\")\n\t\t\t\t\tlistType = \"ol\"\n\t\t\t\t}\n\t\t\t\tif listType != \"\" {\n\t\t\t\t\tresults = append(results, \"<li>\")\n\t\t\t\t} else {\n\t\t\t\t\tresults = append(results, tokens[i].Val)\n\t\t\t\t}\n\t\t\tcase tokens[i].Val == \"{{\":\n\t\t\t\tbody, eidx := parseTemplate(input, tokens, i, mi)\n\t\t\t\tresults = append(results, body)\n\t\t\t\ti = eidx\n\t\t\tcase len(tokens) > (i+1) && tokens[i].Val == \"[\" && tokens[i+1].Val == \"[\":\n\t\t\t\tbody, eidx := parseInternalLink(input, tokens, i+1, mi)\n\t\t\t\tresults = append(results, body)\n\t\t\t\ti = eidx\n\t\t\tcase tokens[i].Val == \"[\":\n\t\t\t\tbody, eidx := parseExternalLink(input, tokens, i, mi)\n\t\t\t\tresults = append(results, body)\n\t\t\t\ti = eidx\n\t\t\tcase tokens[i].Val[0] == '\\'':\n\t\t\t\tbody, eidx := parseMarkup(input, tokens, i, mi)\n\t\t\t\tresults = append(results, body)\n\t\t\t\ti = eidx\n\t\t\tcase tokens[i].Val[0] == '=':\n\t\t\t\tbody, eidx := parseHeader(input, tokens, i, mi)\n\t\t\t\tresults = append(results, body)\n\t\t\t\ti = eidx\n\t\t\tcase len(tokens[i].Val) > 5 && tokens[i].Val[0:5] == \"<code\":\n\t\t\t\tbody, eidx := parseCode(input, tokens, i, mi, \"</code>\")\n\t\t\t\tresults = append(results, body)\n\t\t\t\ti = eidx\n\t\t\tcase len(tokens[i].Val) > 7 && tokens[i].Val[0:7] == \"<source\":\n\t\t\t\tbody, eidx := parseCode(input, tokens, i, mi, \"</source>\")\n\t\t\t\tresults = append(results, body)\n\t\t\t\ti = eidx\n\t\t\tcase len(tokens[i].Val) > 4 && tokens[i].Val[0:4] == \"<ref\":\n\t\t\t\tbody, eidx := parseReference(input, tokens, i, mi)\n\t\t\t\tresults = append(results, body)\n\t\t\t\ti = eidx\n\t\t\tcase len(tokens[i].Val) > 7 && tokens[i].Val[0:7] == \"<nowiki\":\n\t\t\t\tbody, eidx := parseNowiki(input, tokens, i, mi)\n\t\t\t\tresults = append(results, body)\n\t\t\t\ti = eidx\n\t\t\t\t// The last case for html tags: <.*>, including pre, /pre, etc.\n\t\t\tcase len(tokens[i].Val) > 1 && tokens[i].Val[0:1] == \"<\":\n\t\t\t\tbody, eidx := parseHtml(input, tokens, i, mi, \"</pre>\")\n\t\t\t\tresults = append(results, body)\n\t\t\t\ti = eidx\n\t\t\tcase tokens[i].Val == \"]\":\n\t\t\t\t// This happens a lot. No biggie.\n\t\t\t\tresults = append(results, \"]\")\n\t\t\tdefault:\n\t\t\t\tif endtokens != nil {\n\t\t\t\t\tfmt.Printf(\"Don't know what to do with token \\\"%s\\\". endtokens is \\\"%v\\\"\\n\", tokens[i].Val, endtokens)\n\t\t\t\t\tfmt.Printf(\"Tokens[i].Val is: %s\\n\", tokens[i].Val)\n\t\t\t\t\tfmt.Printf(\"Tokens[i-1].Val is: %s\\n\", tokens[i-1].Val)\n\t\t\t\t\tfmt.Printf(\"Tokens[i-2].Val is: %s\\n\", tokens[i-2].Val)\n\t\t\t\t\tfmt.Printf(\"Tokens[i-3].Val is: %s\\n\", tokens[i-3].Val)\n\t\t\t\t\tfmt.Printf(\"Tokens[i-4].Val is: %s\\n\", tokens[i-4].Val)\n\t\t\t\t\tfmt.Printf(\" Start is: \\\"%s\\\"\\n\", tokens[start].Val)\n\t\t\t\t\tfmt.Printf(\" Opener was: %s\\n\", tokens[start-1].Val)\n\t\t\t\t\tfmt.Printf(\" Pre-Opener was: %s\\n\", tokens[start-2].Val)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Printf(\"Don't know what to do with token '%s'. 
No endtokens\\n\", tokens[i].Val)\n\t\t\t\t}\n\t\t\t\tresults = append(results, unparseEntities(tokens[i].Val))\n\t\t\t}\n\t\t} else {\n\t\t\tresults = append(results, parsePlainText(string(tokens[i].Val)))\n\t\t}\n\t\ti += 1\n\t}\n\treturn strings.Join(results, \"\"), i\n}", "func (p *Parser) parse(prefix string, s *ast.StructType) {\n\t// stop recursion for nested structs\n\tif s == nil {\n\t\treturn\n\t}\n\n\tfor _, f := range s.Fields.List {\n\t\terr := &ErrSyntax{line: p.file.Line(f.Pos())}\n\n\t\tvar (\n\t\t\ttag = err.append(p.parseTag(f)).(*types.Tag)\n\t\t\tname = prefix + err.append(p.parseName(f)).(string)\n\t\t\ttypeID = err.append(p.parseType(f)).(string)\n\t\t)\n\n\t\tswitch {\n\t\tcase err.errs != nil:\n\t\t\tp.errors = append(p.errors, err)\n\t\tcase tag == nil:\n\t\t\tp.parse(name+\".\", p.parseSubType(f))\n\t\tdefault:\n\t\t\tif tag.EnvVar == \"\" {\n\t\t\t\ttag.EnvVar = strings.ToUpper(strings.ReplaceAll(name, \".\", \"_\"))\n\t\t\t}\n\n\t\t\t// because names unique (because of prefix) we can omit checks like \"Did we find already?\"\n\t\t\tp.fields[name] = &types.Field{\n\t\t\t\tName: name,\n\t\t\t\tType: typeID,\n\t\t\t\tEnvVar: tag.EnvVar,\n\t\t\t\tAction: tag.Action,\n\t\t\t}\n\t\t}\n\t}\n}", "func unnest(tree antlr.ParseTree) antlr.ParseTree {\n\tfor tree != nil {\n\t\tswitch t := tree.(type) {\n\t\tcase *gen.ExprContext:\n\t\t\t// conditionalOr op='?' conditionalOr : expr\n\t\t\tif t.GetOp() != nil {\n\t\t\t\treturn t\n\t\t\t}\n\t\t\t// conditionalOr\n\t\t\ttree = t.GetE()\n\t\tcase *gen.ConditionalOrContext:\n\t\t\t// conditionalAnd (ops=|| conditionalAnd)*\n\t\t\tif t.GetOps() != nil && len(t.GetOps()) > 0 {\n\t\t\t\treturn t\n\t\t\t}\n\t\t\t// conditionalAnd\n\t\t\ttree = t.GetE()\n\t\tcase *gen.ConditionalAndContext:\n\t\t\t// relation (ops=&& relation)*\n\t\t\tif t.GetOps() != nil && len(t.GetOps()) > 0 {\n\t\t\t\treturn t\n\t\t\t}\n\t\t\t// relation\n\t\t\ttree = t.GetE()\n\t\tcase *gen.RelationContext:\n\t\t\t// relation op relation\n\t\t\tif t.GetOp() != nil {\n\t\t\t\treturn t\n\t\t\t}\n\t\t\t// calc\n\t\t\ttree = t.Calc()\n\t\tcase *gen.CalcContext:\n\t\t\t// calc op calc\n\t\t\tif t.GetOp() != nil {\n\t\t\t\treturn t\n\t\t\t}\n\t\t\t// unary\n\t\t\ttree = t.Unary()\n\t\tcase *gen.MemberExprContext:\n\t\t\t// member expands to one of: primary, select, index, or create message\n\t\t\ttree = t.Member()\n\t\tcase *gen.PrimaryExprContext:\n\t\t\t// primary expands to one of identifier, nested, create list, create struct, literal\n\t\t\ttree = t.Primary()\n\t\tcase *gen.NestedContext:\n\t\t\t// contains a nested 'expr'\n\t\t\ttree = t.GetE()\n\t\tcase *gen.ConstantLiteralContext:\n\t\t\t// expands to a primitive literal\n\t\t\ttree = t.Literal()\n\t\tdefault:\n\t\t\treturn t\n\t\t}\n\t}\n\treturn tree\n}", "func parseTop(ds *docState) {\n\tif n, ok := parseNode(ds); ok {\n\t\tif n != nil {\n\t\t\tds.appendNodes(n)\n\t\t}\n\t\treturn\n\t}\n\tds.push(nil)\n\tnn := parseSubtree(ds)\n\tds.pop()\n\tds.appendNodes(parser.CompactNodes(nn)...)\n}", "func parse(input []byte, vars []Variable) (node Node, err error) {\n\t// Tokenize\n\ttokens, err := scanInput(input)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Convert to AST\n\tnodes, endPos, err := parseExpression(tokens, vars, 0)\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif len(nodes) > 1 {\n\t\terr = fmt.Errorf(\"couldn't flatten down to one node:\\n%+v\", nodes)\n\t\treturn\n\t}\n\n\tif endPos < len(tokens)-1 {\n\t\terr = fmt.Errorf(\"Parsing tokens ended at %d, but expected %d\", endPos, 
len(tokens)-1)\n\t\treturn\n\t}\n\n\tnode = nodes[0]\n\n\treturn\n}", "func (pars *Parser) parseGroupedExpression() tree.Expression {\n\tpars.nextToken()\n\n\texp := pars.parseExpression(LOWEST)\n\tif !pars.expectPeek(lexer.RPAR) {\n\t\treturn nil\n\t}\n\treturn exp\n}", "func TestSimpleParserSubContext1(t *testing.T) {\n\tassert := assert.New(t)\n\n\ttestString := \"true || name.first == 'Neil'\"\n\tctx, err := NewExpressionParserCtx(testString)\n\tassert.Equal(fieldMode, ctx.subCtx.currentMode)\n\terr = ctx.parse()\n\tassert.Nil(err)\n\tassert.Equal(5, len(ctx.parserDataNodes))\n\tassert.Equal(5, ctx.parserTree.NumNodes())\n\tnode := ctx.parserDataNodes[ctx.subCtx.lastParserDataNode]\n\tassert.NotNil(node)\n\n\tassert.Equal(1, ctx.treeHeadIndex)\n\tassert.Equal(-1, ctx.parserTree.data[1].ParentIdx)\n\tassert.Equal(3, ctx.parserTree.data[1].Right)\n\tassert.Equal(0, ctx.parserTree.data[1].Left)\n\tassert.Equal(1, ctx.parserTree.data[3].ParentIdx)\n\tassert.Equal(4, ctx.parserTree.data[3].Right)\n\tassert.Equal(2, ctx.parserTree.data[3].Left)\n}", "func (this *Codec) deserialize1(data string) *TreeNode {\n\tc := strings.Split(data, \",\")\n\n\tif len(data) == 0 {\n\t\treturn nil\n\t}\n\n\tt := &TreeNode{Val: myAtoi(c[0])}\n\tqueue := []*TreeNode{t}\n\n\ti := 1\n\tfor len(queue) > 0 {\n\t\tl := len(queue)\n\t\tfor j := 0; j < l; j++ {\n\t\t\tif c[i] == \"nil\" {\n\t\t\t\tqueue[j].Left = nil\n\t\t\t} else {\n\t\t\t\tqueue[j].Left = &TreeNode{Val: myAtoi(c[i])}\n\t\t\t\tqueue = append(queue, queue[j].Left)\n\t\t\t}\n\t\t\ti++\n\t\t\tif c[i] == \"nil\" {\n\t\t\t\tqueue[j].Right = nil\n\t\t\t} else {\n\t\t\t\tqueue[j].Right = &TreeNode{Val: myAtoi(c[i])}\n\t\t\t\tqueue = append(queue, queue[j].Right)\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t\tqueue = queue[l:]\n\t}\n\treturn t\n}", "func parseNext(v rune, node **node) {\n\tv2 := string(v)\n\n\tswitch v2 {\n\tcase \"[\":\n\t\tif checkName() {\n\t\t\taddNode(node, 1)\n\t\t}\n\tcase \"]\":\n\t\tif checkName() {\n\t\t\taddNode(node, 2)\n\t\t}\n\tcase \",\":\n\t\tif checkName() {\n\t\t\taddNode(node, 0)\n\t\t}\n\tdefault:\n\t\tif syntax.IsWordChar(v) {\n\t\t\tscan.name += string(v) // Accept only alphanumeric (No escapes)\n\t\t}\n\t}\n}", "func parse(name, src string) ([]node, error) {\n\treturn parseRecurse(make([]node, 0), newLexer(name, src), nil, 0)\n}", "func parseTopLevel(fset *token.FileSet, buf *Buffer) (*ast.File, error) {\n\tsrc := []byte(\"package p\\n\" + buf.String())\n\treturn parser.ParseFile(fset, \"<input>\", src, parser.DeclarationErrors|parser.ParseComments)\n}", "func parseOne(data string) (*eskip.Route, error) {\n\tr, err := eskip.Parse(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(r) != 1 {\n\t\treturn nil, errors.New(\"invalid route entry: multiple route expressions\")\n\t}\n\n\treturn r[0], nil\n}", "func (p *Parser) parseSubExpr() {\n\tdefer un(trace(p, \"parseSubExpr\"))\n}", "func TestSimpleParserSubContext1(t *testing.T) {\n\tassert := assert.New(t)\n\n\ttestString := \"true || `name`.`first` == \\\"Neil\\\"\"\n\tctx, err := NewExpressionParserCtx(testString)\n\tassert.Equal(fieldMode, ctx.subCtx.currentMode)\n\terr = ctx.parse()\n\tassert.Nil(err)\n\tassert.Equal(5, len(ctx.parserDataNodes))\n\tassert.Equal(5, ctx.parserTree.NumNodes())\n\tnode := ctx.parserDataNodes[ctx.subCtx.lastParserDataNode]\n\tassert.NotNil(node)\n\n\tassert.Equal(1, ctx.treeHeadIndex)\n\tassert.Equal(-1, ctx.parserTree.data[1].ParentIdx)\n\tassert.Equal(3, ctx.parserTree.data[1].Right)\n\tassert.Equal(0, 
ctx.parserTree.data[1].Left)\n\tassert.Equal(1, ctx.parserTree.data[3].ParentIdx)\n\tassert.Equal(4, ctx.parserTree.data[3].Right)\n\tassert.Equal(2, ctx.parserTree.data[3].Left)\n}", "func (yp *YamlParser) parseLine() error {\n\tvar rawIndent = yp.consumeSpaces()\n\n\tif yp.pick() == TkComment {\n\t\t// Nothing but comment, skip\n\t\treturn nil\n\t}\n\n\tindent, err := yp.determineIndent(rawIndent)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Check for value type many\n\tif indent >= yp.prevIndent && yp.isListType() {\n\t\typ.getListValue()\n\t\treturn nil\n\t}\n\n\t// From now on it can only be a new node\n\n\tindentShift := int(indent) - int(yp.prevIndent)\n\n\tif indentShift > 1 {\n\t\treturn yp.err(\"Syntax Error! Invalid indent (no parent)\")\n\t}\n\n\tparentNodePtr := yp.currentNode\n\n\tif indentShift <= 0 && yp.currentNode.parent != nil {\n\t\t// Rewind to correct node\n\t\tfor i := 0; i < (-indentShift)+1; i++ {\n\t\t\tparentNodePtr = parentNodePtr.parent\n\t\t}\n\t}\n\n\typ.currentNode = NewChildNode(parentNodePtr)\n\n\typ.processKey()\n\n\tif yp.move(len(TkPostKey)) {\n\t\typ.consumeSpaces()\n\t\typ.processValue()\n\t}\n\n\typ.prevIndent = indent\n\n\treturn nil\n}", "func (_Logger *LoggerFilterer) ParseDepositSubTreeReady(log types.Log) (*LoggerDepositSubTreeReady, error) {\n\tevent := new(LoggerDepositSubTreeReady)\n\tif err := _Logger.contract.UnpackLog(event, \"DepositSubTreeReady\", log); err != nil {\n\t\treturn nil, err\n\t}\n\treturn event, nil\n}", "func parseLevel(level string) logrus.Level {\n\tl, err := logrus.ParseLevel(level)\n\tif err != nil {\n\t\tlog.Fatalf(\"parse log level failed, %s\", err)\n\t}\n\treturn l\n}", "func (p *parser) parseExpr() tree.Expr {\n\treturn p.parseSubexpr(0)\n}", "func (parser *Parser) Parse(l RawLog) {\n\t// empty log\n\tif len(l) == 0 {\n\t\treturn\n\t}\n\n\tfirst := strings.TrimSpace(l[0])\n\tif len(l) == 1 {\n\t\tswitch {\n\t\tcase strings.HasPrefix(first, \"[UnityCrossThreadLogger]\"):\n\t\t\tline := strings.TrimPrefix(first, \"[UnityCrossThreadLogger]\")\n\t\t\tif parser.onSingleTreadLog != nil {\n\t\t\t\tparser.onSingleTreadLog(line)\n\t\t\t}\n\t\t\tparser.parseSingleTreadLog(line)\n\t\tcase strings.HasPrefix(first, \"<<<<<<<<<<\"):\n\t\t\tparser.parserZoneChange(first)\n\t\tdefault:\n\t\t\tif parser.onUnknownLog != nil {\n\t\t\t\tparser.onUnknownLog(fmt.Sprintf(\"Unparsed log: %s\", first))\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\n\tswitch remaining := l[1:]; {\n\tcase strings.HasPrefix(first, \"[UnityCrossThreadLogger]\"):\n\t\tthreadLog := thread.NewLog(strings.TrimPrefix(first, \"[UnityCrossThreadLogger]\"), remaining)\n\t\tif threadLog.Type == \"\" {\n\t\t\tif parser.onUnknownLog != nil {\n\t\t\t\tparser.onUnknownLog(fmt.Sprintf(\"Unparsed thread log: %s\\n%s\", first, remaining))\n\t\t\t}\n\t\t}\n\t\tif parser.onThreadLog != nil {\n\t\t\tparser.onThreadLog(threadLog)\n\t\t}\n\t\tparser.parseMultilineTreadLog(threadLog)\n\tdefault:\n\t\tif parser.onUnknownLog != nil {\n\t\t\tparser.onUnknownLog(fmt.Sprintf(\"Unparsed log: %s\\n%s\", first, remaining))\n\t\t}\n\t}\n}", "func ParseLevel(str string) (Level, bool) {\n\tupper := strings.ToUpper(str)\n\tfor i, v := range Level2Str {\n\t\tif v == upper {\n\t\t\treturn Level(i), true\n\t\t}\n\t}\n\treturn InfoLevel, false\n}", "func (_Logger *LoggerFilterer) ParseDepositLeafMerged(log types.Log) (*LoggerDepositLeafMerged, error) {\n\tevent := new(LoggerDepositLeafMerged)\n\tif err := _Logger.contract.UnpackLog(event, \"DepositLeafMerged\", log); err != nil {\n\t\treturn nil, 
err\n\t}\n\treturn event, nil\n}", "func Parse(fullline string) Node {\n\tline := fullline\n\n\t// This is a special case. I'm not sure if it's a bug in the clang AST\n\t// dumper. It should have children.\n\tif line == \"array filler\" {\n\t\treturn parseArrayFiller(line)\n\t}\n\n\tparts := strings.SplitN(line, \" \", 2)\n\tnodeName := parts[0]\n\n\tif nodeName == \"super\" || nodeName == \"getter\" { // special ObjC case\n\t\tparts = strings.SplitN(parts[1], \" \", 2)\n\t\tnodeName += \" \" + parts[0]\n\t}\n\t// skip node name\n\tif len(parts) > 1 {\n\t\tline = parts[1]\n\t}\n\n\tswitch nodeName {\n\tcase \"AlignedAttr\":\n\t\treturn parseAlignedAttr(line)\n\tcase \"AllocSizeAttr\":\n\t\treturn parseAllocSizeAttr(line)\n\tcase \"AlwaysInlineAttr\":\n\t\treturn parseAlwaysInlineAttr(line)\n\tcase \"ArraySubscriptExpr\":\n\t\treturn parseArraySubscriptExpr(line)\n\tcase \"ArcWeakrefUnavailableAttr\":\n\t\treturn parseArcWeakrefUnavailableAttr(line)\n\tcase \"AsmLabelAttr\":\n\t\treturn parseAsmLabelAttr(line)\n\tcase \"AttributedType\":\n\t\treturn parseAttributedType(line)\n\tcase \"AvailabilityAttr\":\n\t\treturn parseAvailabilityAttr(line)\n\tcase \"BinaryOperator\":\n\t\treturn parseBinaryOperator(line)\n\tcase \"BlockCommandComment\":\n\t\treturn parseBlockCommandComment(line)\n\tcase \"BlockPointerType\":\n\t\treturn parseBlockPointerType(line)\n\tcase \"BreakStmt\":\n\t\treturn parseBreakStmt(line)\n\tcase \"BuiltinType\":\n\t\treturn parseBuiltinType(line)\n\tcase \"CallExpr\":\n\t\treturn parseCallExpr(line)\n\tcase \"ConvertVectorExpr\":\n\t\treturn parseConvertVectorExpr(line)\n\tcase \"CaseStmt\":\n\t\treturn parseCaseStmt(line)\n\tcase \"CFAuditedTransferAttr\":\n\t\treturn parseCFAuditedTransferAttr(line)\n\tcase \"CFConsumedAttr\":\n\t\treturn parseCFConsumedAttr(line)\n\tcase \"CFReturnsRetainedAttr\":\n\t\treturn parseCFReturnsRetainedAttr(line)\n\tcase \"CFReturnsNotRetainedAttr\":\n\t\treturn parseCFReturnsNotRetainedAttr(line)\n\tcase \"CharacterLiteral\":\n\t\treturn parseCharacterLiteral(line)\n\tcase \"CompoundLiteralExpr\":\n\t\treturn parseCompoundLiteralExpr(line)\n\tcase \"CompoundStmt\":\n\t\treturn parseCompoundStmt(line)\n\tcase \"ConditionalOperator\":\n\t\treturn parseConditionalOperator(line)\n\tcase \"ConstAttr\":\n\t\treturn parseConstAttr(line)\n\tcase \"ConstantArrayType\":\n\t\treturn parseConstantArrayType(line)\n\tcase \"ContinueStmt\":\n\t\treturn parseContinueStmt(line)\n\tcase \"CompoundAssignOperator\":\n\t\treturn parseCompoundAssignOperator(line)\n\tcase \"CStyleCastExpr\":\n\t\treturn parseCStyleCastExpr(line)\n\tcase \"DeclRefExpr\":\n\t\treturn parseDeclRefExpr(line)\n\tcase \"DeclStmt\":\n\t\treturn parseDeclStmt(line)\n\tcase \"DefaultStmt\":\n\t\treturn parseDefaultStmt(line)\n\tcase \"DeprecatedAttr\":\n\t\treturn parseDeprecatedAttr(line)\n\tcase \"DisableTailCallsAttr\":\n\t\treturn parseDisableTailCallsAttr(line)\n\tcase \"DoStmt\":\n\t\treturn parseDoStmt(line)\n\tcase \"ElaboratedType\":\n\t\treturn parseElaboratedType(line)\n\tcase \"EmptyDecl\":\n\t\treturn parseEmptyDecl(line)\n\tcase \"Enum\":\n\t\treturn parseEnum(line)\n\tcase \"EnumConstantDecl\":\n\t\treturn parseEnumConstantDecl(line)\n\tcase \"EnumDecl\":\n\t\treturn parseEnumDecl(line)\n\tcase \"EnumExtensibilityAttr\":\n\t\treturn parseEnumExtensibilityAttr(line)\n\tcase \"EnumType\":\n\t\treturn parseEnumType(line)\n\tcase \"Field\":\n\t\treturn parseField(line)\n\tcase \"FieldDecl\":\n\t\treturn parseFieldDecl(line)\n\tcase \"FlagEnumAttr\":\n\t\treturn 
parseFlagEnumAttr(line)\n\tcase \"FloatingLiteral\":\n\t\treturn parseFloatingLiteral(line)\n\tcase \"FormatAttr\":\n\t\treturn parseFormatAttr(line)\n\tcase \"FormatArgAttr\":\n\t\treturn parseFormatArgAttr(line)\n\tcase \"FunctionDecl\":\n\t\treturn parseFunctionDecl(line)\n\tcase \"FullComment\":\n\t\treturn parseFullComment(line)\n\tcase \"FunctionProtoType\":\n\t\treturn parseFunctionProtoType(line)\n\tcase \"ForStmt\":\n\t\treturn parseForStmt(line)\n\tcase \"HTMLStartTagComment\":\n\t\treturn parseHTMLStartTagComment(line)\n\tcase \"HTMLEndTagComment\":\n\t\treturn parseHTMLEndTagComment(line)\n\tcase \"GCCAsmStmt\":\n\t\treturn parseGCCAsmStmt(line)\n\tcase \"GotoStmt\":\n\t\treturn parseGotoStmt(line)\n\tcase \"IBActionAttr\":\n\t\treturn parseIBActionAttr(line)\n\tcase \"IBOutletAttr\":\n\t\treturn parseIBOutletAttr(line)\n\tcase \"IfStmt\":\n\t\treturn parseIfStmt(line)\n\tcase \"ImplicitCastExpr\":\n\t\treturn parseImplicitCastExpr(line)\n\tcase \"ImplicitValueInitExpr\":\n\t\treturn parseImplicitValueInitExpr(line)\n\tcase \"IncompleteArrayType\":\n\t\treturn parseIncompleteArrayType(line)\n\tcase \"IndirectFieldDecl\":\n\t\treturn parseIndirectFieldDecl(line)\n\tcase \"InitListExpr\":\n\t\treturn parseInitListExpr(line)\n\tcase \"InlineCommandComment\":\n\t\treturn parseInlineCommandComment(line)\n\tcase \"IntegerLiteral\":\n\t\treturn parseIntegerLiteral(line)\n\tcase \"LabelStmt\":\n\t\treturn parseLabelStmt(line)\n\tcase \"MallocAttr\":\n\t\treturn parseMallocAttr(line)\n\tcase \"MaxFieldAlignmentAttr\":\n\t\treturn parseMaxFieldAlignmentAttr(line)\n\tcase \"MayAliasAttr\":\n\t\treturn parseMayAliasAttr(line)\n\tcase \"MemberExpr\":\n\t\treturn parseMemberExpr(line)\n\tcase \"MinVectorWidthAttr\":\n\t\treturn parseMinVectorWidthAttr(line)\n\tcase \"ModeAttr\":\n\t\treturn parseModeAttr(line)\n\tcase \"NoDebugAttr\":\n\t\treturn parseNoDebugAttr(line)\n\tcase \"NoEscapeAttr\":\n\t\treturn parseNoEscapeAttr(line)\n\tcase \"NoInlineAttr\":\n\t\treturn parseNoInlineAttr(line)\n\tcase \"NoThrowAttr\":\n\t\treturn parseNoThrowAttr(line)\n\tcase \"NonNullAttr\":\n\t\treturn parseNonNullAttr(line)\n\tcase \"NotTailCalledAttr\":\n\t\treturn parseNotTailCalledAttr(line)\n\tcase \"NSConsumedAttr\":\n\t\treturn parseNSConsumedAttr(line)\n\tcase \"NSConsumesSelfAttr\":\n\t\treturn parseNSConsumesSelfAttr(line)\n\tcase \"NSErrorDomainAttr\":\n\t\treturn parseNSErrorDomainAttr(line)\n\tcase \"NSReturnsRetainedAttr\":\n\t\treturn parseNSReturnsRetainedAttr(line)\n\tcase \"ObjCBoolLiteralExpr\":\n\t\treturn parseObjCBoolLiteralExpr(line)\n\tcase \"ObjCBoxableAttr\":\n\t\treturn parseObjCBoxableAttr(line)\n\tcase \"ObjCBridgeAttr\":\n\t\treturn parseObjCBridgeAttr(line)\n\tcase \"ObjCBridgeRelatedAttr\":\n\t\treturn parseObjCBridgeRelatedAttr(line)\n\tcase \"ObjCBridgeMutableAttr\":\n\t\treturn parseObjCBridgeMutableAttr(line)\n\tcase \"ObjCCategoryDecl\":\n\t\treturn parseObjCCategoryDecl(line)\n\tcase \"ObjCDesignatedInitializerAttr\":\n\t\treturn parseObjCDesignatedInitializerAttr(line)\n\tcase \"ObjCExceptionAttr\":\n\t\treturn parseObjCExceptionAttr(line)\n\tcase \"ObjCExplicitProtocolImplAttr\":\n\t\treturn parseObjCExplicitProtocolImplAttr(line)\n\tcase \"ObjCIndependentClassAttr\":\n\t\treturn parseObjCIndependentClassAttr(line)\n\tcase \"ObjCInterface\":\n\t\treturn parseObjCInterface(line, false)\n\tcase \"super ObjCInterface\":\n\t\treturn parseObjCInterface(line, true)\n\tcase \"ObjCInterfaceDecl\":\n\t\treturn parseObjCInterfaceDecl(line)\n\tcase 
\"ObjCInterfaceType\":\n\t\treturn parseObjCInterfaceType(line)\n\tcase \"ObjCIvarDecl\":\n\t\treturn parseObjCIvarDecl(line)\n\tcase \"getter ObjCMethod\":\n\t\treturn parseObjCMethod(line)\n\tcase \"ObjCMethod\":\n\t\treturn parseObjCMethod(line)\n\tcase \"ObjCMessageExpr\":\n\t\treturn parseObjCMessageExpr(line)\n\tcase \"ObjCMethodDecl\":\n\t\treturn parseObjCMethodDecl(line)\n\tcase \"ObjCObjectType\":\n\t\treturn parseObjCObjectType(line)\n\tcase \"ObjCObjectPointerType\":\n\t\treturn parseObjCObjectPointerType(line)\n\tcase \"ObjCProtocol\":\n\t\treturn parseObjCProtocol(line)\n\tcase \"ObjCReturnsInnerPointerAttr\":\n\t\treturn parseObjCReturnsInnerPointerAttr(line)\n\tcase \"ObjCRequiresSuperAttr\":\n\t\treturn parseObjCRequiresSuperAttr(line)\n\tcase \"ObjCProtocolDecl\":\n\t\treturn parseObjCProtocolDecl(line)\n\tcase \"ObjCRootClassAttr\":\n\t\treturn parseObjCRootClassAttr(line)\n\tcase \"ObjCPropertyDecl\":\n\t\treturn parseObjCPropertyDecl(line)\n\tcase \"ObjCTypeParamDecl\":\n\t\treturn parseObjCTypeParamDecl(line)\n\tcase \"OffsetOfExpr\":\n\t\treturn parseOffsetOfExpr(line)\n\tcase \"PackedAttr\":\n\t\treturn parsePackedAttr(line)\n\tcase \"ParagraphComment\":\n\t\treturn parseParagraphComment(line)\n\tcase \"ParamCommandComment\":\n\t\treturn parseParamCommandComment(line)\n\tcase \"ParenExpr\":\n\t\treturn parseParenExpr(line)\n\tcase \"ParenType\":\n\t\treturn parseParenType(line)\n\tcase \"ParmVarDecl\":\n\t\treturn parseParmVarDecl(line)\n\tcase \"PointerType\":\n\t\treturn parsePointerType(line)\n\tcase \"DecayedType\":\n\t\treturn parseDecayedType(line)\n\tcase \"PredefinedExpr\":\n\t\treturn parsePredefinedExpr(line)\n\tcase \"PureAttr\":\n\t\treturn parsePureAttr(line)\n\tcase \"QualType\":\n\t\treturn parseQualType(line)\n\tcase \"Record\":\n\t\treturn parseRecord(line)\n\tcase \"RecordDecl\":\n\t\treturn parseRecordDecl(line)\n\tcase \"RecordType\":\n\t\treturn parseRecordType(line)\n\tcase \"RestrictAttr\":\n\t\treturn parseRestrictAttr(line)\n\tcase \"ReturnStmt\":\n\t\treturn parseReturnStmt(line)\n\tcase \"ReturnsTwiceAttr\":\n\t\treturn parseReturnsTwiceAttr(line)\n\tcase \"SentinelAttr\":\n\t\treturn parseSentinelAttr(line)\n\tcase \"ShuffleVectorExpr\":\n\t\treturn parseShuffleVectorExpr(line)\n\tcase \"StmtExpr\":\n\t\treturn parseStmtExpr(line)\n\tcase \"StringLiteral\":\n\t\treturn parseStringLiteral(line)\n\tcase \"SwiftBridgedTypedefAttr\":\n\t\treturn parseSwiftBridgedTypedefAttr(line)\n\tcase \"SwiftErrorAttr\":\n\t\treturn parseSwiftErrorAttr(line)\n\tcase \"SwiftNameAttr\":\n\t\treturn parseSwiftNameAttr(line)\n\tcase \"SwiftNewtypeAttr\":\n\t\treturn parseSwiftNewtypeAttr(line)\n\tcase \"SwiftPrivateAttr\":\n\t\treturn parseSwiftPrivateAttr(line)\n\tcase \"SwitchStmt\":\n\t\treturn parseSwitchStmt(line)\n\tcase \"TargetAttr\":\n\t\treturn parseTargetAttr(line)\n\tcase \"TextComment\":\n\t\treturn parseTextComment(line)\n\tcase \"TranslationUnitDecl\":\n\t\treturn parseTranslationUnitDecl(line)\n\tcase \"TransparentUnionAttr\":\n\t\treturn parseTransparentUnionAttr(line)\n\tcase \"Typedef\":\n\t\treturn parseTypedef(line)\n\tcase \"TypedefDecl\":\n\t\treturn parseTypedefDecl(line)\n\tcase \"TypedefType\":\n\t\treturn parseTypedefType(line)\n\tcase \"UnaryExprOrTypeTraitExpr\":\n\t\treturn parseUnaryExprOrTypeTraitExpr(line)\n\tcase \"UnaryOperator\":\n\t\treturn parseUnaryOperator(line)\n\tcase \"UnavailableAttr\":\n\t\treturn parseUnavailableAttr(line)\n\tcase \"UsedAttr\":\n\t\treturn parseUsedAttr(line)\n\tcase \"UnusedAttr\":\n\t\treturn 
parseUnusedAttr(line)\n\tcase \"VAArgExpr\":\n\t\treturn parseVAArgExpr(line)\n\tcase \"VarDecl\":\n\t\treturn parseVarDecl(line)\n\tcase \"VectorType\":\n\t\treturn parseVectorType(line)\n\tcase \"VerbatimBlockComment\":\n\t\treturn parseVerbatimBlockComment(line)\n\tcase \"VerbatimBlockLineComment\":\n\t\treturn parseVerbatimBlockLineComment(line)\n\tcase \"VerbatimLineComment\":\n\t\treturn parseVerbatimLineComment(line)\n\tcase \"VisibilityAttr\":\n\t\treturn parseVisibilityAttr(line)\n\tcase \"WarnUnusedResultAttr\":\n\t\treturn parseWarnUnusedResultAttr(line)\n\tcase \"WeakAttr\":\n\t\treturn parseWeakAttr(line)\n\tcase \"WeakImportAttr\":\n\t\treturn parseWeakImportAttr(line)\n\tcase \"WhileStmt\":\n\t\treturn parseWhileStmt(line)\n\tcase \"...\":\n\t\treturn parseVariadic(line)\n\tcase \"NullStmt\":\n\t\treturn nil\n\tdefault:\n\t\treturn parseUnknown(nodeName, line)\n\t}\n}", "func (b *Block) Parse() error {\n\tstartPos := uint64(0)\n\tb.subelements = []*Block{}\n\tfor startPos < uint64(len(b.value)) {\n\t\tblock, blockLen, err := DecodeBlock(b.value[startPos:])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tb.subelements = append(b.subelements, block)\n\t\tstartPos += blockLen\n\t}\n\treturn nil\n}", "func ParseAuthLevel(name string) Level {\n\tswitch name {\n\tcase \"anon\", \"ANON\":\n\t\treturn LevelAnon\n\tcase \"auth\", \"AUTH\":\n\t\treturn LevelAuth\n\tcase \"root\", \"ROOT\":\n\t\treturn LevelRoot\n\tdefault:\n\t\treturn LevelNone\n\t}\n}", "func (p *Parser) parseGroupedExpression() ast.Expression {\n\tp.nextToken()\n\n\texp := p.parseExpression(LOWEST)\n\n\tif !p.expectPeek(token.RPAREN) {\n\t\treturn nil\n\t}\n\n\treturn exp\n}", "func parseParagraph(p *blockParser) stateFn {\n\tfor r := p.next(); r != eof; r = p.next() {\n\t\tif r == '\\n' {\n\t\t\tr = p.peek()\n\t\t\t// Head type has tailling ----- (H2) or ====== (H1)\n\t\t\tif r == '-' || r == '=' {\n\t\t\t\tp.consume(r)\n\t\t\t\tr1 := p.peek(2)\n\t\t\t\tif r1 == '\\n' {\n\t\t\t\t\tp.next()\n\t\t\t\t}\n\t\t\t\tcontent := p.src[p.start:p.cur]\n\t\t\t\tcontent = regexp.MustCompile(\"\\n?\"+string(r)+\"*\\n?\").ReplaceAll(content, []byte{})\n\n\t\t\t\tvar level int\n\t\t\t\tif r == '-' {\n\t\t\t\t\tlevel = 2\n\t\t\t\t}\n\t\t\t\tif r == '=' {\n\t\t\t\t\tlevel = 1\n\t\t\t\t}\n\n\t\t\t\thead := &Head{level, content}\n\t\t\t\tp.emit(head)\n\t\t\t\treturn parseBegin\n\t\t\t}\n\t\t\tif r == '\\n' {\n\t\t\t\tp.next()\n\t\t\t\tgoto emit\n\t\t\t}\n\t\t}\n\t}\nemit:\n\tcontent := p.src[p.start:p.cur]\n\tcontent = regexp.MustCompile(\"\\n{0,2}$\").ReplaceAll(content, []byte{})\n\tparagraph := &Paragraph{content: content}\n\tp.emit(paragraph)\n\treturn parseBegin\n\n}", "func ParseOne(sql string) (Statement, error) {\n\tvar p Parser\n\treturn p.parseOneWithDepth(1, sql)\n}", "func (r Rule) parse() (match, cond, result string) {\n\ts := strings.Split(r.rule, \"->\")\n\tif len(s) != 2 {\n\t\tlog.Fatalf(\"no arrow in %s\", r)\n\t}\n\tmatch = normalizeSpaces(s[0])\n\tresult = normalizeSpaces(s[1])\n\tcond = \"\"\n\tif i := strings.Index(match, \"&&\"); i >= 0 {\n\t\tcond = normalizeSpaces(match[i+2:])\n\t\tmatch = normalizeSpaces(match[:i])\n\t}\n\treturn match, cond, result\n}", "func (p *Parser) Parse() (Statement, error) {\n\ttok, lit := p.scanIgnoreWhitespace()\n\tif _, ok := p.lang.trees[tok]; !ok {\n\t\treturn nil, fmt.Errorf(\"found %q, expected %s\", lit, tokensToString(p.lang.trees))\n\t}\n\ttree := p.lang.trees[tok]\n\tparams := Params{}\n\tfor {\n\t\ttok, lit = p.scanIgnoreWhitespace()\n\t\tif tok == EOF {\n\t\t\tif 
tree.Handler != nil {\n\t\t\t\treturn tree.Handler(params), nil\n\t\t\t} else if len(tree.Children) == 0 {\n\t\t\t\treturn nil, fmt.Errorf(\"internal error: a language spec leaf must have a handler\")\n\t\t\t} else {\n\t\t\t\treturn nil, fmt.Errorf(\"unexpected end of statement, expecting %s\", tokensToString(tree.Children))\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tif _, ok := tree.Children[tok]; !ok {\n\t\t\treturn nil, fmt.Errorf(\"found %q, expected %s\", lit, tokensToString(tree.Children))\n\t\t}\n\t\tif tok == STR || tok == NUM {\n\t\t\tparams = append(params, lit)\n\t\t}\n\t\ttree = tree.Children[tok]\n\t}\n\treturn nil, nil\n}", "func un(p *parser) {\n\tp.indent--\n\tp.printTrace(\")\")\n}", "func un(p *parser) {\n\tp.indent--\n\tp.printTrace(\")\")\n}", "func (t *treeSimplifier) enter(l parse.Node) {\n\tt.nodesDepth = append(t.nodesDepth, l)\n}", "func (this *Codec) deserialize(data string) *TreeNode { \n l:=strings.Split(data,\",\")\n for i:=0;i<len(l);i++{\n if l[i]!=\"\"{\n this.l=append(this.l,l[i])\n }\n }\n fmt.Println(this.l)\n return this.helpDeserialize()\n}", "func parsenode(d *xml.Decoder, cur ElementNode) error {\n\tfor {\n\t\ttkn, err := d.Token()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tswitch tkn.(type) {\n\t\tcase xml.StartElement:\n\t\t\tre := tkn.(xml.StartElement)\n\t\t\ttagname := re.Name.Local\n\t\t\tattrs := make(map[string]string)\n\t\t\tfor _, v := range re.Attr {\n\t\t\t\tattrs[v.Name.Local] = v.Value\n\t\t\t}\n\t\t\tenode := Element(tagname, attrs)\n\t\t\tcur.Append(enode)\n\t\t\tif err := parsenode(d, enode); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase xml.EndElement:\n\t\t\tre := tkn.(xml.EndElement)\n\t\t\tif re.Name.Local != cur.TagName() {\n\t\t\t\t// what do?\n\t\t\t\treturn errors.New(\"closing a tag that was never opened: \" + re.Name.Local)\n\t\t\t}\n\t\t\t// this tag closed\n\t\t\treturn nil\n\t\tcase xml.CharData:\n\t\t\tre := tkn.(xml.CharData)\n\t\t\t// discard if empty\n\t\t\ttxt := strings.TrimSpace(string(re))\n\t\t\tif txt != \"\" {\n\t\t\t\tcur.Append(Text(txt))\n\t\t\t}\n\t\tdefault:\n\t\t\t// ignore xml.Comment\n\t\t\t// ignore xml.ProcInst\n\t\t\t// ignore xml.Directive\n\t\t}\n\t}\n}", "func parseExpression(l string, existing expression) (expression, error) {\n\tif len(l) == 0 {\n\t\treturn nil, errors.New(\"error parsing empty string as expression\")\n\t}\n\te := existing\n\tvar opStack []binaryOperator\n\tfor i := 0; i < len(l); i++ {\n\t\tif l[i] == ' ' {\n\t\t\tcontinue\n\t\t}\n\t\tif l[i] == '(' {\n\t\t\tvar right int\n\t\t\tparens := 1\n\t\t\tfor right = i + 1; right < len(l); right++ {\n\t\t\t\tif l[right] == '(' {\n\t\t\t\t\tparens++\n\t\t\t\t}\n\t\t\t\tif l[right] == ')' {\n\t\t\t\t\tparens--\n\t\t\t\t}\n\t\t\t\tif parens == 0 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif parens != 0 {\n\t\t\t\treturn nil, fmt.Errorf(\"missing end paren at %d after %q\", i, l[i:])\n\t\t\t}\n\t\t\tsubE, err := parseExpression(l[i+1:right], nil)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"error at %d parsing subexpression %q: %v\", i, l[i+1:right], err)\n\t\t\t}\n\t\t\ti = right + 1\n\t\t\tif e == nil {\n\t\t\t\te = subE\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\te = binaryExpression{left: e, right: subE, op: opStack[len(opStack)-1]}\n\t\t\topStack = opStack[:len(opStack)-1]\n\t\t\tcontinue\n\t\t}\n\t\tif l[i] == '+' {\n\t\t\topStack = append(opStack, add)\n\t\t\tcontinue\n\t\t}\n\t\tif l[i] == '*' {\n\t\t\topStack = append(opStack, mul)\n\t\t\tcontinue\n\t\t}\n\t\tn, err 
:= strconv.Atoi(l[i : i+1])\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error parsing int at %d: %v\", i, err)\n\t\t}\n\t\tif e == nil {\n\t\t\te = intValue(n)\n\t\t\tcontinue\n\t\t}\n\t\tif len(opStack) == 0 {\n\t\t\t// is len ever != 1?\n\t\t\treturn nil, fmt.Errorf(\"opStack is empty at %d, expression so far: %v\", i, e)\n\t\t}\n\t\te = binaryExpression{left: e, right: intValue(n), op: opStack[len(opStack)-1]}\n\t\topStack = opStack[:len(opStack)-1]\n\t}\n\treturn e, nil\n}", "func un(p *Parser) {\n\tp.indent--\n\tp.printTrace(\")\")\n}", "func un(p *Parser) {\n\tp.indent--\n\tp.printTrace(\")\")\n}", "func (t *treeSimplifier) current() parse.Node {\n\treturn t.nodesDepth[len(t.nodesDepth)-1]\n}", "func ParseLevel(loglevel string) zapcore.Level {\n\tvar lv zapcore.Level\n\tlv.UnmarshalText([]byte(loglevel))\n\treturn lv\n}", "func reParse(d *Dict, s string, strict bool) (*reSyntax, error) {\n\tvar p reParser\n\tp.dict = d\n\n\tstart := 0\n\tparens := 0\n\ti := 0\n\tfor i < len(s) {\n\t\tswitch {\n\t\tcase strings.HasPrefix(s[i:], \"((\"):\n\t\t\tif strict && !atBOL(s, i) {\n\t\t\t\treturn nil, reSyntaxError(s, i, fmt.Errorf(\"(( not at beginning of line\"))\n\t\t\t}\n\t\t\tp.words(s[start:i], \"((\")\n\t\t\tp.push(&reSyntax{op: opLeftParen})\n\t\t\ti += 2\n\t\t\tstart = i\n\t\t\tparens++\n\n\t\tcase strings.HasPrefix(s[i:], \"||\"):\n\t\t\tif strict && parens == 0 {\n\t\t\t\treturn nil, reSyntaxError(s, i, fmt.Errorf(\"|| outside (( ))\"))\n\t\t\t}\n\t\t\tp.words(s[start:i], \"||\")\n\t\t\tif err := p.verticalBar(); err != nil {\n\t\t\t\treturn nil, reSyntaxError(s, i, err)\n\t\t\t}\n\t\t\ti += 2\n\t\t\tstart = i\n\n\t\tcase strings.HasPrefix(s[i:], \"))\"):\n\t\t\t// )) must be followed by ?? or end line\n\t\t\tif strict {\n\t\t\t\tj := i + 2\n\t\t\t\tfor j < len(s) && (s[j] == ' ' || s[j] == '\\t') {\n\t\t\t\t\tj++\n\t\t\t\t}\n\t\t\t\tif j < len(s) && s[j] != '\\n' && (j+1 >= len(s) || s[j] != '?' || s[j+1] != '?') {\n\t\t\t\t\treturn nil, reSyntaxError(s, i, fmt.Errorf(\")) not at end of line\"))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tp.words(s[start:i], \"))\")\n\t\t\tif err := p.rightParen(); err != nil {\n\t\t\t\treturn nil, reSyntaxError(s, i, err)\n\t\t\t}\n\t\t\ti += 2\n\t\t\tstart = i\n\t\t\tparens--\n\n\t\tcase strings.HasPrefix(s[i:], \"??\"):\n\t\t\t// ?? must be preceded by )) on same line and must end the line.\n\t\t\tif strict {\n\t\t\t\tj := i\n\t\t\t\tfor j > 0 && (s[j-1] == ' ' || s[j-1] == '\\t') {\n\t\t\t\t\tj--\n\t\t\t\t}\n\t\t\t\tif j < 2 || s[j-1] != ')' || s[j-2] != ')' {\n\t\t\t\t\treturn nil, reSyntaxError(s, i, fmt.Errorf(\"?? not preceded by ))\"))\n\t\t\t\t}\n\t\t\t}\n\t\t\tif strict && !atEOL(s, i+2) {\n\t\t\t\treturn nil, reSyntaxError(s, i, fmt.Errorf(\"?? 
not at end of line\"))\n\t\t\t}\n\n\t\t\tp.words(s[start:i], \"??\")\n\t\t\tif err := p.quest(); err != nil {\n\t\t\t\treturn nil, reSyntaxError(s, i, err)\n\t\t\t}\n\t\t\ti += 2\n\t\t\tstart = i\n\n\t\tcase strings.HasPrefix(s[i:], \"__\"):\n\t\t\tj := i + 2\n\t\t\tfor j < len(s) && '0' <= s[j] && s[j] <= '9' {\n\t\t\t\tj++\n\t\t\t}\n\t\t\tif j == i+2 {\n\t\t\t\ti++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !strings.HasPrefix(s[j:], \"__\") {\n\t\t\t\ti++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tn, err := strconv.Atoi(s[i+2 : j])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, reSyntaxError(s, i, errors.New(\"invalid wildcard count \"+s[i:j+2]))\n\t\t\t}\n\t\t\tp.words(s[start:i], \"__\")\n\t\t\tp.push(&reSyntax{op: opWild, n: int32(n)})\n\t\t\ti = j + 2\n\t\t\tstart = i\n\n\t\tcase strings.HasPrefix(s[i:], \"//**\"):\n\t\t\tj := strings.Index(s[i+4:], \"**//\")\n\t\t\tif j < 0 {\n\t\t\t\treturn nil, reSyntaxError(s, i, errors.New(\"opening //** without closing **//\"))\n\t\t\t}\n\t\t\tp.words(s[start:i], \"//** **//\")\n\t\t\ti += 4 + j + 4\n\t\t\tstart = i\n\n\t\tdefault:\n\t\t\ti++\n\t\t}\n\t}\n\n\tp.words(s[start:], \"\")\n\tp.concat()\n\tif p.swapVerticalBar() {\n\t\t// pop vertical bar\n\t\tp.stack = p.stack[:len(p.stack)-1]\n\t}\n\tp.alternate()\n\n\tn := len(p.stack)\n\tif n != 1 {\n\t\treturn nil, reSyntaxError(s, len(s), fmt.Errorf(\"missing )) at end\"))\n\t}\n\treturn p.stack[0], nil\n}", "func (p *parser) parseStatement() (nodes.Node, error) {\n\ttoken := p.current()\n\tfor tokenType, parser := range p.statementParsers {\n\t\tif p.advanceIf(isType(tokenType)) {\n\t\t\treturn parser(token)\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"expected start of statement (near %s)\", p.current().Text)\n}", "func (p *Parser) parseSegment(tokenEnd tokenType) List {\n\tlist := make(List, 0)\n\tnodes := NewItems()\n\nloop:\n\tfor {\n\t\tswitch p.peek(1).typ {\n\t\tcase tokenLeftParenthesis:\n\t\t\tp.next()\n\t\t\tif tokenEnd == tokenRightParenthesis {\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t\tnewNodes := p.parseSegment(tokenRightParenthesis)\n\t\t\tfor i := 0; i < len(newNodes); i++ {\n\t\t\t\tif newNodes[i].Len() == 1 {\n\t\t\t\t\t// Skip one-token segment which most likely is an abbreviation.\n\t\t\t\t} else {\n\t\t\t\t\tlist = append(list, newNodes[i])\n\t\t\t\t}\n\t\t\t}\n\t\tcase tokenIdentifier:\n\t\t\tn := p.parseIdentifier()\n\t\t\tnodes.Add(n)\n\t\tcase tokenNumber:\n\t\t\tif n := p.parseNumber(); n.Valid() {\n\t\t\t\tnodes.Add(n)\n\t\t\t}\n\t\tcase tokenUnit:\n\t\t\tif n := p.parseUnit(); n.Valid() {\n\t\t\t\tnodes.Add(n)\n\t\t\t}\n\t\tcase tokenNegation, tokenComparison, tokenLessComparison, tokenGreaterComparison:\n\t\t\tif n := p.parseComparison(); n.Valid() {\n\t\t\t\tnodes.Add(n)\n\t\t\t}\n\t\tcase tokenConjunction:\n\t\t\tif n := p.parseConjunction(); n.Valid() {\n\t\t\t\tnodes.Add(n)\n\t\t\t}\n\t\tcase tokenSlash:\n\t\t\tif nodes.LastType() == itemNumber {\n\t\t\t\t// Because a number preceded the slash, these tokens\n\t\t\t\t// may compose to a unit, such as '/ul'.\n\t\t\t\tn := p.parseIdentifier()\n\t\t\t\tnodes.Add(n)\n\t\t\t} else {\n\t\t\t\tif n := p.parseSlash(); n.Valid() {\n\t\t\t\t\tnodes.Add(n)\n\t\t\t\t}\n\t\t\t}\n\t\tcase tokenDash:\n\t\t\tif n := p.parseDash(); n.Valid() {\n\t\t\t\tnodes.Add(n)\n\t\t\t}\n\t\tcase tokenPunctuation:\n\t\t\tif n := p.parsePunctuation(); n.Valid() {\n\t\t\t\tnodes.Add(n)\n\t\t\t}\n\t\tcase tokenEOF:\n\t\t\tbreak loop\n\t\tcase tokenEnd:\n\t\t\tp.next()\n\t\t\tbreak loop\n\t\tdefault:\n\t\t\tp.next()\n\t\t}\n\t}\n\tif !nodes.Empty() {\n\t\tlist = 
append(list, nodes)\n\t}\n\treturn list\n}", "func (p *Parser) parse(\n\tdepth int, sql string, tokens []sqlSymType, nakedIntType *types.T,\n) (Statement, error) {\n\tp.lexer.init(sql, tokens, nakedIntType)\n\tdefer p.lexer.cleanup()\n\tif p.parserImpl.Parse(&p.lexer) != 0 {\n\t\tif p.lexer.lastError == nil {\n\t\t\t// This should never happen -- there should be an error object\n\t\t\t// every time Parse() returns nonzero. We're just playing safe\n\t\t\t// here.\n\t\t\tp.lexer.Error(\"syntax error\")\n\t\t}\n\t\terr := p.lexer.lastError\n\n\t\t// Compatibility with 19.1 telemetry: prefix the telemetry keys\n\t\t// with the \"syntax.\" prefix.\n\t\t// TODO(knz): move the auto-prefixing of feature names to a\n\t\t// higher level in the call stack.\n\t\ttkeys := errors.GetTelemetryKeys(err)\n\t\tif len(tkeys) > 0 {\n\t\t\tfor i := range tkeys {\n\t\t\t\ttkeys[i] = \"syntax.\" + tkeys[i]\n\t\t\t}\n\t\t\terr = errors.WithTelemetry(err, tkeys...)\n\t\t}\n\n\t\treturn Statement{}, err\n\t}\n\treturn Statement{\n\t\tAST: p.lexer.stmt,\n\t\tSQL: sql,\n\t\tNumPlaceholders: p.lexer.numPlaceholders,\n\t\tNumAnnotations: p.lexer.numAnnotations,\n\t}, nil\n}", "func (p *Parser) parseGroupedExpression() asti.ExpressionI {\n\tp.nextToken()\n\n\texp := p.parseExpression(precedence.LOWEST)\n\tif !p.expectPeek(tokentype.RPAREN) {\n\t\treturn nil\n\t}\n\treturn exp\n}", "func (f *FrontPage) parse(db *sql.DB) *FrontPage {\n\tpage, err := goquery.NewDocument(ROOT_URL)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\ta := f.next()\n\tpage.Find(MAIN_TABLE).Each(func(_ int, s *goquery.Selection) {\n\t\tcls, _ := s.Attr(\"class\")\n\t\tswitch cls {\n\t\t// Articles are separated by empty tr elements with the \"spacer\" class\n\t\tcase SPACER:\n\t\t\ta.store(db)\n\t\t\ta = f.next()\n\t\t\t// Each article starts with a title row classed (for better or worse) \"athing\"\n\t\tcase TITLE_ROW:\n\t\t\ta.parseTitleRow(s)\n\t\t\t// Title rows are followed by an unclassed tr with a child td classed \"subtext\"\n\t\t\t// There are also a few other rows without classnames, which should be ignored.\n\t\tcase \"\":\n\t\t\tif a.parseSubtextRow(s.Find(SUBTEXT)) {\n\t\t\t\tf.Articles = append(f.Articles, a)\n\t\t\t}\n\t\t}\n\t})\n\treturn f\n}", "func (p *Page) parse() error {\n\tp.once.Do(func() {\n\t\tif p.root, p.err = html.Parse(p.body); p.err != nil {\n\t\t\tp.err = fmt.Errorf(\"ant: parse html %q - %w\", p.URL, p.err)\n\t\t}\n\t\tp.close()\n\t})\n\treturn p.err\n}", "func parse(src string) (interface{}, error) {\n\ttokens := tokenize(src)\n\tast, remainder, err := readFromTokens(tokens)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(remainder) > 0 {\n\t\treturn nil, errors.New(\"unexpected trailing tokens\")\n\t}\n\treturn ast, nil\n}", "func Parse(tokens *list.List, funcDefs map[string]int) (ParseTreeRoot, error) {\r\n\r\n\ttoken := tokens.Front()\r\n\ttree := ParseTreeRoot{make([]ParseTree, 0)}\r\n\r\n\tfor token != nil {\r\n\t\tif tokenID(token) != TokenIdentifier {\r\n\t\t\treturn tree, fmt.Errorf(\"\\\"unit\\\", \\\"assembly\\\", \\\"enum\\\", \\\"summarize\\\", or \\\"solve\\\" expected but \\\"%s\\\" given at position %d\", tokenContent(token), tokenPos(token))\r\n\t\t}\r\n\r\n\t\tswitch tokenContent(token) {\r\n\t\tcase \"unit\":\r\n\t\t\t_token, unit, err := parseUnit(token.Next(), tree, funcDefs)\r\n\t\t\ttoken = _token\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn tree, err\r\n\t\t\t}\r\n\t\t\ttree.AddUnit(unit)\r\n\t\t\tbreak\r\n\t\tcase \"enum\":\r\n\t\t\t_token, enum, err := parseEnum(token.Next(), 
tree)\r\n\t\t\ttoken = _token\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn tree, err\r\n\t\t\t}\r\n\t\t\ttree.AddEnum(enum)\r\n\t\t\tbreak\r\n\t\tcase \"assembly\":\r\n\t\t\t_token, assembly, err := parseAssembly(token.Next(), tree)\r\n\t\t\ttoken = _token\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn tree, err\r\n\t\t\t}\r\n\t\t\ttree.AddAssembly(assembly)\r\n\t\t\tbreak\r\n\t\tcase \"summarize\":\r\n\t\t\t_token, summarize, err := parseSummarize(token.Next(), tree)\r\n\t\t\ttoken = _token\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn tree, err\r\n\t\t\t}\r\n\t\t\ttree.AddSummarize(summarize)\r\n\t\t\tbreak\r\n\t\tcase \"solve\":\r\n\t\t\t_token, solve, err := parseSolve(token.Next(), tree)\r\n\t\t\ttoken = _token\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn tree, err\r\n\t\t\t}\r\n\t\t\ttree.AddSolve(solve)\r\n\t\t\tbreak\r\n\t\t}\r\n\t\ttoken = token.Next()\r\n\t}\r\n\r\n\treturn tree, nil\r\n\r\n}", "func parseSingleSExp(tokens []Token, i int) (SExp, int) {\n\n\t// prelim. checking to make sure we don't go over\n\tlength := len(tokens)\n\tif i >= length {\n\t\treturn BadSExp{}, length\n\t}\n\n\ttokenType := tokens[i].tokenType\n\n\t// atom cases\n\tif tokenType == DONE {\n\n\t\treturn Atom{atom: tokens[i]}, i + 1\n\n\t} else if tokenType == ID {\n\n\t\t// but we need to check if next is a HONK\n\t\tif i + 1 >= length {\n\t\t\treturn Atom{atom: tokens[i]}, i + 1\n\t\t}\n\n\t\tif tokens[i + 1].tokenType == HONK {\n\t\t\t// in this case, it's a list\n\t\t\treturn honkParsing(tokens, i + 1, Atom{atom: tokens[i]})\n\t\t}\n\n\t\treturn Atom{atom: tokens[i]}, i + 1\n\n\t} else if tokenType == RPAREN || tokenType == ERROR || tokenType == LPAREN { // error cases\n\n\t\treturn BadSExp{}, i + 1\n\n\t} else {\n\n\t\t// must be a goose or a gosling\n\t\t// in this case we make them a list, ie we group the tokens up\n\t\t// form: Goose var-name SExp that's not a goose, deal in the list parsing\n\t\t// form: Gosling var-name Honk ... 
honK or Gosling var-name ID\n\n\t\tnextSExp := SExpListNode{first: Atom{atom: tokens[i]}}\n\n\t\tif i + 1 >= length {\n\t\t\treturn generateBadList(), length\n\t\t}\n\n\t\tif tokens[i + 1].tokenType != ID {\n\t\t\treturn generateBadList(), i + 1 // move onto the next one, assume current one is broken, but we don't move to end\n\t\t}\n\n\t\t// goose-or-gosling <var-name> is put into list\n\t\tnextSExp.rest = &SExpListNode{first: Atom{atom: tokens[i + 1]}}\n\n\t\t// here is where it differs depending on goose or gosling\n\t\t// for goose we skip over just the var name and goose\n\t\t// gosling we skip over the Honk as well\n\t\tif tokenType == GOOSE {\n\t\t\tvar sexp SExp\n\t\t\tsexp, i = parseSingleSExp(tokens, i + 2)\n\n\t\t\t// add in body of goose, finish with empty\n\t\t\trest := SExpListNode{first: sexp, rest: &SExpListNode{first: Empty{}}}\n\t\t\tnextSExp.rest.rest = &rest\n\t\t} else {\n\t\t\tvar sexpListNode SExpListNode\n\t\t\tif i + 2 >= length {\n\t\t\t\treturn generateBadList(), length\n\t\t\t}\n\n\t\t\tif tokens[i + 2].tokenType != LPAREN {\n\t\t\t\treturn generateBadList(), length\n\t\t\t}\n\n\t\t\tsexpListNode, i = parseSExpList(tokens, i + 3)\n\t\t\tif isBadList(sexpListNode) {\n\t\t\t\treturn generateBadList(), i\n\t\t\t}\n\n\t\t\tnextSExp.rest.rest = &sexpListNode // we skip over SExpListNode, that's why we only have two rest's\n\n\t\t\t// now we need to check if HONK comes after the function, ie is an application\n\t\t\tif i < length {\n\t\t\t\tif tokens[i].tokenType == HONK {\n\t\t\t\t\treturn honkParsing(tokens, i, nextSExp)\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\n\t\treturn nextSExp, i\n\n\t}\n}", "func ParseOne(path string) (yaxml.XMLTree, error) {\r\n\tfile, e := os.Open(path)\r\n\tif e != nil {\r\n\t\tlog.Fatalln(e)\r\n\t}\r\n\tdefer file.Close()\r\n\r\n\tfmt.Println(\"ParseOne()\")\r\n\treturn yaxml.Parse(file)\r\n}", "func Parse(shards []*dynamodbstreams.Shard) *Node {\n\troot := &Node{}\n\n\tfor _, shard := range shards {\n\t\tif node, ok := root.Find(shard.ParentShardId); ok {\n\t\t\tnode.appendShard(shard)\n\t\t\tcontinue\n\t\t}\n\n\t\troot.appendShard(shard)\n\t}\n\n\tvar children []*Node\n\tfor _, child := range root.Children {\n\t\tif child.Shard.ParentShardId == nil {\n\t\t\tchildren = append(children, child)\n\t\t\tcontinue\n\t\t}\n\n\t\tif node, ok := root.Find(child.Shard.ParentShardId); ok {\n\t\t\tnode.appendNode(child)\n\t\t\tcontinue\n\t\t}\n\n\t\tchildren = append(children, child)\n\t}\n\troot.Children = children\n\n\treturn root\n}", "func (p *parser) parseBlock() (block tree.Block) {\n\tfor last := false; !last && !p.isBlockFollow(); {\n\t\tvar stmt tree.Stmt\n\t\tstmt, last = p.parseStmt()\n\t\tblock.Items = append(block.Items, stmt)\n\t\tvar semi tree.Token\n\t\tif p.tok == token.SEMICOLON {\n\t\t\tsemi = p.tokenNext()\n\t\t}\n\t\tblock.Seps = append(block.Seps, semi)\n\t}\n\treturn\n}", "func (t *ASCIITree) Level() int {\n\tif t.Parent == nil {\n\t\t// if the root node does not have text, it will be considered level -1\n\t\t// so that all it's children will be roots.\n\t\tif t.Text == \"\" {\n\t\t\treturn -1\n\t\t}\n\t\treturn 0\n\t}\n\treturn t.Parent.Level() + 1\n}", "func parse(selector string, path *field.Path) (internalSelector, error) {\n\tp := &Parser{l: &Lexer{s: selector, pos: 0}, path: path}\n\titems, err := p.parse()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsort.Sort(ByKey(items)) // sort to grant determistic parsing\n\treturn internalSelector(items), err\n}", "func (this *Codec) deserialize(data string) *TreeNode {\n\t// 
fmt.Println(data)\n\tdata = data[1 : len(data)-1] // remove the \"[]\"\n\telements := strings.Split(data, \",\")\n\n\tif len(elements) < 3 {\n\t\treturn nil\n\t}\n\n\trootVal, _ := strconv.Atoi(elements[0])\n\troot := &TreeNode{\n\t\tVal: rootVal,\n\t}\n\tqueue := []*TreeNode{}\n\tqueue = append(queue, root)\n\n\ti := 1\n\tfor ; i < len(elements); i = i + 2 {\n\t\tparent := queue[0]\n\t\tqueue = queue[1:]\n\n\t\tleft := elements[i]\n\t\tright := elements[i+1]\n\n\t\tif left != NULL_STR {\n\t\t\tleftVal, _ := strconv.Atoi(left)\n\t\t\tleftNode := &TreeNode{\n\t\t\t\tVal: leftVal,\n\t\t\t}\n\t\t\tparent.Left = leftNode\n\t\t\tqueue = append(queue, leftNode)\n\t\t}\n\n\t\tif right != NULL_STR {\n\t\t\trightVal, _ := strconv.Atoi(right)\n\t\t\trightNode := &TreeNode{\n\t\t\t\tVal: rightVal,\n\t\t\t}\n\t\t\tparent.Right = rightNode\n\t\t\tqueue = append(queue, rightNode)\n\t\t}\n\t}\n\n\treturn root\n}", "func (this *Codec) deserialize(data string) *TreeNode {\n\tif data == \"\" {\n\t\treturn nil\n\t}\n\titems := strings.Split(data, \",\")\n\tvalue, _ := strconv.Atoi(items[0])\n\troot := &TreeNode{Val: value}\n\tp := root\n\tqueue := []*TreeNode{root}\n\tfor i := 0; len(queue) != 0; {\n\t\tp = queue[0]\n\t\tqueue = queue[1:]\n\t\ti++\n\t\tif items[i] == \"\" {\n\t\t\tp.Left = nil\n\t\t} else {\n\t\t\tvalue, _ = strconv.Atoi(items[i])\n\t\t\tp.Left = &TreeNode{Val: value}\n\t\t\tqueue = append(queue, p.Left)\n\t\t}\n\t\ti++\n\t\tif items[i] == \"\" {\n\t\t\tp.Right = nil\n\t\t} else {\n\t\t\tvalue, _ = strconv.Atoi(items[i])\n\t\t\tp.Right = &TreeNode{Val: value}\n\t\t\tqueue = append(queue, p.Right)\n\t\t}\n\t}\n\treturn root\n}", "func (n *nestedTypeData) Unnest() *int {\n\tif len(n.maxElems) == 0 {\n\t\treturn nil\n\t}\n\tv := n.maxElems[len(n.maxElems)-1]\n\tn.maxElems = n.maxElems[:len(n.maxElems)-1]\n\treturn v\n}", "func (w *Witness) parse(sthRaw []byte, logID string) (*ct.SignedTreeHead, error) {\n\tsv, ok := w.Logs[logID]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"log %q not found\", logID)\n\t}\n\tvar sth ct.SignedTreeHead\n\tif err := json.Unmarshal(sthRaw, &sth); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal json: %v\", err)\n\t}\n\tvar idHash ct.SHA256Hash\n\tif err := idHash.FromBase64String(logID); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to decode logID: %v\", err)\n\t}\n\tvar empty ct.SHA256Hash\n\tif bytes.Equal(sth.LogID[:], empty[:]) {\n\t\tsth.LogID = idHash\n\t} else if !bytes.Equal(sth.LogID[:], idHash[:]) {\n\t\treturn nil, status.Errorf(codes.FailedPrecondition, \"STH logID = %q, input logID = %q\", sth.LogID.Base64String(), logID)\n\t}\n\tif err := sv.VerifySTHSignature(sth); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to verify STH signature: %v\", err)\n\t}\n\treturn &sth, nil\n}", "func ParseLevel(lvl string) (LogLevel, error) {\n\tswitch strings.ToLower(lvl) {\n\tcase \"none\":\n\t\treturn LvlNone, nil\n\tcase \"fatal\":\n\t\treturn LvlFatal, nil\n\tcase \"error\":\n\t\treturn LvlError, nil\n\tcase \"warn\", \"warning\":\n\t\treturn LvlWarn, nil\n\tcase \"info\":\n\t\treturn LvlInfo, nil\n\tcase \"debug\":\n\t\treturn LvlDebug, nil\n\t}\n\n\tvar l LogLevel\n\treturn l, fmt.Errorf(\"not a valid slog Level: %q\", lvl)\n}", "func (p *parser) parseStmt() (stmt tree.Stmt, last bool) {\n\tswitch p.tok {\n\tcase token.DO:\n\t\treturn p.parseDoStmt(), false\n\tcase token.WHILE:\n\t\treturn p.parseWhileStmt(), false\n\tcase token.REPEAT:\n\t\treturn p.parseRepeatStmt(), false\n\tcase token.IF:\n\t\treturn p.parseIfStmt(), false\n\tcase 
token.FOR:\n\t\treturn p.parseForStmt(), false\n\tcase token.FUNCTION:\n\t\treturn p.parseFunctionStmt(), false\n\tcase token.LOCAL:\n\t\treturn p.parseLocalStmt(), false\n\tcase token.RETURN:\n\t\treturn p.parseReturnStmt(), true\n\tcase token.BREAK:\n\t\treturn p.parseBreakStmt(), true\n\t}\n\treturn p.parseExprStmt(), false\n}", "func (p *parser) next0() {\n\t// Because of one-token look-ahead, print the previous token\n\t// when tracing as it provides a more readable output. The\n\t// very first token (!p.pos.IsValid()) is not initialized\n\t// (it is token.ILLEGAL), so don't print it.\n\tif p.trace && p.pos.IsValid() {\n\t\ts := p.tok.String()\n\t\tswitch {\n\t\tcase p.tok.IsLiteral():\n\t\t\tp.printTrace(s, p.lit)\n\t\tcase p.tok.IsStringLiteral():\n\t\t\tlit := p.lit\n\t\t\t// Simplify trace expression.\n\t\t\tif lit != \"\" {\n\t\t\t\tlit = `\"` + lit + `\"`\n\t\t\t}\n\t\t\tp.printTrace(s, lit)\n\t\tcase p.tok.IsOperator(), p.tok.IsCommand():\n\t\t\tp.printTrace(\"\\\"\" + s + \"\\\"\")\n\t\tdefault:\n\t\t\tp.printTrace(s)\n\t\t}\n\t}\n\n\tp.pos, p.tok, p.lit = p.scanner.Scan()\n}", "func (this *Codec) deserialize(data string) *TreeNode {\n\tc := strings.Split(data, \",\")\n\n\tif len(data) == 0 {\n\t\treturn nil\n\t}\n\n\tt := &TreeNode{Val: myAtoi(c[0])}\n\tqueue := []*TreeNode{t}\n\n\ti := 1\n\tfor len(queue) > 0 {\n\t\tl := len(queue)\n\t\tfor j := 0; j < l; j++ {\n\t\t\tif c[i] == \"nil\" {\n\t\t\t\tqueue[j].Left = nil\n\t\t\t} else {\n\t\t\t\tqueue[j].Left = &TreeNode{Val: myAtoi2(c[i])}\n\t\t\t\tqueue = append(queue, queue[j].Left)\n\t\t\t}\n\t\t\ti++\n\t\t\tif c[i] == \"nil\" {\n\t\t\t\tqueue[j].Right = nil\n\t\t\t} else {\n\t\t\t\tqueue[j].Right = &TreeNode{Val: myAtoi2(c[i])}\n\t\t\t\tqueue = append(queue, queue[j].Right)\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t\tqueue = queue[l:]\n\t}\n\treturn t\n}", "func parseSubtree(ds *docState) []nodes.Node {\n\tvar nodes []nodes.Node\n\tfor ds.cur = ds.cur.FirstChild; ds.cur != nil; ds.cur = ds.cur.NextSibling {\n\t\tif n, ok := parseNode(ds); ok {\n\t\t\tif n != nil {\n\t\t\t\tnodes = append(nodes, n)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tds.push(nil)\n\t\tnodes = append(nodes, parseSubtree(ds)...)\n\t\tds.pop()\n\t}\n\treturn nodes\n}", "func unwrapFirst(n syntax.Node) syntax.Node {\n\tswitch n := n.(type) {\n\tcase *syntax.Root:\n\t\treturn unwrapFirst(&n.NodeList)\n\n\tcase *syntax.NodeList:\n\t\tif len(n.Nodes) == 0 {\n\t\t\treturn nil\n\t\t}\n\t\treturn unwrapFirst(n.Nodes[0])\n\tcase *syntax.ExprStmt:\n\t\treturn unwrapFirst(n.Expr)\n\tcase *syntax.DeclStmt:\n\t\treturn unwrapFirst(n.Decl)\n\tcase *syntax.ModuleDef:\n\t\treturn unwrapFirst(n.Def)\n\tdefault:\n\t\treturn n\n\t}\n}", "func pop_level(){\nif text_info[cur_state.repl_field].text_link<max_texts{/* link to a continuation */\ncur_state.repl_field= text_info[cur_state.repl_field].text_link/* stay on the same level */\ncur_state.byte_field= text_info[cur_state.repl_field].token\nreturn\n}\n\nif len(stack)> 0{\ncur_state= stack[len(stack)-1]\nstack= stack[:len(stack)-1]\n}\n}", "func (a Analyzer) Parse(re *syntax.Regexp) *Node {\n\treturn analyze(re, true)\n}", "func (this *Codec) deserialize(data string) *TreeNode {\n\tif data == \"\" {\n\t\treturn nil\n\t}\n\tans := strings.Split(data, \",\")\n\n\tvar i = new(int)\n\t*i = 0\n\treturn deserialize(&ans, i)\n}", "func (p *Parser) Parse() int {\n\tp.rootNode = nil\n\n\treturn yyParse(p)\n}", "func ParseDir(T *Tree) []*Node {\n var ret []*Node\n return append(ret, &Node{\n Name: \".\",\n Format: \"root\",\n Children: T.Children,\n 
Id: \"\",\n\n Num_children: len(T.Children),\n })\n}", "func (p *Parser) next0() {\n\t// Because of one-token look-ahead, print the previous token\n\t// when tracing as it provides a more readable output. The\n\t// very first token (!p.pos.IsValid()) is not initialized\n\t// (it is token.ILLEGAL), so don't print it .\n\tif p.trace && p.pos.IsValid() {\n\t\ts := p.tok.String()\n\t\tswitch {\n\t\tcase p.tok.IsLiteral():\n\t\t\tp.printTrace(s, p.lit)\n\t\tcase p.tok.IsOperator(), p.tok.IsKeyword():\n\t\t\tp.printTrace(\"\\\"\" + s + \"\\\"\")\n\t\tdefault:\n\t\t\tp.printTrace(s)\n\t\t}\n\t}\n\n\tp.pos, p.tok, p.lit = p.lexer.Lex()\n}", "func (is *infosec) parse(page *Page) {\n\tis.Map.parse(page)\n}", "func ParseLevel(level string) (Level, error) {\n\tswitch level {\n\tcase \"panic\":\n\t\treturn LevelPanic, nil\n\tcase \"fatal\":\n\t\treturn LevelFatal, nil\n\tcase \"error\":\n\t\treturn LevelError, nil\n\tcase \"warn\", \"warning\":\n\t\treturn LevelWarn, nil\n\tcase \"info\":\n\t\treturn LevelInfo, nil\n\tcase \"debug\":\n\t\treturn LevelDebug, nil\n\t}\n\n\tvar l Level\n\treturn l, fmt.Errorf(\"not a valid log Level: %q\", level)\n}", "func (s *BaseMySqlParserListener) EnterNestedExpressionAtom(ctx *NestedExpressionAtomContext) {}", "func (sg *spawnGroup) parse() {\n\treader := NewReader(sg.manifest)\n\n\tisCompressed := reader.readBoolean()\n\tsize := reader.readBits(24)\n\tdata := reader.readBytes(int(size))\n\n\tif isCompressed {\n\t\tdataUnc, err := unlzss(data)\n\t\tif err != nil {\n\t\t\t_panicf(\"Error uncompressing spawnGroup data %s\", err)\n\t\t}\n\n\t\tdata = dataUnc\n\t}\n\n\treader2 := NewReader(data)\n\trTypes := reader2.readBits(16) // number of different ressource types in the data\n\n\tif rTypes > 1 {\n\t\trStrings := reader2.readBits(16) // number of models / particles / ressources to load\n\t\t_ = reader2.readBits(16) // currently not known, probably the size for another field\n\n\t\tfor i := 0; uint32(i) < rTypes; i++ {\n\t\t\t_ = reader2.readString() // e.g. vmdl, vmat, vpcf\n\t\t}\n\n\t\tfor i := 0; uint32(i) < rStrings; i++ {\n\t\t\t_ = reader2.readString() // e.g. 
models/items/rubick/peculiar_prestidigitator_shoulders/\n\t\t}\n\t}\n}", "func parseRule(p *blockParser) stateFn {\n\tr := p.next()\n\tr1 := p.peek(2)\n\tif r1 == ' ' {\n\t\tfor {\n\t\t\tr1 = p.next()\n\t\t\tif r1 != ' ' {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tr1 = p.next()\n\t\t\tif r1 != r {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tp.emit(&Rule{})\n\t\treturn parseBegin\n\t} else {\n\t\tp.consume(r)\n\t\tp.emit(&Rule{})\n\t\treturn parseBegin\n\t}\n\n}", "func parseBranch(data []byte, i int) (Node, int) {\n\tif data[i] != '(' {\n\t\tpanic(fmt.Sprintf(\"internal error at offset %d: expected '(', got %c\", i, data[i]))\n\t}\n\ti++\n\tvar br BranchNode\n\tfor i < len(data) {\n\t\tnode, j := parseSequence(data, i)\n\t\tif j > i {\n\t\t\tbr = append(br, node)\n\t\t}\n\t\tswitch data[j] {\n\t\tcase ')':\n\t\t\treturn br, j\n\t\tcase '|':\n\t\t\ti = j + 1\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"parse error at offset %d: expected ')' or '|', got %c (%[1]d)\", j, data[j]))\n\t\t}\n\t}\n\tpanic(\"unexpected end of input\")\n}", "func (layout Layout) rootLevel() int {\n\treturn layout.numLevels() - 1\n}", "func (p *parser) next() {\n\tif p.look != nil {\n\t\t// Consume stored state.\n\t\tp.tokenstate = *p.look\n\t\tp.look = nil\n\t\treturn\n\t}\n\n\tp.off, p.tok, p.lit = p.scanner.Scan()\n\n\tp.pre = nil\n\t// Skip over prefix tokens, accumulating them in p.pre.\n\tfor p.tok.IsPrefix() {\n\t\tp.pre = append(p.pre, tree.Prefix{Type: p.tok, Bytes: p.lit})\n\t\tp.off, p.tok, p.lit = p.scanner.Scan()\n\t}\n}", "func (p *parser) next() (t token) {\n\tp.fill()\n\tif len(p.buf) > 0 {\n\t\tt = p.buf[0]\n\t\tp.buf = p.buf[1:]\n\t}\n\tp.fill()\n\n\treturn\n}", "func ParseLevel(level string) zerolog.Level {\n\tswitch strings.ToUpper(level) {\n\tcase \"FATAL\":\n\t\treturn zerolog.FatalLevel\n\tcase \"ERROR\":\n\t\treturn zerolog.ErrorLevel\n\tcase \"WARNING\":\n\t\treturn zerolog.WarnLevel\n\tcase \"INFO\":\n\t\treturn zerolog.InfoLevel\n\tcase \"DEBUG\":\n\t\treturn zerolog.DebugLevel\n\tdefault:\n\t\treturn zerolog.DebugLevel\n\t}\n}", "func parse(text string) (Node, error) {\n\tp := &parser{}\n\terr := p.Parse(text)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn p.Root, nil\n}", "func parseSubset(sec [][]byte, lineCount int) (*changeSubset, error) {\n\t// the first subset can be a blank line, which is not an error\n\tif len(sec) < 1 || (len(sec) == 1 && len(sec[0]) == 0) {\n\t\treturn nil, nil\n\t}\n\tif len(sec) < 2 {\n\t\treturn nil, fmt.Errorf(\"subsection not long enough\")\n\t}\n\tif len(sec[0]) < 5 {\n\t\treturn nil, fmt.Errorf(\"subsection first line ('%s') not long enough\", sec[0])\n\t}\n\tcss := &changeSubset{heading: string(sec[0][4:])}\n\tfor _, l := range sec[1:] {\n\t\tif len(l) > 0 {\n\t\t\tcss.changes = append(css.changes, l)\n\t\t}\n\t}\n\treturn css, nil\n}", "func (inclusiveGateway *InclusiveGateway) ParseTree(definitions *Definitions) {\n\tinclusiveGateway.Gateway.ParseTree(definitions)\n\n}", "func makeST(delim string, tokens []string) ([]STNode, []string) {\n\tnodes := make([]STNode, 0, len(tokens))\n\tzip := false\n\tdot := false\n\tvar prev *STNode\n\tvar current STNode\n\tnewline := false\n\tprevlength := 0\n\ti := 0\n\n\tfor ; i < len(tokens); i++ {\n\t\tif tokens[i] == TokenDelimiters[delim] { return nodes, tokens[i + 1:] }\n\n\t\tif tokens[i] == \"\\n\" {\n\t\t\tdelimtype := TokenDelimiterTypes[delim]\n\t\t\tif newline && (len(nodes) - prevlength) > 1 &&\n\t\t\t\tdelimtype != STNodeTypeMap && delimtype != STNodeTypeList {\n\t\t\t\tnode := STNode{\n\t\t\t\t\tType: 
STNodeTypeExpression,\n\t\t\t\t\tChildren: make([]STNode, len(nodes[prevlength:])),\n\t\t\t\t}\n\t\t\t\tcopy(node.Children, nodes[prevlength:])\n\t\t\t\tnodes = nodes[:prevlength]\n\t\t\t\tnodes, prev, zip, dot = appendNode(nodes, node, prev, zip, dot)\n\t\t\t}\n\t\t\tnewline = true\n\t\t\tprevlength = len(nodes)\n\t\t\tcontinue\n\t\t}\n\n\t\tcurrent = STNode{\n\t\t\tHead: tokens[i],\n\t\t\tType: STNodeTypeIdentifier,\n\t\t\tChildren: make([]STNode, 0),\n\t\t}\n\n\t\t// check if current token is a delimiter '[]' or '{}'\n\t\t// parse recursively if so\n\t\tdelimtype, isDelimiter := TokenDelimiterTypes[current.Head]\n\t\tif isDelimiter {\n\t\t\tvar newtokens []string\n\t\t\tcurrent.Type = delimtype\n\t\t\tcurrent.Children, newtokens = makeST(current.Head, tokens[i + 1:])\n\t\t\ti = -1\n\t\t\ttokens = newtokens\n\t\t\tnodes, prev, zip, dot = appendNode(nodes, current, prev, zip, dot)\n\t\t\tcontinue\n\t\t}\n\n\t\t// check if current token is an extended literal i.e a string or comment\n\t\tliteraltype, isLiteral := LiteralDelimiterTypes[string(current.Head[0])]\n\t\tif isLiteral {\n\t\t\tif literaltype == STNodeTypeComment { continue }\n\t\t\tif literaltype == STNodeTypeStringLiteral {\n\t\t\t\tcurrent.Head = fmt.Sprintf(\"\\\"%s\\\"\", normalizeString(current.Head[1:len(current.Head) - 1]))\n\t\t\t}\n\t\t\tcurrent.Type = literaltype\n\t\t\tnodes, prev, zip, dot = appendNode(nodes, current, prev, zip, dot)\n\t\t\tcontinue\n\t\t}\n\n\t\t// check if current token is a number literal\n\t\tnum, err := strconv.ParseFloat(current.Head, 64)\n\t\tif err == nil {\n\t\t\tcurrent.Type = STNodeTypeNumberLiteral\n\t\t\tif float64(int(num)) == num {\n\t\t\t\tcurrent.Head = strconv.Itoa(int(num))\n\t\t\t} else { current.Head = fmt.Sprintf(\"%g\", num) }\n\t\t\tnodes, prev, zip, dot = appendNode(nodes, current, prev, zip, dot)\n\t\t\tcontinue\n\t\t}\n\n\t\t// check if current token is an operator\n\t\toptype, isOperator := OperatorTypes[current.Head]\n\t\tif isOperator && len(nodes) > 0 {\n\t\t\tswitch optype {\n\t\t\tcase OperatorTypeSpread: prev.Spread = true\n\t\t\tcase OperatorTypeZip: zip = true\n\t\t\tcase OperatorTypeDot: dot = true\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t// current token must be an identifier\n\t\tnodes, prev, zip, dot = appendNode(nodes, current, prev, zip, dot)\n\t}\n\n\treturn nodes, tokens[i:]\n}", "func (this *Codec) deserialize(data string) *TreeNode {\n\tif len(data) == 0 {\n\t\treturn nil\n\t}\n\n\tres := strings.Split(data, \",\")\n\troot := &TreeNode{}\n\troot.Val, _ = strconv.Atoi(res[0])\n\tres = res[1:]\n\tqueue := make([]*TreeNode, 0)\n\tqueue = append(queue, root)\n\tfor len(queue) > 0 {\n\t\tif res[0] != \"#\" {\n\t\t\tleftVal, _ := strconv.Atoi(res[0])\n\t\t\tqueue[0].Left.Val = leftVal\n\t\t\tqueue = append(queue, queue[0].Left)\n\t\t}\n\t\tif res[1] != \"#\" {\n\t\t\trightVal, _ := strconv.Atoi(res[1])\n\t\t\tqueue[1].Right.Val = rightVal\n\t\t\tqueue = append(queue, queue[0].Right)\n\t\t}\n\t\tres = res[2:]\n\t\tqueue = queue[1:]\n\t}\n\treturn root\n}", "func (node *Node) printTree1(out *bytes.Buffer, isRight bool, indent string) {\n\n\tif (node.Left != nil) {\n\t\tstr := \" \"\n\t\tif isRight {\n\t\t\tstr = \" | \"\n\t\t}\n\t\tstr = indent + str\n\t\tnode.Left.printTree1(out, false, str)\n\t}\n\n\tout.Write([]byte(indent))\n\tif (isRight) {\n\t\tout.Write([]byte(\"\\\\\"))\n\t} else {\n\t\tout.Write([]byte (\"/\"))\n\t}\n\tout.Write([]byte(\"--\"))\n\n\tnode.printNodeValue(out)\n\n\tif (node.Right != nil) {\n\t\tstr := \" | \"\n\t\tif isRight {\n\t\t\tstr = \" 
\"\n\t\t}\n\t\tstr = indent + str\n\t\tnode.Right.printTree1(out, true, str)\n\t}\n\n}", "func ParseLevel(s string) Level {\n\tif strings.ToLower(s) == \"debug\" {\n\t\treturn DebugLevel\n\t} else if strings.ToLower(s) == \"info\" {\n\t\treturn InfoLevel\n\t} else if strings.ToLower(s) == \"warn\" || strings.ToLower(s) == \"warning\" {\n\t\treturn WarnLevel\n\t} else if strings.ToLower(s) == \"error\" {\n\t\treturn ErrorLevel\n\t} else if strings.ToLower(s) == \"panic\" {\n\t\treturn PanicLevel\n\t} else if strings.ToLower(s) == \"fatal\" {\n\t\treturn FatalLevel\n\t}\n\tErrorf(\"ParseLevel: unknown level: %s\", s)\n\treturn DebugLevel\n}", "func (t *Tree) Level() int {\n\treturn t.Depth() + 1\n}", "func (s *Statsd) parser() error {\n\tdefer s.wg.Done()\n\tvar packet []byte\n\tfor {\n\t\tselect {\n\t\tcase <-s.done:\n\t\t\treturn nil\n\t\tcase packet = <-s.in:\n\t\t\tlines := strings.Split(string(packet), \"\\n\")\n\t\t\tfor _, line := range lines {\n\t\t\t\tline = strings.TrimSpace(line)\n\t\t\t\tif line != \"\" {\n\t\t\t\t\ts.parseStatsdLine(line)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (n *node) parse(args []string) error {\n\t//return the stored error\n\tif n.err != nil {\n\t\treturn n.err\n\t}\n\t//root node? take program from the arg list (assumes os.Args format)\n\tif n.parent == nil {\n\t\tprog := \"\"\n\t\tif len(args) > 0 {\n\t\t\tprog = args[0]\n\t\t\targs = args[1:]\n\t\t}\n\t\t//find default name for root-node\n\t\tif n.item.name == \"\" {\n\t\t\tif exe, err := os.Executable(); err == nil && exe != \"\" {\n\t\t\t\t//TODO: use filepath.EvalSymlinks first?\n\t\t\t\t_, n.item.name = path.Split(exe)\n\t\t\t} else if prog != \"\" {\n\t\t\t\t_, n.item.name = path.Split(prog)\n\t\t\t}\n\t\t\t//looks like weve been go-run, use package name?\n\t\t\tif n.item.name == \"main\" {\n\t\t\t\tif pkgPath := n.item.val.Type().PkgPath(); pkgPath != \"\" {\n\t\t\t\t\t_, n.item.name = path.Split(pkgPath)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t//add this node and its fields (recurses if has sub-commands)\n\tif err := n.addStructFields(defaultGroup, n.item.val); err != nil {\n\t\treturn err\n\t}\n\t//add user provided flagsets, will error if there is a naming collision\n\tif err := n.addFlagsets(); err != nil {\n\t\treturn err\n\t}\n\t//add help, version, etc flags\n\tif err := n.addInternalFlags(); err != nil {\n\t\treturn err\n\t}\n\t//find defaults from config's package\n\tn.setPkgDefaults()\n\t//add shortnames where possible\n\tfor _, item := range n.flags() {\n\t\tif !n.flagSkipShort[item.name] && item.shortName == \"\" && len(item.name) >= 2 {\n\t\t\tif s := item.name[0:1]; !n.flagNames[s] {\n\t\t\t\titem.shortName = s\n\t\t\t\tn.flagNames[s] = true\n\t\t\t}\n\t\t}\n\t}\n\t//create a new flagset, and link each item\n\tflagset := flag.NewFlagSet(n.item.name, flag.ContinueOnError)\n\tflagset.SetOutput(ioutil.Discard)\n\tfor _, item := range n.flags() {\n\t\tflagset.Var(item, item.name, \"\")\n\t\tif sn := item.shortName; sn != \"\" && sn != \"-\" {\n\t\t\tflagset.Var(item, sn, \"\")\n\t\t}\n\t}\n\tif err := flagset.Parse(args); err != nil {\n\t\t//insert flag errors into help text\n\t\tn.err = err\n\t\tn.internalOpts.Help = true\n\t}\n\t//handle help, version, install/uninstall\n\tif n.internalOpts.Help {\n\t\treturn exitError(n.Help())\n\t} else if n.internalOpts.Version {\n\t\treturn exitError(n.version)\n\t} else if n.internalOpts.Install {\n\t\treturn n.manageCompletion(false)\n\t} else if n.internalOpts.Uninstall {\n\t\treturn n.manageCompletion(true)\n\t}\n\t//first round of defaults, 
applying env variables where necessary\n\tfor _, item := range n.flags() {\n\t\tk := item.envName\n\t\tif item.set() || k == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tv := os.Getenv(k)\n\t\tif v == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\terr := item.Set(v)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"flag '%s' cannot set invalid env var (%s): %s\", item.name, k, err)\n\t\t}\n\t}\n\t//second round, unmarshal directly into the struct, overwrites envs and flags\n\tif c := n.internalOpts.ConfigPath; c != \"\" {\n\t\tb, err := ioutil.ReadFile(c)\n\t\tif err == nil {\n\t\t\tv := n.val.Addr().Interface() //*struct\n\t\t\terr = json.Unmarshal(b, v)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"invalid config file: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n\t//get remaining args after extracting flags\n\tremaining := flagset.Args()\n\ti := 0\n\tfor {\n\t\tif len(n.args) == i {\n\t\t\tbreak\n\t\t}\n\t\titem := n.args[i]\n\t\tif len(remaining) == 0 && !item.set() && !item.slice {\n\t\t\treturn fmt.Errorf(\"argument '%s' is missing\", item.name)\n\t\t}\n\t\tif len(remaining) == 0 {\n\t\t\tbreak\n\t\t}\n\t\ts := remaining[0]\n\t\tif err := item.Set(s); err != nil {\n\t\t\treturn fmt.Errorf(\"argument '%s' is invalid: %s\", item.name, err)\n\t\t}\n\t\tremaining = remaining[1:]\n\t\t//use next arg?\n\t\tif !item.slice {\n\t\t\ti++\n\t\t}\n\t}\n\t//check min\n\tfor _, item := range n.args {\n\t\tif item.slice && item.sets < item.min {\n\t\t\treturn fmt.Errorf(\"argument '%s' has too few args (%d/%d)\", item.name, item.sets, item.min)\n\t\t}\n\t\tif item.slice && item.max != 0 && item.sets > item.max {\n\t\t\treturn fmt.Errorf(\"argument '%s' has too many args (%d/%d)\", item.name, item.sets, item.max)\n\t\t}\n\t}\n\t//use command? next arg can optionally match command\n\tif len(n.cmds) > 0 {\n\t\t// use next arg as command\n\t\targs := remaining\n\t\tcmd := \"\"\n\t\tmust := false\n\t\tif len(args) > 0 {\n\t\t\tcmd = args[0]\n\t\t\targs = args[1:]\n\t\t}\n\t\t// fallback to pre-initialised cmdname\n\t\tif cmd == \"\" {\n\t\t\tif n.cmdnameEnv != \"\" && os.Getenv(n.cmdnameEnv) != \"\" {\n\t\t\t\tcmd = os.Getenv(n.cmdnameEnv)\n\t\t\t} else if n.cmdname != nil && *n.cmdname != \"\" {\n\t\t\t\tcmd = *n.cmdname\n\t\t\t}\n\t\t\tmust = true\n\t\t}\n\t\t//matching command\n\t\tif cmd != \"\" {\n\t\t\tsub, exists := n.cmds[cmd]\n\t\t\tif must && !exists {\n\t\t\t\treturn fmt.Errorf(\"command '%s' does not exist\", cmd)\n\t\t\t}\n\t\t\tif exists {\n\t\t\t\t//store matched command\n\t\t\t\tn.cmd = sub\n\t\t\t\t//user wants command name to be set on their struct?\n\t\t\t\tif n.cmdname != nil {\n\t\t\t\t\t*n.cmdname = cmd\n\t\t\t\t}\n\t\t\t\t//tail recurse! 
if only...\n\t\t\t\treturn sub.parse(args)\n\t\t\t}\n\t\t}\n\t}\n\t//we *should* have consumed all args at this point.\n\t//this prevents: ./foo --bar 42 -z 21 ping --pong 7\n\t//where --pong 7 is ignored\n\tif len(remaining) != 0 {\n\t\treturn fmt.Errorf(\"unexpected arguments: %s\", strings.Join(remaining, \" \"))\n\t}\n\treturn nil\n}", "func parseDepth(option string) (*Option, error) {\n\tsplitoption := strings.Fields(option)\n\n\tif len(splitoption) == 0 {\n\t\treturn nil, fmt.Errorf(\"there is an unspecified depth option at an unknown line\")\n\t} else if len(splitoption) == 1 || len(splitoption) > 2 {\n\t\treturn nil, fmt.Errorf(\"there is a misconfigured depth option: %q.\\nIs it in format <option>:<whitespaces><regex><whitespaces><int>?\", option)\n\t}\n\n\tre, err := regexp.Compile(\"^\" + splitoption[0] + \"$\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"an error occurred compiling the regex for a depth option: %q\\n%v\", option, err)\n\t}\n\n\tdepth, err := strconv.Atoi(splitoption[1])\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"an error occurred parsing the integer depth value of a depth option: %q\\n%v\", option, err)\n\t}\n\n\treturn &Option{\n\t\tCategory: \"depth\",\n\t\tRegex: map[int]*regexp.Regexp{0: re},\n\t\tValue: depth,\n\t}, nil\n}" ]
[ "0.58363867", "0.57795185", "0.5726749", "0.5672688", "0.5513443", "0.54041326", "0.5377714", "0.53473246", "0.5279886", "0.52769583", "0.5236373", "0.51813304", "0.5177548", "0.5163188", "0.5162664", "0.51265293", "0.5125453", "0.51065844", "0.50366247", "0.50332594", "0.49919423", "0.49731562", "0.49691823", "0.49683985", "0.4952382", "0.49441406", "0.4904424", "0.48904455", "0.4880363", "0.48784634", "0.48526648", "0.484006", "0.48323572", "0.4832127", "0.48272103", "0.48101205", "0.4808422", "0.48064846", "0.48037145", "0.48034537", "0.48034537", "0.48021692", "0.47974196", "0.47927454", "0.47894737", "0.47851", "0.47851", "0.47744563", "0.47520933", "0.4741324", "0.4720009", "0.47133532", "0.47080153", "0.4702058", "0.4701714", "0.47008127", "0.46955663", "0.4692698", "0.46861404", "0.46797442", "0.4671731", "0.46420404", "0.46372867", "0.46327052", "0.46317676", "0.4631375", "0.4630653", "0.46234682", "0.4610246", "0.460722", "0.45988205", "0.459676", "0.45925143", "0.45919105", "0.4586141", "0.4576975", "0.45700955", "0.45692956", "0.45663053", "0.45546487", "0.45542726", "0.45516622", "0.45445007", "0.45407248", "0.45402065", "0.45401645", "0.45379412", "0.45378032", "0.4533139", "0.45249134", "0.45236853", "0.4520014", "0.4517844", "0.45159897", "0.45140642", "0.4500284", "0.44983837", "0.4493573", "0.44922212", "0.44888833", "0.44888228" ]
0.0
-1
GetBroadcastPayload will return the object to send to all chat users.
func (e *UserDisabledEvent) GetBroadcastPayload() EventPayload { return EventPayload{ "type": ErrorUserDisabled, "id": e.ID, "timestamp": e.Timestamp, "user": e.User, } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (d delegate) GetBroadcasts(overhead, limit int) [][]byte { return nil }", "func (gd *GossipDelegate) GetBroadcasts(overhead, limit int) [][]byte {\n\tvar test [][]byte\n\ts1, _ := json.Marshal(gd.nodeId)\n\ts2, _ := json.Marshal(\"test_string\")\n\ttest = append(test, s1)\n\ttest = append(test, s2)\n\treturn test\n}", "func (broadcast *Broadcast) Object() obj.Broadcast {\n\treturn broadcast.msg\n}", "func (e *Ender) GetBroadcast() (*Broadcast, error) {\n\tif !e.valid {\n\t\treturn nil, errors.New(\"ender: unvalid process\")\n\t}\n\n\tsrvBro := modelBroadcastToSrvBroadcast(e.broadcastModel)\n\treturn srvBro, nil\n}", "func (d *mlDelegate) GetBroadcasts(overhead, limit int) [][]byte {\n\treq := reqOutgoing{\n\t\toverhead: overhead,\n\t\tlimit: limit,\n\t\tret: make(chan [][]byte),\n\t}\n\td.outgoing <- req\n\treturn <-req.ret\n}", "func (s *Starter) GetBroadcast() (*Broadcast, error) {\n\tif !s.valid {\n\t\treturn nil, errors.New(\"starter: unvalid process\")\n\t}\n\tsrvBro := modelBroadcastToSrvBroadcast(s.broadcastModel)\n\treturn srvBro, nil\n}", "func (s *server) GetBroadcasts(overhead, limit int) [][]byte {\n\treturn s.queue.GetBroadcasts(overhead, limit)\n}", "func (this User) broadcast(message interface{}) {\n mu.Lock()\n for _, client := range clients {\n if client.User.Id != this.Id {\n websocket.JSON.Send(client.Websocket, message)\n }\n }\n mu.Unlock()\n}", "func (p *WebsocketPayload) GetPayload() *jwt.Payload {\n\tp.RLock()\n\tdefer p.RUnlock()\n\n\treturn &p.Payload\n}", "func (m *DeviceAndAppManagementAssignmentFilter) GetPayloads()([]PayloadByFilterable) {\n val, err := m.GetBackingStore().Get(\"payloads\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]PayloadByFilterable)\n }\n return nil\n}", "func (d *delegate) GetBroadcasts(overhead, limit int) [][]byte {\n\td.mtx.RLock()\n\tdefer d.mtx.RUnlock()\n\treturn d.bcast.GetBroadcasts(overhead, limit)\n}", "func (n *GlobalNotification) Payload() map[string]interface{} {\r\n\treturn n.payload\r\n}", "func (d *delegate) GetBroadcasts(overhead, limit int) [][]byte {\n\td.mtx.RLock()\n\tdefer d.mtx.RUnlock()\n\tif d.bcast == nil {\n\t\tpanic(\"GetBroadcast before init\")\n\t}\n\treturn d.bcast.GetBroadcasts(overhead, limit)\n}", "func (bp *BasePayload) GetPayload() []byte {\n\treturn bp.Payload\n}", "func (a *API) GetUserInstancePayload(user *UserModel) (*UserInstanceEventPayload, error) {\n\trooms := &[]RoomModel{}\n\terr := a.DB.Model(user).Related(rooms, \"Rooms\").Error\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\troomIDs := []string{}\n\tfor _, r := range *rooms {\n\t\troomIDs = append(roomIDs, r.ID)\n\t}\n\treturn &UserInstanceEventPayload{\n\t\tID: user.ID,\n\t\tName: user.Name,\n\t\tPhoto: user.Photo,\n\t\tRoomIDs: roomIDs,\n\t}, nil\n}", "func broadcastData(_ context.Context, data interface{}) (interface{}, error) {\n\tbroadcastS07 := func(sinkData SinkData) {\n\t\ts07Data := S07Data{Topic: sinkData.Topic, Time: sinkData.Time, From: sinkData.From}\n\t\terr := json.Unmarshal(sinkData.Payload, &s07Data)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Unmarshal Payload error: %v\\n\", err)\n\t\t}\n\t\tfmt.Println(fmt.Sprintf(\"broadcast %s data: %v\", WebSocketRoom, s07Data))\n\t\twebSocketServer.BroadcastToRoom(\"\", WebSocketRoom, \"receive_sink_s07\", s07Data)\n\t}\n\n\tbroadcastS05 := func(sinkData SinkData) {\n\t\ts05Data := S05Data{Topic: sinkData.Topic, Time: sinkData.Time, From: sinkData.From}\n\t\terr := json.Unmarshal(sinkData.Payload, &s05Data)\n\t\tif err != nil 
{\n\t\t\tfmt.Printf(\"Unmarshal Payload error: %v\\n\", err)\n\t\t}\n\t\tfmt.Println(fmt.Sprintf(\"broadcast %s data: %v\", WebSocketRoom, s05Data))\n\t\twebSocketServer.BroadcastToRoom(\"\", WebSocketRoom, \"receive_sink_s05\", s05Data)\n\t}\n\n\tif webSocketServer != nil && data != nil {\n\t\tsinkData := data.(SinkData)\n\t\tswitch sinkData.Topic {\n\t\tcase S07Topic:\n\t\t\tbroadcastS07(sinkData)\n\t\tcase S05Topic:\n\t\t\tbroadcastS05(sinkData)\n\t\t}\n\n\t\t//fmt.Println(fmt.Sprintf(\"broadcast %s data: %v\", WebSocketRoom, data))\n\t\t//webSocketServer.BroadcastToRoom(\"\", WebSocketRoom, \"receive_sink\", data)\n\t} else {\n\t\tlog.Printf(\"❌ Not eligible for broadcasting. webSocketServer=%v, data=%v\\n\", webSocketServer, data)\n\t}\n\n\treturn data, nil\n}", "func broadcastWebSocket(event models.Event) {\n\t//data, err := json.Marshal(event)\n\t//if err != nil {\n\t//\tbeego.Error(\"Fail to marshal event:\", err)\n\t//\treturn\n\t//}\n\n\tfor sub := subscribers.Front(); sub != nil; sub = sub.Next() {\n\t\t// Immediately send event to WebSocket users.\n\t\tws := sub.Value.(Subscriber).Conn\n\t\tname := sub.Value.(Subscriber).Name\n\t\tif name == event.User {\n\t\t\tcontinue\n\t\t}\n\t\tif ws != nil {\n\t\t\tlogs.Info(\"broadcastWebSocket:\",string(event.Content))\n\t\t\tif ws.WriteMessage(websocket.TextMessage, []byte(event.Content)) != nil {\n\t\t\t\t// User disconnected.\n\t\t\t\tlogs.Error(\"WriteMessage error\")\n\t\t\t\tunsubscribe <- sub.Value.(Subscriber).Name\n\t\t\t}\n\t\t}\n\t}\n}", "func (m *User) Payload() map[string]interface{} {\n\treturn m.MapPayload(m)\n}", "func (p RPCServer) Broadcast(ctx context.Context, in *pb.MessageAndTopic) (*empty.Empty, error) {\n\treturn &empty.Empty{}, p.service.Broadcast(in.Topic, in.Data)\n}", "func (this *RTPPacket) GetPayload() []byte {\n\treturn this.payload\n}", "func broadcastWebSocket(event models.Event) {\n\tdata, err := json.Marshal(event)\n\tif err != nil {\n\t\tbeego.Error(\"Fail to marshal event:\", err)\n\t\treturn\n\t}\n\n\tfor sub := subscribers.Front(); sub != nil; sub = sub.Next() {\n\t\t// Immediately send event to WebSocket users.\n\t\tif sub.Value.(Subscriber).UserID != event.UserID {\n\t\t\tws := sub.Value.(Subscriber).Conn\n\t\t\tif ws != nil {\n\t\t\t\tif ws.WriteMessage(websocket.TextMessage, data) != nil {\n\t\t\t\t\t// User disconnected.\n\t\t\t\t\tunsubscribe <- sub.Value.(Subscriber).UserID\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (c *ChannelRemote) Broadcast(ctx context.Context, args *rpc.ArgsGroup, reply *rpc.ReplyGroup) error {\n\t//log.Debug(\">>>> %s broadcast: %s, %s\", c.connector.opts.WSAddr, args.Route, string(args.Payload))\n\tc.connector.GetSessionMap().Range(func(key, value interface{}) bool {\n\t\trouteId := msgService.GetMsgService().GetRouteId(args.Route)\n\t\tvalue.(Agent).PushMessage(routeId, args.Payload)\n\t\treturn true\n\t})\n\treturn nil\n}", "func (c *CoordinatorHelper) AllBroadcasts(ctx context.Context) ([]*storage.Broadcast, error) {\n\treturn c.broadcastStorage.GetAllBroadcasts(ctx)\n}", "func (b *Broadcast) Copy() *Broadcast {\n\tbroadcast := &Broadcast{\n\t\tRoomid: atomic.LoadInt64(&b.Roomid),\n\t\tUID: atomic.LoadInt64(&b.UID),\n\t\tUname: b.Uname,\n\t\tPopularity: atomic.LoadUint32(&b.Popularity),\n\t\tMaxPopularity: atomic.LoadUint32(&b.MaxPopularity),\n\t\tTitle: b.Title,\n\t\tUsercover: b.Usercover,\n\t\tKeyframe: b.Keyframe,\n\t\tLivetime: b.Livetime,\n\t\tEndtime: b.Endtime,\n\t\tParticipantduring10Min: atomic.LoadInt64(&b.Participantduring10Min),\n\t\tGoldCoin: 
atomic.LoadUint64(&b.GoldCoin),\n\t\tSilverCoin: atomic.LoadUint64(&b.SilverCoin),\n\t\tParticipant: atomic.LoadInt64(&b.Participant),\n\t\tGoldUser: atomic.LoadInt64(&b.GoldUser),\n\t\tDanmuCount: atomic.LoadUint64(&b.DanmuCount),\n\t}\n\treturn broadcast\n}", "func (bc *Broadcaster) Broadcast(b message.Broadcast) {\n bc.lobby.Broadcast(b)\n bc.uh.Broadcast(b)\n}", "func broadcastWebSocket(event models.Event) {\n\tdata, err := json.Marshal(event)\n\tif err != nil {\n\t\tbeego.Error(\"Fail to marshal event:\", err)\n\t\treturn\n\t}\n\n\tfor chatterItem := chatterLists.Front(); chatterItem != nil; chatterItem = chatterItem.Next() {\n\t\t// Immediately send event to WebSocket users.\n\t\tws := chatterItem.Value.(Chatter).Conn //断言\n\t\tif ws != nil {\n\t\t\tif ws.WriteMessage(websocket.TextMessage, data) != nil {\n\t\t\t\t// User disconnected.\n\t\t\t\texitChatterCh <- chatterItem.Value.(Chatter).Name\n\t\t\t}\n\t\t}\n\t}\n}", "func (e *Event) GetRawPayload() json.RawMessage {\n\tif e == nil || e.RawPayload == nil {\n\t\treturn json.RawMessage{}\n\t}\n\treturn *e.RawPayload\n}", "func (b *BeaconClient) Broadcast(filter string, payload payloads.Payload, interval int) error {\n\n\tif payload == nil {\n\t\tb.beacon.Silence()\n\t\tb.publishing = false\n\t\treturn nil\n\t}\n\n\tif TCFConfig.SetEncodingForPayloadsGlobally {\n\t\tb.SetEncoding(b.Encoding)\n\t}\n\n\tdata, encErr := payloads.Marshal(payload, b.Encoding)\n\tif encErr != nil {\n\t\treturn encErr\n\t}\n\n\tvar encodedPayload []byte\n\tswitch data.(type) {\n\tcase []byte:\n\t\tencodedPayload = data.([]byte)\n\t\tbreak\n\tcase string:\n\t\tencodedPayload = []byte(data.(string))\n\t\tbreak\n\tdefault:\n\t\treturn errors.New(\"Encoded data is not supported\")\n\t}\n\n\tasPayload := BeaconTransmit{\n\t\tChannel: filter,\n\t\tTransmit: encodedPayload,\n\t}\n\n\twrappedSend, encErr := msgpack.Marshal(asPayload)\n\tif encErr != nil {\n\t\treturn encErr\n\t}\n\n\tif len(wrappedSend) == 0 {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"prefix\": \"tcf.beaconclient\",\n\t\t}).Error(\"No data to send, not sending\")\n\t\treturn nil\n\t}\n\n\t// We're just changing the payload, so don't start another publish.\n\tif b.hasStarted {\n\t\tb.beacon.Restart(wrappedSend)\n\t\treturn nil\n\t}\n\n\tlog.Info(\"BEACON: Starting Publish\")\n\tpubErr := b.beacon.Publish(wrappedSend)\n\tif pubErr != nil {\n\t\treturn pubErr\n\t}\n\n\tb.publishing = true\n\tb.hasStarted = true\n\treturn nil\n\n}", "func (app *App) GroupBroadcast(ctx context.Context, frontendType, groupName, route string, v interface{}) error {\n\tlogger.Log.Debugf(\"Type=Broadcast Route=%s, Data=%+v\", route, v)\n\n\tmembers, err := app.GroupMembers(ctx, groupName)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn app.sendDataToMembers(members, frontendType, route, v)\n}", "func (b *Broadcaster) Broadcast() chan<- interface{} {\n\tch := make(chan interface{})\n\tgo func() {\n\t\tfor c := range ch {\n\t\t\tc := c\n\t\t\tb.RLock()\n\t\t\tdefer b.RUnlock()\n\t\t\tfor _, listener := range b.listeners {\n\t\t\t\tlistener := listener\n\t\t\t\tlistener.Add(1)\n\t\t\t\tgo func() {\n\t\t\t\t\tdefer listener.Done()\n\t\t\t\t\tlistener.ch <- c\n\t\t\t\t}()\n\t\t\t}\n\t\t}\n\t}()\n\treturn ch\n}", "func (pushbots *PushBots) Broadcast(platform string, msg, sound, badge string, payload map[string]interface{}) error {\n\tvar supportsIos, supportsAndroid bool\n\n\tplatforms, err := generatePlatform(platform, true)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, val := range platforms.([]string) {\n\t\tif val 
== PlatformIos {\n\t\t\tsupportsIos = true\n\t\t} else if val == PlatformAndroid {\n\t\t\tsupportsAndroid = true\n\t\t}\n\t}\n\n\tif supportsIos == false && supportsAndroid == false {\n\t\treturn errors.New(\"Either android or ios must be specified as platforms\")\n\t}\n\n\tif msg == \"\" {\n\t\treturn errors.New(\"Message not specified\")\n\t}\n\n\tif badge == \"\" {\n\t\tbadge = \"0\"\n\t}\n\n\tif sound == \"\" {\n\t\tif supportsIos == true && supportsAndroid == false {\n\t\t\tsound = \"default\"\n\t\t} else {\n\t\t\treturn errors.New(\"No sound specified\")\n\t\t}\n\t}\n\n\targs := apiRequest{\n\t\tPlatform: platforms,\n\t\tMsg: msg,\n\t\tBadge: badge,\n\t\tSound: sound,\n\t\tPayload: payload,\n\t}\n\n\treturn checkAndReturn(pushbots.sendToEndpoint(\"broadcast\", args))\n}", "func GetBytesPayload(payl *common.Payload) ([]byte, error) {\n\tbytes, err := proto.Marshal(payl)\n\treturn bytes, err\n}", "func (o *NSQProducer) GetBroadcastAddress() string {\n\tif o == nil || o.BroadcastAddress == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.BroadcastAddress\n}", "func (r *RelayType) GetPayload() types.Message {\r\n\treturn &RelayAction{}\r\n}", "func (nf *NetworkPayload) GetPayload() []byte {\n\treturn nil\n}", "func (r *Room) Broadcast(actionType string, data interface{}) {\n\tfor s := range r.io.sockets {\n\t\tif _, ok := r.sockets[s]; !ok {\n\t\t\ts.Emit(actionType, data)\n\t\t}\n\t}\n}", "func (chatRoom *ChatRoom) Broadcast(data string) {\n\tfor _, client := range chatRoom.clients {\n\t\tclient.outgoing <- data\n\t}\n}", "func (r *Room) SendBroadcast(msg Message) {\n\tfor _, client := range r.clients {\n\t\tclient.WriteMessage(msg)\n\t}\n}", "func (mngr Manager) broadcast(payload []byte, msgType uint) (err error) {\n\tdata, err := rlp.EncodeToBytes(networkMessage{\n\t\tType: msgType,\n\t\tData: payload,\n\t\tTimestamp: uint64(time.Now().Unix()),\n\t})\n\tif err != nil {\n\t\treturn\n\t}\n\tmngr.node.Broadcast(data)\n\treturn\n}", "func (jbobject *BroadcastBroadcast) GetValue() *JavaLangObject {\n\tjret, err := jbobject.CallMethod(javabind.GetEnv(), \"getValue\", \"java/lang/Object\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tretconv := javabind.NewJavaToGoCallable()\n\tdst := &javabind.Callable{}\n\tretconv.Dest(dst)\n\tif err := retconv.Convert(javabind.ObjectRef(jret)); err != nil {\n\t\tpanic(err)\n\t}\n\tretconv.CleanUp()\n\tunique_x := &JavaLangObject{}\n\tunique_x.Callable = dst\n\treturn unique_x\n}", "func (server *ChatServer) BroadCast() {\n\n\tmessages := make([]*model.Message, 0)\n\tuserList := make(map[string]interface{})\nInfiLoop:\n\tfor {\n\t\tselect {\n\t\tcase message := <-server.NewMessage:\n\t\t\tmessages = append(messages, message)\n\t\t\tfor _, client := range server.OnlineUsers {\n\t\t\t\tclient.SendSingleMessage(message)\n\t\t\t}\n\t\tcase <-server.NewUser:\n\t\t\t//\tuser[\"username\"] = *newUser.Username\n\t\t\t//\tuser[\"created_at\"] = time.Now().String()\n\t\t\tuserList[\"message_type\"] = \"user_list\"\n\t\t\tus := []model.User{}\n\t\t\tfor _, c := range server.OnlineUsers {\n\t\t\t\tus = append(us, c.User)\n\t\t\t}\n\t\t\tuserList[\"list\"] = us\n\t\tdefault:\n\t\t\tbreak InfiLoop\n\t\t}\n\t}\n\t// if len(userList) > 0 {\n\t// \tfor _, client := range server.OnlineUsers {\n\t// \t\tclient.Socket.WriteJSON([]map[string]interface{}{\n\t// \t\t\tuserList,\n\t// \t\t})\n\t// \t}\n\t// }\n\n\t// if len(messages) > 0 {\n\t// \tfor _, client := range server.OnlineUsers {\n\t// \t\tclient.Send(messages)\n\t// \t}\n\t// }\n}", "func (m *Message) 
Payload() payload.Payload {\n\treturn m.payload\n}", "func (input PacketChannel) PayloadOnly() <-chan []byte {\n\toutput := make(chan []byte)\n\tgo func() {\n\t\tdefer close(output)\n\t\tfor packet := range input {\n\t\t\toutput <- packet.Payload\n\t\t}\n\t}()\n\treturn output\n}", "func (so *SocketOptions) GetBroadcast() bool {\n\treturn so.broadcastEnabled.Load() != 0\n}", "func broadcastMessage(payload _message, logMessage string) {\n\tfor _, v := range peers {\n\t\t// fmt.Println(\"Sending to\", v)\n\t\tgo sendToAddr(payload, v, logMessage)\n\t}\n}", "func (ps *PeerStore) Broadcast(push msg.Push) {\n\tps.lock.RLock()\n\tdefer ps.lock.RUnlock()\n\tfor _, p := range ps.peers {\n\t\tp.Push(push)\n\t}\n}", "func broadcast(sub subscription, variables map[string]interface{}) {\n\n\tif sub.Query == \"\" {\n\t\treturn\n\t}\n\n\tresult := graphql.Do(graphql.Params{\n\t\tSchema: schema.Root,\n\t\tRequestString: sub.Query,\n\t\tVariableValues: variables,\n\t})\n\n\tdata, err := json.Marshal(map[string]interface{}{\n\t\t\"type\": \"data\",\n\t\t\"id\": sub.RefID,\n\t\t\"payload\": result,\n\t})\n\n\tif err != nil {\n\t\tlogs.Error(err)\n\t\treturn\n\t}\n\n\twebsocket.Message.Send(sub.Conn, string(data))\n}", "func (e DomainEvent) Payload() interface{} {\n\treturn e.payload\n}", "func (m *NestedTestAllTypes) GetPayload() (x *TestAllTypes) {\n\tif m == nil {\n\t\treturn x\n\t}\n\treturn m.Payload\n}", "func (b *Broadcast) Broadcast(v interface{}) {\n\tb.broadcast <- v\n}", "func (ds *DepositToStake) Payload() []byte { return ds.payload }", "func (s *server) Broadcast(ctx context.Context, in *tt.Message) (*tt.Response, error) {\n\tlog.Debug().Msgf(\"Got a broadcast message with length: %d\", len(in.Bytes))\n\tif len(in.Bytes) == 0 {\n\t\tlog.Warn().Msgf(\"Cannot send an empty broadcast message! 
Ignoring...\")\n\t\treturn &tt.Response{Code: tt.Response_ERROR}, errors.New(\"Cannot send a zero byte broadcast\")\n\t}\n\terr := smudge.BroadcastBytes(in.Bytes)\n\tif err != nil {\n\t\tlog.Warn().Msgf(\"Error sending message: %v\", err)\n\t\treturn &tt.Response{Code: tt.Response_ERROR}, err\n\t}\n\tlog.Info().Msg(\"Send broadcast to the network\")\n\treturn &tt.Response{Code: tt.Response_OK}, nil\n}", "func (a *API) GetRoomInstancePayload(room *RoomModel) (*RoomInstanceEventPayload, error) {\n\tusers := &[]UserModel{}\n\terr := a.DB.Model(room).\n\t\tRelated(users, \"Members\").Error\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tuserIDs := []string{}\n\tfor _, u := range *users {\n\t\tuserIDs = append(userIDs, u.ID)\n\t}\n\treturn &RoomInstanceEventPayload{\n\t\tID: room.ID,\n\t\tName: room.Name,\n\t\tPhoto: room.Photo,\n\t\tDescription: room.Description,\n\t\tMemberIDs: userIDs,\n\t}, nil\n}", "func GetPayload(ctx context.Context, hostnameData hostname.Data) *Payload {\n\tmeta := hostMetadataUtils.GetMeta(ctx, config.Datadog)\n\tmeta.Hostname = hostnameData.Hostname\n\n\tp := &Payload{\n\t\tOs: osName,\n\t\tAgentFlavor: flavor.GetFlavor(),\n\t\tPythonVersion: python.GetPythonInfo(),\n\t\tSystemStats: getSystemStats(),\n\t\tMeta: meta,\n\t\tHostTags: hostMetadataUtils.GetHostTags(ctx, false, config.Datadog),\n\t\tContainerMeta: containerMetadata.Get(1 * time.Second),\n\t\tNetworkMeta: getNetworkMeta(ctx),\n\t\tLogsMeta: getLogsMeta(),\n\t\tInstallMethod: getInstallMethod(getInstallInfoPath()),\n\t\tProxyMeta: getProxyMeta(),\n\t\tOtlpMeta: getOtlpMeta(),\n\t}\n\n\t// Cache the metadata for use in other payloads\n\tkey := buildKey(\"payload\")\n\tcache.Cache.Set(key, p, cache.NoExpiration)\n\n\treturn p\n}", "func (e *Exchange) Payload() []byte {\n\treturn e.payload\n}", "func (unsubscribe UnsubscribePacket) Marshall() []byte {\n\tfixedLength := 2\n\tbuf := make([]byte, fixedLength+unsubscribe.PayloadSize())\n\n\t// Header\n\tfixedHeaderFlags := 2 // mandatory value\n\tbuf[0] = byte(unsubscribeType<<4 | fixedHeaderFlags)\n\tbuf[1] = byte(unsubscribe.PayloadSize())\n\n\t// Packet ID (it must be non zero, so we use 1 if value is zero to generate a valid packet)\n\tid := 1\n\tif unsubscribe.ID > id {\n\t\tid = unsubscribe.ID\n\t}\n\tbinary.BigEndian.PutUint16(buf[2:4], uint16(id))\n\n\t// Topics name\n\tnextPos := 4\n\tfor _, topic := range unsubscribe.Topics {\n\t\tnextPos = copyBufferString(buf, nextPos, topic)\n\t}\n\n\treturn buf\n}", "func (o *GetChatroomsIDOK) SetPayload(payload *apimodel.Chatroom) {\n\to.Payload = payload\n}", "func BroadcastAll(message socket.RawMessage) {\n\tinstance.mutex.RLock()\n\tinstance.broadcast(uuid.Nil, message)\n\tinstance.mutex.RUnlock()\n}", "func (b *Broker) Broadcast(payload interface{}, topics ...string) {\n\tfor _, topic := range topics {\n\t\tfor _, s := range b.topics[topic] {\n\t\t\tm := &Message{\n\t\t\t\ttopic: topic,\n\t\t\t\tpayload: payload,\n\t\t\t\tcreatedAt: time.Now().UnixNano(),\n\t\t\t}\n\t\t\tgo (func(s *Subscriber) {\n\t\t\t\ts.Signal(m)\n\t\t\t})(s)\n\t\t}\n\t}\n}", "func (d *DataFabric) ReadBroadcast(fun uint8, off uint16) (uint64, error) {\n\treturn d.ReadIndirect(BROAD, fun, off)\n}", "func (env *Env) broadcastMessage(msg chat.Message) error {\n\tuserIDs, err := env.getSubscribedUsers(msg.ChannelID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, userID := range userIDs {\n\t\tmsgChannels, ok := env.Channels.GetUserChannels(userID)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tsendMessageToChannels(msg, 
msgChannels)\n\t}\n\treturn nil\n}", "func (s *Switch) Broadcast(ctx context.Context, protocol string, payload []byte) error {\n\t// context should already contain a requestID for an incoming message\n\tif _, ok := log.ExtractRequestID(ctx); !ok {\n\t\tctx = log.WithNewRequestID(ctx)\n\t\ts.logger.WithContext(ctx).Info(\"new broadcast message with no requestId, generated one\")\n\t}\n\treturn s.gossip.Broadcast(ctx, payload, protocol)\n}", "func (h *Hub) Broadcast(conn *websocket.Conn, messageType int, data []byte) {\n\tlogger.Info.Println(\"broadcast message:\", string(data), \"message type:\", messageType)\n\tfor client, _ := range h.Clients {\n\t\tif client != conn {\n\t\t\tclient.WriteMessage(messageType, data)\n\t\t}\n\t}\n}", "func broadcast(ips []string, action string, actionId id) []id {\n\tvar list []id\n\tvar wg sync.WaitGroup\n\twg.Add(len(ips))\n\n\tfor i, ip := range ips {\n\t\tgo func(i int, ip string, actionId id) {\n\t\t\tdefer wg.Done()\n\n\t\t\thostName := ip\n\t\t\tportNum := \"10001\"\n\t\t\tservice := hostName + \":\" + portNum\n\t\t\tRemoteAddr, _ := net.ResolveUDPAddr(\"udp\", service)\n\t\t\tconn, _ := net.DialUDP(\"udp\", nil, RemoteAddr)\n\t\t\tdefer conn.Close()\n\n\t\t\tm := message{\n\t\t\t\tAction: action,\n\t\t\t\tData: nil,\n\t\t\t\tThe_id: actionId,\n\t\t\t}\n\t\t\tb, _ := json.Marshal(m)\n\n\t\t\t//send it to the connection\n\t\t\tconn.Write(b)\n\n\t\t\t//case of Others Joining the group, send them the current Memlist\n\t\t\tif GetOutboundIP() != \"172.22.154.22:10001\" && ip == \"172.22.154.22\" && action == \"join\" {\n\t\t\t\t// receive message from server\n\t\t\t\tbuffer := make([]byte, 1024)\n\t\t\t\tn, _, err := conn.ReadFromUDP(buffer)\n\t\t\t\tCheckError(err)\n\t\t\t\terr = json.Unmarshal(buffer[0:n], &list)\n\t\t\t\tCheckError(err)\n\t\t\t}\n\t\t}(i, ip, actionId)\n\t}\n\twg.Wait()\n\treturn list\n}", "func (msg *Message) Payload() []byte {\n\treturn msg.Encrypted\n}", "func GetPayload(e *common.Envelope) (*common.Payload, error) {\n\tpayload := &common.Payload{}\n\terr := proto.Unmarshal(e.Payload, payload)\n\treturn payload, err\n}", "func (sio *SocketIO) Broadcast(data interface{}) {\n\tsio.BroadcastExcept(nil, data)\n}", "func (app *MgmtApp) Broadcast(msg ServerMessage) {\n app.hub.Broadcast(msg)\n}", "func (m *Message) Broadcast() bool {\n\treturn m.Flags.Type().Broadcast()\n}", "func (c *CoordinatorHelper) BroadcastAll(\n\tctx context.Context,\n) error {\n\treturn c.broadcastStorage.BroadcastAll(ctx, true)\n}", "func broadcastWebSocket(outChannel chan string, ws *websocket.Conn) {\n\n for {\n select {\n case data := <-outChannel:\n ws.WriteMessage(websocket.TextMessage, []byte(data))\n break\n }\n }\n}", "func (h *Hub) HandleBroadcast(w http.ResponseWriter, r *http.Request) {\n\th.SendToAll([]byte(\"Hi ALL!\"))\n}", "func (s *inmemKeystore) GetPayload(keyprotectID string) (string, secrets.KeyStates, error) {\n\t_, _, extractErr := extractBluemixOrgSpace(s)\n\tif extractErr != nil {\n\t\treturn \"\", secrets.Destroyed, extractErr\n\t}\n\n\tif _, ok := secretStore[keyprotectID]; !ok {\n\t\treturn \"\", secrets.Destroyed, ErrNotFound\n\t}\n\n\tpayload := secretStore[keyprotectID].payload\n\n\treturn payload, secrets.Activation, nil\n}", "func (rs *Restake) Payload() []byte { return rs.payload }", "func (a *API) GetRoomParticipantPayload(\n\troom *RoomModel,\n\tuserID string,\n) (*RoomParticipantEventPayload, error) {\n\tuserIDs := []string{}\n\tfor _, u := range room.Members {\n\t\tuserIDs = append(userIDs, u.ID)\n\t}\n\treturn 
&RoomParticipantEventPayload{\n\t\tRoomID: room.ID,\n\t\tUserID: userID,\n\t\tParticipantIDs: userIDs,\n\t}, nil\n}", "func (hub *WSHub) SendBroadcast(messageType int, msg []byte) {\n\thub.mu.Lock()\n\tdefer hub.mu.Unlock()\n\tfor conn := range hub.connections {\n\t\t// _, err := conn.Write(msg)\n\t\terr := conn.WriteMessage(messageType, msg)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tdelete(hub.connections, conn)\n\t\t\tconn.Close()\n\t\t}\n\t}\n}", "func (b Message) Payload() string {\n\treturn b.payload\n}", "func (q *TransmitLimitedQueue) GetBroadcasts(overhead, limit int) [][]byte {\n\tq.mu.Lock()\n\tdefer q.mu.Unlock()\n\n\t// Fast path the default case\n\tif q.lenLocked() == 0 {\n\t\treturn nil\n\t}\n\n\ttransmitLimit := retransmitLimit(q.RetransmitMult, q.NumNodes())\n\n\tvar (\n\t\tbytesUsed int\n\t\ttoSend [][]byte\n\t\treinsert []*limitedBroadcast\n\t)\n\n\t// Visit fresher items first, but only look at stuff that will fit.\n\t// We'll go tier by tier, grabbing the largest items first.\n\tminTr, maxTr := q.getTransmitRange()\n\tfor transmits := minTr; transmits <= maxTr; /*do not advance automatically*/ {\n\t\tfree := int64(limit - bytesUsed - overhead)\n\t\tif free <= 0 {\n\t\t\tbreak // bail out early\n\t\t}\n\n\t\t// Search for the least element on a given tier (by transmit count) as\n\t\t// defined in the limitedBroadcast.Less function that will fit into our\n\t\t// remaining space.\n\t\tgreaterOrEqual := &limitedBroadcast{\n\t\t\ttransmits: transmits,\n\t\t\tmsgLen: free,\n\t\t\tid: math.MaxInt64,\n\t\t}\n\t\tlessThan := &limitedBroadcast{\n\t\t\ttransmits: transmits + 1,\n\t\t\tmsgLen: math.MaxInt64,\n\t\t\tid: math.MaxInt64,\n\t\t}\n\t\tvar keep *limitedBroadcast\n\t\tq.tq.AscendRange(greaterOrEqual, lessThan, func(item btree.Item) bool {\n\t\t\tcur := item.(*limitedBroadcast)\n\t\t\t// Check if this is within our limits\n\t\t\tif int64(len(cur.b.Message())) > free {\n\t\t\t\t// If this happens it's a bug in the datastructure or\n\t\t\t\t// surrounding use doing something like having len(Message())\n\t\t\t\t// change over time. There's enough going on here that it's\n\t\t\t\t// probably sane to just skip it and move on for now.\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tkeep = cur\n\t\t\treturn false\n\t\t})\n\t\tif keep == nil {\n\t\t\t// No more items of an appropriate size in the tier.\n\t\t\ttransmits++\n\t\t\tcontinue\n\t\t}\n\n\t\tmsg := keep.b.Message()\n\n\t\t// Add to slice to send\n\t\tbytesUsed += overhead + len(msg)\n\t\ttoSend = append(toSend, msg)\n\n\t\t// Check if we should stop transmission\n\t\tq.deleteItem(keep)\n\t\tif keep.transmits+1 >= transmitLimit {\n\t\t\tkeep.b.Finished()\n\t\t} else {\n\t\t\t// We need to bump this item down to another transmit tier, but\n\t\t\t// because it would be in the same direction that we're walking the\n\t\t\t// tiers, we will have to delay the reinsertion until we are\n\t\t\t// finished our search. 
Otherwise we'll possibly re-add the message\n\t\t\t// when we ascend to the next tier.\n\t\t\tkeep.transmits++\n\t\t\treinsert = append(reinsert, keep)\n\t\t}\n\t}\n\n\tfor _, cur := range reinsert {\n\t\tq.addItem(cur)\n\t}\n\n\treturn toSend\n}", "func (o *NotificationConfig) GetPayload() string {\n\tif o == nil || o.Payload == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Payload\n}", "func (s *Websocket) broadcast(msg models.Message) {\n\ts.hmu.RLock()\n\n\tfor _, receiver := range s.hub {\n\t\tif err := receiver.Conn.WriteJSON(msg); err != nil {\n\t\t\ts.logger.Errorf(\"error sending message: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t}\n\n\ts.history = append(s.history, msg)\n\tif err := s.historyRepo.Add(msg); err != nil {\n\t\ts.logger.Errorf(\"error writing history to db: %v\", err)\n\t}\n\n\ts.hmu.RUnlock()\n}", "func (m *EventLog) Payload() map[string]interface{} {\n\treturn m.MapPayload(m)\n}", "func (g *game) broadcast(msg []byte) {\n\tfor i := 0; i < len(g.Players); i++ {\n\t\tif g.Players[i].Connection != nil {\n\t\t\tg.Players[i].Connection.Outbound <- msg\n\t\t}\n\t}\n}", "func (hub *Hub) BroadcastMessage(data string) {\n\thub.BroadcastUserList()\n\tmsg := ParseMessage(data)\n\tfmt.Println(msg.DisplayString())\n\tlog.Println(msg.DisplayString())\n\n\tif msg.Receivers == All {\n\t\tfor _, client := range hub.clientMap {\n\t\t\tclient.outboundMessage <- msg.ToString()\n\t\t}\n\n\t} else {\n\t\tfor _, clientID := range strings.Split(msg.Receivers, \",\") {\n\t\t\tclient, ok := hub.clientMap[clientID]\n\t\t\tif ok {\n\t\t\t\tclient.outboundMessage <- msg.ToString()\n\t\t\t}\n\t\t}\n\t}\n}", "func (cl *Client) BroadcastUserList() {\n\tpresentUsers := cl.Room.PresentUsers()\n\tserialized, err := json.Marshal(presentUsers)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tcl.Send(msgPresentUsers, string(serialized))\n}", "func (radius *RADIUS) Payload() []byte {\n\treturn radius.BaseLayer.Payload\n}", "func GetBroadcastCommand(cdc *codec.Codec) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"broadcast [file_path]\",\n\t\tShort: \"Broadcast transactions generated offline\",\n\t\tLong: strings.TrimSpace(`Broadcast transactions created with the --generate-only\nflag and signed with the sign command. Read a transaction from [file_path] and\nbroadcast it to a node. 
If you supply a dash (-) argument in place of an input\nfilename, the command reads from standard input.\n\n$ <appcli> tx broadcast ./mytxn.json\n`),\n\t\tArgs: cobra.ExactArgs(1),\n\t\tRunE: func(cmd *cobra.Command, args []string) (err error) {\n\t\t\tcliCtx := context.NewCLIContext().WithCodec(cdc)\n\t\t\tstdTx, err := utils.ReadStdTxFromFile(cliCtx.Codec, args[0])\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttxBytes, err := cliCtx.Codec.MarshalBinaryLengthPrefixed(stdTx)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tres, err := cliCtx.BroadcastTx(txBytes)\n\t\t\tcliCtx.PrintOutput(res) // nolint:errcheck\n\t\t\treturn err\n\t\t},\n\t}\n\n\treturn flags.PostCommands(cmd)[0]\n}", "func broadcastUDP() {\r\n\tif !IsJoin {\r\n\t\treturn\r\n\t}\r\n\tif CurrentProtocol == \"All2All\" {\r\n\t\tselfNode := make(map[string]Membership)\r\n\t\tMT.Lock()\r\n\t\tselfNode[MyID] = MembershipList[MyID]\r\n\t\tMT.Unlock()\r\n\t\tjsonString, err := json.Marshal(selfNode)\r\n\t\tif err != nil {\r\n\t\t\tpanic(err)\r\n\t\t}\r\n\t\tmsg := string(jsonString)\r\n\t\tMT.Lock()\r\n\t\tfor id, _ := range MembershipList {\r\n\t\t\t_, okLeave := LeaveNodes[id]\r\n\t\t\t_, okFail := FailedNodes[id]\r\n\t\t\t// dont send to myself, leave nodes and fail nodes\r\n\t\t\tif id != MyID && !okLeave && !okFail {\r\n\t\t\t\t//Logger.Info(\"All2All: \" + helper.ConvertIDtoVM(id) + fmt.Sprintf(\" %v\", len(msg)))\r\n\t\t\t\tsendMsgToID(id, msg)\r\n\t\t\t}\r\n\t\t}\r\n\t\tMT.Unlock()\r\n\t} else if CurrentProtocol == \"Gossip\" {\r\n\t\tMT.Lock()\r\n\t\tjsonString, err := json.Marshal(MembershipList)\r\n\t\tMT.Unlock()\r\n\t\tif err != nil {\r\n\t\t\tpanic(err)\r\n\t\t}\r\n\t\tmsg := string(jsonString)\r\n\t\tidList := selectGossipID()\r\n\t\t// dont send to myself, leave nodes and fail nodes\r\n\t\tfor _, id := range idList {\r\n\t\t\t//Logger.Info(\"Gossip: \" + helper.ConvertIDtoVM(id) + fmt.Sprintf(\" %v\", len(msg)))\r\n\t\t\tsendMsgToID(id, msg)\r\n\t\t}\r\n\t}\r\n}", "func fetchAllPlayingBroadcasts() ([]*Broadcast, error) {\n\tbroadcastFinder := models.NewBroadcastFinder().IsPlaying()\n\tif err := broadcastFinder.Do(); err != nil {\n\t\treturn nil, err\n\t}\n\tmBroadcasts := broadcastFinder.Result()\n\tsrvBroadcasts := []*Broadcast{}\n\tfor i := range mBroadcasts {\n\t\tsrvBroadcast := modelBroadcastToSrvBroadcast(mBroadcasts[i])\n\t\tsrvBroadcasts = append(srvBroadcasts, srvBroadcast)\n\t}\n\treturn srvBroadcasts, nil\n}", "func (r CampaignRequest) Payload() *model.Payload {\n\tif r.AdvertiserID <= 0 {\n\t\treturn nil\n\t}\n\tp := new(model.Payload)\n\tp.AddValue(\"advertiser_id\", strconv.FormatInt(r.AdvertiserID, 10))\n\treturn p\n}", "func (messageBus MessageBus) Broadcast(message Message, data interface{}) {\n\tfor _, listener := range messageBus.Listeners {\n\t\tlistener.Notify(message, data)\n\t}\n}", "func (b *crdtBroadcaster) Broadcast(data []byte) error {\n\treturn b.client.Send(&pb.PubSubRequest{\n\t\tRequestType: pb.PSREQTYPE_PS_PUBLISH,\n\t\tTopics: []string{b.topic},\n\t\tData: data,\n\t})\n}", "func (c *ChannelsCreateChannelRequest) SetBroadcast(value bool) {\n\tif value {\n\t\tc.Flags.Set(0)\n\t\tc.Broadcast = true\n\t} else {\n\t\tc.Flags.Unset(0)\n\t\tc.Broadcast = false\n\t}\n}", "func (s *Session) Broadcast(key string, val interface{}) {\n\ts.broadcasts[key] = val\n}", "func (g *GlobalMessages) Get() ([]byte, error) {\n\tg.locker.Lock()\n\tdefer g.locker.Unlock()\n\tif len(g.Events) == 0 {\n\t\treturn []byte{}, nil\n\t}\n\tresult, err := json.Marshal(g.Events)\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\tg.Events = nil\n\treturn result, nil\n}", "func (c *CoordinatorHelper) Broadcast(\n\tctx context.Context,\n\tdbTx storage.DatabaseTransaction,\n\tidentifier string,\n\tnetwork *types.NetworkIdentifier,\n\tintent []*types.Operation,\n\ttransactionIdentifier *types.TransactionIdentifier,\n\tpayload string,\n\tconfirmationDepth int64,\n) error {\n\treturn c.broadcastStorage.Broadcast(\n\t\tctx,\n\t\tdbTx,\n\t\tidentifier,\n\t\tnetwork,\n\t\tintent,\n\t\ttransactionIdentifier,\n\t\tpayload,\n\t\tconfirmationDepth,\n\t)\n}", "func (o WebhookOutput) Payload() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Webhook) pulumi.StringOutput { return v.Payload }).(pulumi.StringOutput)\n}", "func GetPayload(e *cb.Envelope) (*cb.Payload, error) {\n\tpayload := &cb.Payload{}\n\terr := proto.Unmarshal(e.Payload, payload)\n\treturn payload, errors.Wrap(err, \"error unmarshaling Payload\")\n}", "func NewBroadcaster(buffer_len int) *Broadcast {\n\t//fmt.Println(\"test\")\n\tb := &Broadcast{}\n\n\tb.buff = buffer_len\n\n\tb.sender = make(chan interface{}, b.buff)\n\n\tgo func() {\n\t\tfor s := range b.sender {\n\t\t\tfor _, r := range b.receiver {\n\t\t\t\tr <- s\n\t\t\t}\n\t\t}\n\n\t\t// The sender channel is closed\n\t\tfor _, r := range b.receiver {\n\t\t\tclose(r)\n\t\t}\n\t}()\n\treturn b\n}", "func (r *Repeater) Broadcast(message []byte) error {\n\tr.delivery <- message\n\treturn nil\n}", "func (o *GetFleetsFleetIDMembersForbidden) SetPayload(payload *models.GetFleetsFleetIDMembersForbiddenBody) {\n\to.Payload = payload\n}" ]
[ "0.588773", "0.58685297", "0.5810341", "0.5681333", "0.5568869", "0.55119294", "0.5473713", "0.54418457", "0.54224706", "0.54075783", "0.5393018", "0.5365551", "0.53186834", "0.52261513", "0.52187634", "0.5189766", "0.5181955", "0.5167827", "0.5162568", "0.51217777", "0.51200557", "0.5102809", "0.50804055", "0.50394547", "0.49933913", "0.4972747", "0.4938348", "0.49368972", "0.49136433", "0.49110883", "0.48993307", "0.48806873", "0.4872853", "0.4863389", "0.4844866", "0.48397782", "0.48305258", "0.48296455", "0.48210445", "0.4815964", "0.4804779", "0.47995427", "0.4789867", "0.4781454", "0.47777286", "0.47609857", "0.47546983", "0.4753078", "0.47509998", "0.4731874", "0.4721235", "0.47176182", "0.471571", "0.47093076", "0.4707482", "0.47054106", "0.47035825", "0.46938893", "0.46916184", "0.46858612", "0.46739194", "0.46552995", "0.4652927", "0.46365967", "0.46268174", "0.46152943", "0.46006894", "0.45984322", "0.4582062", "0.45767355", "0.45766768", "0.45630154", "0.4560754", "0.45581314", "0.45553032", "0.4554474", "0.45524272", "0.4547375", "0.4528454", "0.45217508", "0.45183998", "0.4513328", "0.45089537", "0.45074204", "0.4502607", "0.4501197", "0.45005083", "0.44989392", "0.44929484", "0.44822952", "0.44816715", "0.44800678", "0.4478847", "0.4478795", "0.4476692", "0.44644278", "0.4454622", "0.44451547", "0.44355562", "0.44323757" ]
0.71547204
0
CreateAlbum queries the Imgur API in order to create an anonymous album, using a client ID
func CreateAlbum(title string, clientID string) (albumID, deleteHash interface{}) {
	apiURL := "https://api.imgur.com"
	resource := "/3/album/"

	// Build the URL-encoded form body carrying the album title.
	data := url.Values{}
	data.Set("title", title)

	u, _ := url.ParseRequestURI(apiURL)
	u.Path = resource
	urlStr := u.String() // "https://api.imgur.com/3/album/"

	// POST the form to the Imgur album endpoint, authenticating with the client ID.
	client := &http.Client{}
	r, _ := http.NewRequest("POST", urlStr, strings.NewReader(data.Encode())) // URL-encoded payload
	r.Header.Add("Authorization", "Client-ID "+clientID)
	r.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	r.Header.Add("Content-Length", strconv.Itoa(len(data.Encode())))
	resp, _ := client.Do(r)

	// Decode the JSON response and pull the album ID and delete hash out of the "data" object.
	var result map[string]interface{}
	json.NewDecoder(resp.Body).Decode(&result)
	nestedMap := result["data"]
	newMap, _ := nestedMap.(map[string]interface{})
	albumID = newMap["id"]
	deleteHash = newMap["deletehash"]

	fmt.Println(color.GreenString("\n[+]"), "Successfully created an album with the following values:")
	fmt.Println(color.GreenString("albumID:"), albumID, color.GreenString("Album DeleteHash:"), deleteHash)
	fmt.Println(" ")
	return albumID, deleteHash
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *Service) Create(ctx context.Context, title string) (*Album, error) {\n\treq := &photoslibrary.CreateAlbumRequest{\n\t\tAlbum: &photoslibrary.Album{Title: title},\n\t}\n\tres, err := s.photos.Create(req).Context(ctx).Do()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating album: %w\", err)\n\t}\n\talbum := toAlbum(res)\n\treturn &album, nil\n}", "func CreateAlbum(req CreateAlbumRequest) error {\n\tinsertAlbum := `INSERT INTO Album (year, title, date) VALUES (?, ?, ?)`\n\tif _, err := configure.SQL.Query(insertAlbum, req.Year, req.AlbumTitle, req.AlbumDate); err != nil {\n\t\tlog.Println(\"Failed on inserting album\")\n\t\treturn err\n\t}\n\treturn nil\n}", "func (r *AlbumsService) Create(createalbumrequest *CreateAlbumRequest) *AlbumsCreateCall {\n\tc := &AlbumsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.createalbumrequest = createalbumrequest\n\treturn c\n}", "func (p *Photos) CreateAlbum(ctx context.Context, title string, uploadItems []UploadItem) ([]*AddResult, error) {\n\tlog.Printf(\"Creating album %s\", title)\n\talbum, err := p.service.CreateAlbum(ctx, &photoslibrary.CreateAlbumRequest{\n\t\tAlbum: &photoslibrary.Album{Title: title},\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not create an album: %s\", err)\n\t}\n\treturn p.add(ctx, uploadItems, photoslibrary.BatchCreateMediaItemsRequest{\n\t\tAlbumId: album.Id,\n\t\tAlbumPosition: &photoslibrary.AlbumPosition{Position: \"LAST_IN_ALBUM\"},\n\t}), nil\n}", "func DeleteAlbum(albumDeleteHash string, clientID string) {\n\turl := \"https://api.imgur.com/3/album/\" + albumDeleteHash\n\tmethod := \"DELETE\"\n\n\tpayload := &bytes.Buffer{}\n\twriter := multipart.NewWriter(payload)\n\terr := writer.Close()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(method, url, payload)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treq.Header.Add(\"Authorization\", \"Client-ID \"+clientID)\n\n\treq.Header.Set(\"Content-Type\", writer.FormDataContentType())\n\tres, err := client.Do(req)\n\tdefer res.Body.Close()\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tif strings.Contains(string(body), \"200\") {\n\t\tfmt.Println(color.GreenString(\"[+]\"), \"Delete was a success\")\n\t}\n\n}", "func (service *AlbumDiscogService) CreateAlbumDiscog(attributes *library.AlbumAttributes) error {\n\tif service.insert == nil {\n\t\tstmt, err := service.prepareInsert()\n\t\tif err != nil {\n\t\t\tservice.session.Logger.Println(err)\n\t\t\treturn err\n\t\t}\n\t\tservice.insert = stmt\n\t}\n\n\t_, err := service.insert.Exec(\n\t\tattributes.ArtistName, attributes.ArtistSort,\n\t\tattributes.Name,\n\t\tattributes.Sort,\n\t\tattributes.ReleaseDate,\n\t\tattributes.ArtistName, attributes.ArtistSort,\n\t\tattributes.GenreName)\n\n\tif err != nil {\n\t\tservice.session.Logger.Println(err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func NewAlbum() *Album {\n\talbum := new(Album)\n\talbum.Key = \"\"\n\talbum.Id = 0\n\talbum.EncodedKey = \"\"\n\talbum.Title = \"\"\n\talbum.Artists = dna.StringArray{}\n\talbum.Coverart = \"\"\n\talbum.Topics = dna.StringArray{}\n\talbum.Plays = 0\n\talbum.Songids = dna.IntArray{}\n\talbum.YearReleased = \"\"\n\talbum.Nsongs = 0\n\talbum.Description = \"\"\n\talbum.DateCreated = time.Time{}\n\talbum.Checktime = time.Time{}\n\t// add more 6 fields\n\talbum.IsAlbum = 0\n\talbum.IsHit = 0\n\talbum.IsOfficial = 0\n\talbum.Likes = 0\n\talbum.StatusId = 0\n\talbum.Comments = 0\n\talbum.ArtistIds = 
dna.IntArray{}\n\treturn album\n}", "func NewAlbum() *Album {\n\talbum := new(Album)\n\talbum.Id = 0\n\talbum.Key = \"\"\n\talbum.Title = \"\"\n\talbum.Artists = dna.StringArray{}\n\talbum.Plays = 0\n\talbum.Songids = dna.IntArray{}\n\talbum.Nsongs = 0\n\talbum.Description = \"\"\n\talbum.Coverart = \"\"\n\talbum.DateCreated = time.Time{}\n\talbum.Checktime = time.Time{}\n\treturn album\n}", "func addAlbum(w http.ResponseWriter, r *http.Request){\n\tw.Header().Set(\"Content-Type\",\"application/json\")\n\tparam := mux.Vars(r)\n\tif err:= Session.Query(`INSERT INTO albumtable (albname) VALUES (?) IF NOT EXISTS;`,param[\"album\"]).Exec();err!=nil {\n\t\tfmt.Println(err)\n\t} else {\n\t\tfmt.Fprintf(w, \"New album added\")\n\t}\n}", "func (s MockedRepository) Create(ctx context.Context, title string) (*Album, error) {\n\treturn s.CreateFn(ctx, title)\n}", "func postAlbums(c *gin.Context) {\n\n\tvar newAlbum album.Album\n\n\tif err := c.BindJSON(&newAlbum); err != nil {\n\t\treturn\n\t}\n\n\tres, err := dbClient.Exec(\"INSERT INTO album (id, title, artist, price) VALUES (?, ?, ?, ?);\",\n\t\tnewAlbum.ID, newAlbum.Title, newAlbum.Artist, newAlbum.Price)\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tid, err := res.LastInsertId()\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tnewAlbum.ID = int(id)\n\n\tc.IndentedJSON(http.StatusCreated, newAlbum)\n}", "func ArtistAlbum(id string, page, limit int) (string, error) {\n\t_offset, _limit := formatParams(page, limit)\n\tpreParams := \"{\\\"offset\\\": \"+ _offset +\", \\\"limit\\\": \"+_limit +\", \\\"total\\\": true, \\\"csrf_token\\\": \\\"\\\"}\"\n\tparams, encSecKey, encErr := EncParams(preParams)\n\tif encErr != nil {\n\t\treturn \"\", encErr\n\t}\n\tres, resErr := post(\"http://music.163.com/weapi/artist/albums/\"+id, params, encSecKey)\n\tif resErr != nil {\n\t\treturn \"\", resErr\n\t}\n\treturn res, nil\n}", "func NewAlbum(name string, artist string, positionOnLastFm string, imageUrl string, playCount string) *Album {\n\tnewImageUrl := strings.Replace(imageUrl, \"34s\", \"128s\", -1)\n\talbum := Album{Name: name,\n\t\tPositionOnLastFm: positionOnLastFm,\n\t\tArtist: artist,\n\t\tImageUrl: newImageUrl,\n\t\tPlayCount: playCount}\n\treturn &album\n}", "func PutAlbum(jsonData string, db *neoism.Database) string {\n\t// TODO: Write a data verification method\n\n\t// Parse the json data into an album struct\n\tvar a Album\n\terr := json.Unmarshal([]byte(jsonData), &a)\n\tif err != nil {\n\t\treturn \"{ \\\"err\\\": \\\"Unable to parse json request\\\" }\"\n\t\tfmt.Println(err)\n\t}\n\n\t// Set the submitted date to the current time\n\ta.Submitted = int32(time.Now().Unix())\n\tfmt.Println(a.Submitted)\n\n\t// Create a new node in Neo4j DB\n\tres := []struct {\n\t\tN neoism.Node\n\t}{}\n\n\tcq := neoism.CypherQuery{\n\t\tStatement: \"CREATE (n:Album {name: {name}, year: {year}, submitted: {submitted}}) RETURN n\",\n\t\tParameters: neoism.Props{\"name\": a.Name, \"year\": a.Year, \"submitted\": a.Submitted},\n\t\tResult: res,\n\t}\n\tdb.Cypher(&cq)\n\n\t// TODO: Create relationships to artist, genre\n\n\treturn \"\"\n}", "func postAlbums(c *gin.Context) {\n\tvar newAlbum album\n\n\t// Call BinsJSON to bind the received JSON to\n\t// newAlbum.\n\tif err := c.BindJSON(&newAlbum); err != nil {\n\t\treturn\n\t}\n\n\t// Add the new album to the albums.json.\n\tif result := AddAlbum(newAlbum); result == false {\n\t\tc.IndentedJSON(http.StatusNotAcceptable, gin.H{\"message\": \"ID not avaible\"})\n\t\treturn\n\t}\n\n\tc.IndentedJSON(http.StatusCreated, 
newAlbum)\n}", "func (b *PhotosSaveBuilder) AlbumID(v int) *PhotosSaveBuilder {\n\tb.Params[\"album_id\"] = v\n\treturn b\n}", "func (b *PhotosGetBuilder) AlbumID(v string) *PhotosGetBuilder {\n\tb.Params[\"album_id\"] = v\n\treturn b\n}", "func GetAlbumFromAPI(id dna.Int) (*Album, error) {\n\tvar album *Album = NewAlbum()\n\talbum.Id = id\n\tapialbum, err := GetAPIAlbum(id)\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tif apialbum.Response.MsgCode == 1 {\n\t\t\tif GetKey(apialbum.Id) != GetKey(album.Id) {\n\t\t\t\terrMes := dna.Sprintf(\"Resulted key and computed key are not match. %v =/= %v , id: %v =/= %v\", GetKey(apialbum.Id), GetKey(album.Id), id, apialbum.Id)\n\t\t\t\tpanic(errMes.String())\n\t\t\t}\n\n\t\t\talbum.Title = apialbum.Title\n\t\t\talbum.Artists = dna.StringArray(apialbum.Artists.Split(\" , \").Map(func(val dna.String, idx dna.Int) dna.String {\n\t\t\t\treturn val.Trim()\n\t\t\t}).([]dna.String)).SplitWithRegexp(\",\").Filter(func(v dna.String, i dna.Int) dna.Bool {\n\t\t\t\tif v != \"\" {\n\t\t\t\t\treturn true\n\t\t\t\t} else {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t})\n\n\t\t\talbum.Topics = dna.StringArray(apialbum.Topics.Split(\", \").Map(func(val dna.String, idx dna.Int) dna.String {\n\t\t\t\treturn val.Trim()\n\t\t\t}).([]dna.String)).SplitWithRegexp(\" / \").Unique().Filter(func(v dna.String, i dna.Int) dna.Bool {\n\t\t\t\tif v != \"\" {\n\t\t\t\t\treturn true\n\t\t\t\t} else {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t})\n\t\t\talbum.Plays = apialbum.Plays\n\t\t\t// album.Songids\n\t\t\t// album.Nsongs\n\t\t\t// album.EncodedKey\n\t\t\t// album.Coverart\n\t\t\t// album.DateCreated\n\t\t\talbum.YearReleased = apialbum.YearReleased\n\t\t\talbum.Description = apialbum.Description.RemoveHtmlTags(\"\")\n\n\t\t\talbum.ArtistIds = apialbum.ArtistIds.Split(\",\").ToIntArray()\n\t\t\talbum.IsAlbum = apialbum.IsAlbum\n\t\t\talbum.IsHit = apialbum.IsHit\n\t\t\talbum.IsOfficial = apialbum.IsOfficial\n\t\t\talbum.Likes = apialbum.Likes\n\t\t\talbum.StatusId = apialbum.StatusId\n\t\t\talbum.Comments = apialbum.Comments\n\t\t\talbum.Checktime = time.Now()\n\t\t\treturn album, nil\n\t\t} else {\n\t\t\treturn nil, errors.New(\"Message code invalid \" + apialbum.Response.MsgCode.ToString().String())\n\t\t}\n\t}\n}", "func GetAlbum(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"short_id\"]\n\n\talbum, err := database.GetAlbumByShortID(id)\n\tif err != nil {\n\t\tutils.RespondWithJSON(w, http.StatusInternalServerError, \"error\", nil)\n\t\treturn\n\t}\n\n\tutils.RespondWithJSON(w, http.StatusOK, \"success\", album)\n\treturn\n}", "func getGooglePhotosAlbumId(name string, c *gphotos.Client) string {\n\tif name == \"\" {\n\t\treturn \"\"\n\t}\n\n\talbum, err := c.GetOrCreateAlbumByName(name)\n\tif err != nil {\n\t\tlog.Printf(\"error creating album: name=%s, error=%v\", name, err)\n\t\treturn \"\"\n\t}\n\treturn album.Id\n}", "func postAlbums(c *gin.Context) {\n\t// deserialize the JSON into the album struct.\n\tvar newAlbum album\n\n\terr := c.BindJSON(&newAlbum)\n\tif err != nil {\n\t\tc.AbortWithError(http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\t// add the album to the slice of albums.\n\talbums = append(albums, newAlbum)\n\n\t// serialize the struct into JSON and add it to the response.\n\tc.IndentedJSON(http.StatusCreated, newAlbum)\n}", "func (album *Album) New() item.Item {\n\treturn item.Item(NewAlbum())\n}", "func (album *Album) New() item.Item {\n\treturn item.Item(NewAlbum())\n}", "func newUserAlbumPhotoUploader(a int) 
uploader {\n\treturn getAlbumPhotoUploader(0, a)\n}", "func postAlbums(c *gin.Context) {\n\tvar newAlbum album\n\n\t// Call BindJSON to bind the received JSON to\n\t// newAlbum.\n\tif err := c.BindJSON(&newAlbum); err != nil {\n\t\treturn\n\t}\n\n\t// Add the new album to the slice.\n\talbums = append(albums, newAlbum)\n\tc.IndentedJSON(http.StatusCreated, newAlbum)\n}", "func CreateNewImageV1(w http.ResponseWriter, r *http.Request) {\n\tparams := mux.Vars(r)\n\talbumUuid := uuid.MustParse(params[\"uuid\"])\n\tservice.CreateDefaultAuthService().DoWithValidSession(w, r, func(session *model.Session) (interface{}, error) {\n\t\ttempFile, fileHeader, err := r.FormFile(\"image\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn service.CreateDefaultImageService().CreateNewImageForAlbum(\n\t\t\tuuid.MustParse(session.User.Uuid),\n\t\t\talbumUuid,\n\t\t\ttempFile,\n\t\t\tfileHeader.Filename,\n\t\t\tfileHeader.Size,\n\t\t)\n\t})\n}", "func postAlbums(c *gin.Context) {\n\tvar newAlbum Album\n\n\t// Call bindjson to bind received json to newAlbum\n\tif err := c.BindJSON(&newAlbum); err != nil {\n\t\treturn\n\t}\n\n\t// Add the new album to the slice\n\talbums = append(albums, newAlbum)\n\tc.IndentedJSON(http.StatusCreated, newAlbum)\n}", "func (b *PhotosGetAllCommentsBuilder) AlbumID(v int) *PhotosGetAllCommentsBuilder {\n\tb.Params[\"album_id\"] = v\n\treturn b\n}", "func GetAlbum(id dna.Int) (*Album, error) {\n\tapiAlbum, err := GetAPIAlbum(id)\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\talbum := apiAlbum.ToAlbum()\n\t\tif album.Id == 0 {\n\t\t\treturn nil, errors.New(dna.Sprintf(\"Keeng - Album ID: %v not found\", id).String())\n\t\t} else {\n\t\t\treturn album, nil\n\t\t}\n\t}\n}", "func PostPhotoAlbumPhotoHandler(w http.ResponseWriter, r *http.Request) {\n\tfuncTag := \"PostPhotoAlbumPhotoHandler\"\n\n\t// TODO: Get data from body of the request\n\tphoto := &photoDB.Photo{\n\t\tAlbumID: 3,\n\t\tTitle: \"Yeah\",\n\t\tDescription: \"Man\",\n\t\tSrc: \"sam-shortline-candler-grandy-papa-daddy-with-train-12.jpg\",\n\t}\n\n\t// create a transaction\n\ttxo, err := photoDB.NewTxO(\"Test User\")\n\tif err != nil {\n\t\terr = apierr.Errorf(err, funcTag, \"open db transaction\")\n\t\tresponder.SendJSONError(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\t// create the photo\n\tphoto, err = photoDB.CreatePhoto(txo, photo)\n\tif errTxo := txo.RollbackOnError(err); errTxo != nil {\n\t\terr = apierr.Errorf(err, funcTag, \"create photo\")\n\t\tresponder.SendJSONError(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\t// commit transaction\n\terr = txo.Commit()\n\tif err != nil {\n\t\terr = apierr.Errorf(err, funcTag, \"commit db transaction\")\n\t\tresponder.SendJSONError(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\t// build the return data\n\tres := &GetPhotosResponse{}\n\tres.Photos = []*photoDB.Photo{photo}\n\n\t// return\n\tresponder.SendJSON(w, res)\n}", "func (r *AlbumsService) Get(albumId string) *AlbumsGetCall {\n\tc := &AlbumsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.albumId = albumId\n\treturn c\n}", "func getAlbumByID(c *gin.Context) {\n\tid := c.Param(\"id\")\n\ta := GetAlbum(id)\n\n\tif a != nil {\n\t\tc.IndentedJSON(http.StatusOK, a)\n\t\treturn\n\t}\n\n\tc.IndentedJSON(http.StatusNotFound, gin.H{\"message\": \"album not found\"})\n}", "func (b *PhotosEditAlbumBuilder) AlbumID(v int) *PhotosEditAlbumBuilder {\n\tb.Params[\"album_id\"] = v\n\treturn b\n}", "func deleteAlbum(w http.ResponseWriter, r *http.Request) 
{\n\tw.Header().Set(\"Content-Type\",\"application/json\")\n\tparam := mux.Vars(r)\n\t//CQL Operation\n\tif err:= Session.Query(`DELETE FROM albumtable WHERE albname=? IF EXISTS;`,param[\"album\"]).Exec();err!=nil {\n\t\tfmt.Println(err)\n\t} else {\n\t\tfmt.Fprintf(w, \"Album deleted\")\n\t}\n}", "func (c MockedCache) PutAlbum(ctx context.Context, album Album) error {\n\treturn c.PutAlbumFn(ctx, album)\n}", "func (c *Client) UploadToAlbum(ctx context.Context, albumId string, filePath string) (*media_items.MediaItem, error) {\n\ttoken, err := c.Uploader.UploadFile(ctx, filePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\titem := media_items.SimpleMediaItem{\n\t\tUploadToken: token,\n\t\tFilename: filePath,\n\t}\n\treturn c.MediaItems.CreateToAlbum(ctx, albumId, item)\n}", "func (sf *APISongFreaksAlbum) New() item.Item {\n\treturn item.Item(NewAPISongFreaksAlbum())\n}", "func GetAlbum(urlArgs url.Values, db *neoism.Database) string {\n\t// Pull selection data from url arguments\n\tas := AlbumSelect{Name: urlArgs.Get(\"name\"), Year: urlArgs.Get(\"year\"), Genre: urlArgs.Get(\"genre\"), Artist: urlArgs.Get(\"artist\")}\n\n\t// Pull Neo4j nodes from DB matching selecton params\n\tres := []struct {\n\t\tN string `json:\"n.name\"`\n\t\tY string `json:\"n.year\"`\n\t\tS int32 `json:\"n.submitted\"`\n\t}{}\n\n\tcq := neoism.CypherQuery{\n\t\t// We use regex matches (=~) to gracefully account for\n\t\t// missing fields, so we can use .*\n\t\tStatement: `\n\t\t\tMATCH (n:Album)\n\t\t\tWHERE n.name =~ {name} AND n.year =~ {year}\n\t\t\tRETURN n.name, n.year, n.submitted;\n\t\t`,\n\t\t// DefMatch is substituting .* for us when necessary\n\t\tParameters: neoism.Props{\"name\": DefMatch(as.Name), \"year\": DefMatch(as.Year)},\n\t\tResult: &res,\n\t}\n\tdb.Cypher(&cq)\n\n\t// Turn the list of Nodes into a list of Albums\n\talbums := make([]Album, 1)\n\tfor _, el := range res {\n\t\tname := el.N\n\t\tyear := el.Y\n\t\tsubmitted := el.S\n\n\t\ta := Album{Name: name, Year: year, Submitted: submitted}\n\t\talbums = append(albums, a)\n\t}\n\n\t// Turn the list of albums into a json representation\n\tjsonReturn, err := json.Marshal(albums)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\treturn string(jsonReturn)\n}", "func TestAlbumAddDeletePhoto(t *testing.T) {\n\talbum := api.Album{\n\t\tAlbumTitle: WellKnownAlbumTitle,\n\t}\n\n\tnewAlbum, err := Client.V1().CreateAlbum(album)\n\tif err != nil {\n\t\tt.Errorf(\"expected success creating album: %v\", err)\n\t\tt.FailNow()\n\t}\n\n\t// Add Photos\n\tphotos := []string{\n\t\tWellKnownPhotoID,\n\t}\n\terr = Client.V1().AddPhotosToAlbum(newAlbum.AlbumUID, photos)\n\tif err != nil {\n\t\tt.Errorf(\"expected to add photos to album: %v\", err)\n\t\t// Note: We do NOT FailNow() here because we want to clean up\n\t}\n\n\t// Get the photos by album\n\tupdatedPhotos, err := Client.V1().GetPhotos(&api.PhotoOptions{\n\t\tCount: 100,\n\t\tAlbumUID: newAlbum.AlbumUID,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"expecting to list photos by album: %v\", err)\n\t\t// Note: We do NOT FailNow() here because we want to clean up\n\t}\n\n\tvar updatedPhotoIDs []string\n\tfor _, photo := range updatedPhotos {\n\t\tupdatedPhotoIDs = append(updatedPhotoIDs, photo.PhotoUID)\n\t}\n\tif len(updatedPhotos) != 2 {\n\t\tt.Errorf(\"expecting 2 well known photo in album, found: %d\", len(updatedPhotos))\n\t}\n\n\terr = Client.V1().DeletePhotosFromAlbum(newAlbum.AlbumUID, updatedPhotoIDs)\n\tif err != nil {\n\t\tt.Errorf(\"expected to delete newly created photos from album: %v\", 
err)\n\t\t// Note: We do NOT FailNow() here because we want to clean up\n\t}\n\n\t// Get the photos by album\n\tupdatedPhotos, err = Client.V1().GetPhotos(&api.PhotoOptions{\n\t\tCount: 100,\n\t\tAlbumUID: newAlbum.AlbumUID,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"expecting to list photos by album: %v\", err)\n\t\t// Note: We do NOT FailNow() here because we want to clean up\n\t}\n\n\tif len(updatedPhotos) != 0 {\n\t\tt.Errorf(\"expected empty album, found %d photos\", len(updatedPhotos))\n\t\t// Note: We do NOT FailNow() here because we want to clean up\n\t}\n\n\terr = Client.V1().DeleteAlbums([]string{newAlbum.AlbumUID})\n\tif err != nil {\n\t\tt.Errorf(\"expected delete album %s, album not deleted: %v\", newAlbum.AlbumUID, err)\n\t\tt.FailNow()\n\t}\n\n\t// put the album back\n\tCreateWellKnownAlbum()\n}", "func ExampleAPIClient_BeginCreateOrUpdate() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclient, err := armapimanagement.NewAPIClient(\"subid\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpoller, err := client.BeginCreateOrUpdate(ctx,\n\t\t\"rg1\",\n\t\t\"apimService1\",\n\t\t\"tempgroup\",\n\t\tarmapimanagement.APICreateOrUpdateParameter{\n\t\t\tProperties: &armapimanagement.APICreateOrUpdateProperties{\n\t\t\t\tDescription: to.Ptr(\"apidescription5200\"),\n\t\t\t\tAuthenticationSettings: &armapimanagement.AuthenticationSettingsContract{\n\t\t\t\t\tOAuth2: &armapimanagement.OAuth2AuthenticationSettingsContract{\n\t\t\t\t\t\tAuthorizationServerID: to.Ptr(\"authorizationServerId2283\"),\n\t\t\t\t\t\tScope: to.Ptr(\"oauth2scope2580\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSubscriptionKeyParameterNames: &armapimanagement.SubscriptionKeyParameterNamesContract{\n\t\t\t\t\tHeader: to.Ptr(\"header4520\"),\n\t\t\t\t\tQuery: to.Ptr(\"query3037\"),\n\t\t\t\t},\n\t\t\t\tPath: to.Ptr(\"newapiPath\"),\n\t\t\t\tDisplayName: to.Ptr(\"apiname1463\"),\n\t\t\t\tProtocols: []*armapimanagement.Protocol{\n\t\t\t\t\tto.Ptr(armapimanagement.ProtocolHTTPS),\n\t\t\t\t\tto.Ptr(armapimanagement.ProtocolHTTP)},\n\t\t\t\tServiceURL: to.Ptr(\"http://newechoapi.cloudapp.net/api\"),\n\t\t\t},\n\t\t},\n\t\t&armapimanagement.APIClientBeginCreateOrUpdateOptions{IfMatch: nil})\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\tres, err := poller.PollUntilDone(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n\t// TODO: use response item\n\t_ = res\n}", "func (s *KSession) GetAlbum(id int64) (results Album, err error) {\n\tresults = Album{}\n\tres, err := s.request(\"GET\", EndpointLyricsAlbum(id), nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(res, &results)\n\treturn\n}", "func newCommunityAlbumPhotoUploader(gid, a int) uploader {\n\treturn getAlbumPhotoUploader(gid, a)\n}", "func AddAlbum(album *models.Album) int {\n\tdb, err := open()\n\tutil.CheckErr(\"AddAlbum\", err, true)\n\tdefer db.Close()\n\tstmt, err := db.Prepare(\"INSERT INTO albums(name, album_artist_id, year) VALUES(?, ?, ?)\")\n\tutil.CheckErr(\"AddAlbum\", err, true)\n\n\tres, err := stmt.Exec(album.Name, album.ArtistId, album.Year)\n\tutil.CheckErr(\"AddAlbum\", err, true)\n\n\tlastID, err := res.LastInsertId()\n\tutil.CheckErr(\"AddAlbum\", err, true)\n\n\treturn int(lastID)\n}", "func addAlbum(album Album) (int64, error) {\n\tresult, err := db.Exec(\"INSERT INTO album (title, 
artist, price) VALUES (?, ?, ?)\", album.Title, album.Artist, album.Price)\n\tif nil != err {\n\t\treturn 0, fmt.Errorf(\"addAlbum: %v\", err)\n\t}\n\tid, err := result.LastInsertId()\n\tif nil != err {\n\t\treturn 0, fmt.Errorf(\"addAlbum: %v\", err)\n\t}\n\treturn id, nil\n}", "func (a API) CreateSong(c *gin.Context) (int, interface{}, error) {\n\te := a.err.Fn(\"CreateSong\")\n\tvar song api.Song\n\tif err := c.BindJSON(&song); err != nil {\n\t\treturn 400, nil, e.JSON(err, \"binding\")\n\t}\n\tinvalid, err := a.s.saveSong(song)\n\tif err != nil {\n\t\treturn 500, nil, e.UK(err)\n\t}\n\tif invalid {\n\t\treturn 400, nil, e.DB(err)\n\t}\n\treturn 201, nil, nil\n}", "func (c *Client) OAuthClientCreate(name string, redirectUri string) (*OAuthClient, error) {\n\tparams := struct {\n\t\tName string `json:\"name\"`\n\t\tRedirectUri string `json:\"redirect_uri\"`\n\t}{\n\t\tName: name,\n\t\tRedirectUri: redirectUri,\n\t}\n\tvar oauthClientRes OAuthClient\n\treturn &oauthClientRes, c.Post(&oauthClientRes, \"/oauth/clients\", params)\n}", "func (l *Lidarr) AddAlbumContext(ctx context.Context, album *AddAlbumInput) (*Album, error) {\n\tif album.Releases == nil {\n\t\talbum.Releases = make([]*AddAlbumInputRelease, 0)\n\t}\n\n\tvar body bytes.Buffer\n\tif err := json.NewEncoder(&body).Encode(album); err != nil {\n\t\treturn nil, fmt.Errorf(\"json.Marshal(%s): %w\", bpAlbum, err)\n\t}\n\n\treq := starr.Request{\n\t\tURI: bpAlbum,\n\t\tQuery: make(url.Values),\n\t\tBody: &body,\n\t}\n\n\tvar output Album\n\tif err := l.PostInto(ctx, req, &output); err != nil {\n\t\treturn nil, fmt.Errorf(\"api.Post(%s): %w\", &req, err)\n\t}\n\n\treturn &output, nil\n}", "func newAlbumSong(s *models.Song, showDiscNum bool, overrideIndex int) *albumSong {\n\tsong := &albumSong{\n\t\tTextView: tview.NewTextView(),\n\t\tsong: s,\n\t\tshowDiscNum: showDiscNum,\n\t}\n\n\tif overrideIndex == -1 {\n\t\tsong.index = s.Index\n\t} else {\n\t\tsong.index = overrideIndex\n\t}\n\n\tsong.SetBackgroundColor(config.Color.Background)\n\tsong.SetTextColor(config.Color.Text)\n\tsong.setText()\n\tsong.SetBorderPadding(0, 0, 1, 1)\n\n\treturn song\n}", "func GetLinkClient(albumID string, clientID string) (imageLink string) {\n\n\t// This hash is the albumID hash\n\turl := \"https://api.imgur.com/3/album/\" + albumID + \"/images\"\n\tmethod := \"GET\"\n\n\tpayload := &bytes.Buffer{}\n\twriter := multipart.NewWriter(payload)\n\terr := writer.Close()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(method, url, payload)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treq.Header.Add(\"Authorization\", \"Client-ID \"+clientID)\n\n\treq.Header.Set(\"Content-Type\", writer.FormDataContentType())\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\tfmt.Println(\"[-] Error connecting:\", err)\n\t}\n\tdefer res.Body.Close()\n\tbody, err := ioutil.ReadAll(res.Body)\n\n\tvar results AlbumImages\n\terrr := json.Unmarshal([]byte(body), &results)\n\tif errr != nil {\n\t\tfmt.Println(\"[!] 
Error unmarshalling::\", errr)\n\t}\n\n\tdatavalues := results.Data\n\tif results.Success == true {\n\t\timageLink = datavalues[0].Link\n\t}\n\treturn imageLink\n\n}", "func (p *Photos) AddToAlbum(ctx context.Context, title string, uploadItems []UploadItem) ([]*AddResult, error) {\n\tlog.Printf(\"Finding album %s\", title)\n\talbum, err := p.FindAlbumByTitle(ctx, title)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not list albums: %s\", err)\n\t}\n\tif album == nil {\n\t\tlog.Printf(\"Creating album %s\", title)\n\t\tcreated, err := p.service.CreateAlbum(ctx, &photoslibrary.CreateAlbumRequest{\n\t\t\tAlbum: &photoslibrary.Album{Title: title},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Could not create an album: %s\", err)\n\t\t}\n\t\talbum = created\n\t}\n\treturn p.add(ctx, uploadItems, photoslibrary.BatchCreateMediaItemsRequest{\n\t\tAlbumId: album.Id,\n\t\tAlbumPosition: &photoslibrary.AlbumPosition{Position: \"LAST_IN_ALBUM\"},\n\t}), nil\n}", "func (b *PhotosMoveBuilder) TargetAlbumID(v int) *PhotosMoveBuilder {\n\tb.Params[\"target_album_id\"] = v\n\treturn b\n}", "func GetRandomAlbum() *Album {\n\trand.Seed(time.Now().UnixNano())\n\treturn GetAlbumAtPosition(rand.Intn(1000))\n}", "func (c *CachitaCache) albumKey(title string) string {\n\treturn \"album:\" + title\n}", "func (s *Session) UploadPhotosToAlbum(ss []string, gid, aid int) (json.RawMessage, error) {\n\treturn s.upload(ss, nil, getAlbumPhotoUploader(gid, aid))\n}", "func ExampleAuthorizationsClient_BeginCreateOrUpdate() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armavs.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpoller, err := clientFactory.NewAuthorizationsClient().BeginCreateOrUpdate(ctx, \"group1\", \"cloud1\", \"authorization1\", armavs.ExpressRouteAuthorization{}, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\tres, err := poller.PollUntilDone(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.ExpressRouteAuthorization = armavs.ExpressRouteAuthorization{\n\t// \tName: to.Ptr(\"authorization1\"),\n\t// \tType: to.Ptr(\"Microsoft.AVS/privateClouds/authorizations\"),\n\t// \tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.AVS/privateClouds/cloud1/authorizations/authorization1\"),\n\t// \tProperties: &armavs.ExpressRouteAuthorizationProperties{\n\t// \t\tExpressRouteAuthorizationID: to.Ptr(\"/subscriptions/5206f269-120b-41ef-a95b-0dce7109de61/resourceGroups/tnt34-cust-mockp02-spearj2dev/providers/Microsoft.Network/expressroutecircuits/tnt34-cust-mockp02-spearj2dev-er/authorizations/myauth\"),\n\t// \t\tExpressRouteAuthorizationKey: to.Ptr(\"37b0db3b-3b17-4c7b-bf76-bf13b01bcadc\"),\n\t// \t\tExpressRouteID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/tnt13-41a90db2-9d5e-4bd5-a77a-5ce7b58213d6-eastus2/providers/Microsoft.Network/expressroutecircuits/tnt13-41a90db2-9d5e-4bd5-a77a-5ce7b58213d6-eastus2-xconnect\"),\n\t// \t\tProvisioningState: to.Ptr(armavs.ExpressRouteAuthorizationProvisioningStateSucceeded),\n\t// \t},\n\t// }\n}", "func AddAlbum(albName string) *utils.ApplicationError {\n\treturn model.AddAlbum(albName)\n}", "func getAlbums(c *gin.Context) {\n\tc.IndentedJSON(http.StatusOK, albums)\n}", "func createImage(w http.ResponseWriter, r *http.Request) {\r\n\t//\tvars := mux.Vars(r)\r\n\t//\tid, _ := strconv.ParseInt(vars[\"id\"], 10, 64)\r\n\tvar ni newimage\r\n\tif err := json.NewDecoder(r.Body).Decode(&ni); err != nil {\r\n\t\tlogger.Warnf(\"error decoding image: %s\", err)\r\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\r\n\t\treturn\r\n\t}\r\n\tbi := baseImage{ni.BaseImage}\r\n\tcr := newImage(ni.UserId, ni.ImageName, ni.Tag, ni.Descrip)\r\n\tif err := cr.Add(); err != nil {\r\n\t\tlogger.Warnf(\"error creating image: %s\", err)\r\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\r\n\t\treturn\r\n\t}\r\n\tw.Header().Set(\"content-type\", \"application/json\")\r\n\tw.WriteHeader(http.StatusCreated)\r\n\tif err := json.NewEncoder(w).Encode(bi); err != nil {\r\n\t\tlogger.Error(err)\r\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\r\n\t}\r\n}", "func (s *SmartContract) CreateAsset(ctx contractapi.TransactionContextInterface, id string, color string, size int, appraisedValue int) error {\n\n\t// Demonstrate the use of Attribute-Based Access Control (ABAC) by checking\n\t// to see if the caller has the \"abac.creator\" attribute with a value of true;\n\t// if not, return an error.\n\n\terr := ctx.GetClientIdentity().AssertAttributeValue(\"abac.creator\", \"true\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"submitting client not authorized to create asset, does not have abac.creator role\")\n\t}\n\n\texists, err := s.AssetExists(ctx, id)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif exists {\n\t\treturn fmt.Errorf(\"the asset %s already exists\", id)\n\t}\n\n\t// Get ID of submitting client identity\n\tclientID, err := s.GetSubmittingClientIdentity(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tasset := Asset{\n\t\tID: id,\n\t\tColor: color,\n\t\tSize: size,\n\t\tOwner: clientID,\n\t\tAppraisedValue: appraisedValue,\n\t}\n\tassetJSON, err := json.Marshal(asset)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ctx.GetStub().PutState(id, assetJSON)\n}", "func (client *GalleryImagesClient) createOrUpdateCreateRequest(ctx 
context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImage, options *GalleryImagesClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif galleryName == \"\" {\n\t\treturn nil, errors.New(\"parameter galleryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{galleryName}\", url.PathEscape(galleryName))\n\tif galleryImageName == \"\" {\n\t\treturn nil, errors.New(\"parameter galleryImageName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{galleryImageName}\", url.PathEscape(galleryImageName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, galleryImage)\n}", "func (r *Client) GetAlbumImage(id int) (image string, hasImage bool) {\n\t// Get the first image in the dir if there is one\n\tlocation := r.Db.Entries[id].Location\n\tstrId := strconv.Itoa(id)\n\timagePath := \"/albums/a\" + strId + \".jpg\"\n\n\t// // Check if already exists\n\t// if _, err := os.Stat(\"public\" + imagePath); err == nil {\n\t// \t// File already exists\n\t// \timage = imagePath\n\t// \thasImage = true\n\t// \treturn\n\t// }\n\n\t// Remove the bits we dont want\n\tlocation = strings.TrimLeft(location, \"file:/\")\n\tlastSlash := strings.LastIndex(location, \"/\")\n\tlocation = location[:lastSlash+1]\n\t// fmt.Println(html.UnescapeString(location))\n\n\te, _ := url.QueryUnescape(location)\n\n\tfilepath.Walk(\"/\"+e, func(path string, _ os.FileInfo, _ error) error {\n\n\t\tlastFour := path[len(path)-4:]\n\t\tif lastFour == \".jpg\" || lastFour == \"jpeg\" || lastFour == \".png\" {\n\t\t\tCopy(\"public/albums/a\"+strId+\".jpg\", path)\n\t\t\timage = imagePath\n\t\t\thasImage = true\n\t\t\treturn nil\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn\n}", "func NewAlbumview(playSong func(song *models.Song), playSongs func(songs []*models.Song)) *AlbumView {\n\ta := &AlbumView{\n\t\tBanner: twidgets.NewBanner(),\n\t\tprevious: &previous{},\n\t\tlist: twidgets.NewScrollList(nil),\n\t\tplaySongFunc: playSong,\n\t\tplaySongsFunc: playSongs,\n\n\t\tdescription: tview.NewTextView(),\n\t\tprevBtn: newButton(\"Back\"),\n\t\tinfobtn: newButton(\"Info\"),\n\t\tplayBtn: newButton(\"Play all\"),\n\t}\n\n\ta.list.ItemHeight = 2\n\ta.list.Padding = 1\n\ta.list.SetInputCapture(a.listHandler)\n\ta.list.SetBorder(true)\n\ta.list.SetBorderColor(config.Color.Border)\n\ta.list.Grid.SetColumns(1, -1)\n\n\ta.SetBorder(true)\n\ta.SetBorderColor(config.Color.Border)\n\ta.list.SetBackgroundColor(config.Color.Background)\n\ta.Grid.SetBackgroundColor(config.Color.Background)\n\ta.listFocused = 
false\n\ta.playBtn.SetSelectedFunc(a.playAlbum)\n\n\ta.Banner.Grid.SetRows(1, 1, 1, 1, -1)\n\ta.Banner.Grid.SetColumns(6, 2, 10, -1, 10, -1, 10, -3)\n\ta.Banner.Grid.SetMinSize(1, 6)\n\n\ta.Banner.Grid.AddItem(a.prevBtn, 0, 0, 1, 1, 1, 5, false)\n\ta.Banner.Grid.AddItem(a.description, 0, 2, 2, 6, 1, 10, false)\n\ta.Banner.Grid.AddItem(a.playBtn, 3, 2, 1, 1, 1, 10, true)\n\ta.Banner.Grid.AddItem(a.infobtn, 3, 4, 1, 1, 1, 10, false)\n\ta.Banner.Grid.AddItem(a.list, 4, 0, 1, 8, 4, 10, false)\n\n\tbtns := []*button{a.prevBtn, a.playBtn, a.infobtn}\n\tselectables := []twidgets.Selectable{a.prevBtn, a.playBtn, a.infobtn, a.list}\n\tfor _, btn := range btns {\n\t\tbtn.SetLabelColor(config.Color.ButtonLabel)\n\t\tbtn.SetLabelColorActivated(config.Color.ButtonLabelSelected)\n\t\tbtn.SetBackgroundColor(config.Color.ButtonBackground)\n\t\tbtn.SetBackgroundColorActivated(config.Color.ButtonBackgroundSelected)\n\t}\n\n\ta.prevBtn.SetSelectedFunc(a.goBack)\n\n\ta.Banner.Selectable = selectables\n\ta.description.SetBackgroundColor(config.Color.Background)\n\ta.description.SetTextColor(config.Color.Text)\n\treturn a\n}", "func (c *Client) CreateOIDCAuthRequest(ctx context.Context, req types.OIDCAuthRequest) (*types.OIDCAuthRequest, error) {\n\tif resp, err := c.APIClient.CreateOIDCAuthRequest(ctx, req); err != nil {\n\t\tif !trace.IsNotImplemented(err) {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t} else {\n\t\treturn resp, nil\n\t}\n\n\tout, err := c.PostJSON(ctx, c.Endpoint(\"oidc\", \"requests\", \"create\"), createOIDCAuthRequestReq{\n\t\tReq: req,\n\t})\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tvar response *types.OIDCAuthRequest\n\tif err := json.Unmarshal(out.Bytes(), &response); err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\treturn response, nil\n}", "func (c MockedCache) GetAlbum(ctx context.Context, title string) (Album, error) {\n\treturn c.GetAlbumFn(ctx, title)\n}", "func (c *CachitaCache) PutAlbum(ctx context.Context, album photoslibrary.Album, ttl time.Duration) error {\n\treturn c.store.Put(c.albumKey(album.Title), album, ttl)\n}", "func createAccount(gm *gomatrix.Client) (accessToken, userID string, err error) {\n username := \"testing-\" + randString(5)\n // Get the session token\n req := &gomatrix.ReqRegister{\n Username: username,\n Password: testPass,\n }\n _, respInt, err := gm.Register(req)\n if err != nil {\n return\n }\n\n // Make a dummy register request\n req = &gomatrix.ReqRegister{\n Username: username,\n Password: testPass,\n Auth: struct {\n Session string\n }{\n Session: respInt.Session,\n },\n }\n resp, err := gm.RegisterDummy(req)\n if err != nil {\n return\n }\n\n // Save the access token and UserID\n accessToken = resp.AccessToken\n userID = resp.UserID\n return\n}", "func (c *UnsavedPostImageClient) Create() *UnsavedPostImageCreate {\n\tmutation := newUnsavedPostImageMutation(c.config, OpCreate)\n\treturn &UnsavedPostImageCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}\n}", "func CreateAPI(reqBody *APIReqBody) (string, error) {\n\treq, err := creatHTTPPOSTAPIRequest(publisherAPIEndpoint, reqBody)\n\tvar resBody APICreateResp\n\terr = send(CreateAPIContext, req, &resBody, http.StatusCreated)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn resBody.ID, nil\n}", "func createAssetHandlerFn(ctx context.CoreContext, cdc *wire.Codec, kb keys.Keybase) func(http.ResponseWriter, *http.Request) {\n\treturn withErrHandler(func(w http.ResponseWriter, r *http.Request) error {\n\t\tvar m createAssetBody\n\t\tbody, err := 
ioutil.ReadAll(r.Body)\n\t\terr = cdc.UnmarshalJSON(body, &m)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = m.BaseReq.Validate()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif m.Name == \"\" {\n\t\t\treturn errors.New(\"name is required\")\n\t\t}\n\n\t\tif m.Quantity.IsZero() {\n\t\t\treturn errors.New(\"quantity is required\")\n\t\t}\n\n\t\tif m.AssetID == \"\" {\n\t\t\treturn errors.New(\"asset.id is required\")\n\t\t}\n\t\tinfo, err := kb.Get(m.BaseReq.Name)\n\t\tif err != nil {\n\t\t\treturn errors.New(\"asset.id is required\")\n\t\t}\n\n\t\t// build message\n\t\tmsg := asset.MsgCreateAsset{\n\t\t\tAssetID: m.AssetID,\n\t\t\tName: m.Name,\n\t\t\tParent: m.Parent,\n\t\t\tProperties: m.Properties,\n\t\t\tSender: sdk.AccAddress(info.GetPubKey().Address()),\n\t\t\tQuantity: m.Quantity,\n\t\t\tUnit: m.Unit,\n\t\t}\n\t\tsignAndBuild(ctx, cdc, w, m.BaseReq, msg)\n\t\treturn nil\n\t})\n}", "func create() cli.ActionFunc {\n\treturn func(ctx *cli.Context) error {\n\t\tcreds := gobaclient.Credentials{\n\t\t\tUsername: ctx.Parent().GlobalString(\"username\"),\n\t\t\tPassword: ctx.Parent().GlobalString(\"password\"),\n\t\t}\n\n\t\turi, err := url.Parse(ctx.Parent().GlobalString(\"uri\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ttyp := goba.DatabaseType(ctx.Parent().GlobalString(\"database-type\"))\n\n\t\timage, err := gobaclient.CreateImage(*uri, creds, typ)\n\t\tif err == nil {\n\t\t\tprintImageNames(*image)\n\t\t}\n\t\treturn err\n\t}\n}", "func DeleteAlbum(albName string) *utils.ApplicationError {\n\treturn model.DeleteAlbum(albName)\n}", "func CreateContainer(endpoint, authToken, name string) error {\n\treq, err := http.NewRequest(\"PUT\", fmt.Sprintf(\"%s/%s\", endpoint, name), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Add(\"X-Auth-Token\", authToken)\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 && resp.StatusCode != 202 {\n\t\treturn fmt.Errorf(\"Error: cannot create container: %s: %d\", name, resp.StatusCode)\n\t}\n\n\treturn nil\n}", "func getAlbums(c *gin.Context) {\n\t// serialize the struct into JSON and add it to the response.\n\tc.IndentedJSON(http.StatusOK, albums)\n}", "func getAlbumFromAPI(album *Album) <-chan bool {\n\tchannel := make(chan bool, 1)\n\tgo func() {\n\t\tapialbum, err := GetAlbumFromAPI(album.Id)\n\t\tif err == nil {\n\t\t\talbum.Title = apialbum.Title\n\t\t\talbum.Artists = apialbum.Artists\n\t\t\talbum.Topics = apialbum.Topics\n\t\t\talbum.Plays = apialbum.Plays\n\t\t\talbum.YearReleased = apialbum.YearReleased\n\t\t\talbum.Description = apialbum.Description\n\t\t\talbum.ArtistIds = apialbum.ArtistIds\n\t\t\talbum.IsAlbum = apialbum.IsAlbum\n\t\t\talbum.IsHit = apialbum.IsHit\n\t\t\talbum.IsOfficial = apialbum.IsOfficial\n\t\t\talbum.Likes = apialbum.Likes\n\t\t\talbum.StatusId = apialbum.StatusId\n\t\t\talbum.Comments = apialbum.Comments\n\t\t\talbum.Checktime = time.Now()\n\t\t}\n\t\tchannel <- true\n\n\t}()\n\treturn channel\n}", "func getAlbumByID(c *gin.Context) {\n\tid := c.Param(\"id\")\n\n\t// Loop over the list of albums, looking for an album whose id value matches the parameter\n\tfor _, a := range albums {\n\t\tif a.ID == id {\n\t\t\tc.IndentedJSON(http.StatusOK, a)\n\t\t\treturn\n\t\t}\n\t}\n\tc.IndentedJSON(http.StatusNotFound, gin.H{\"message\": \"album not found\"})\n}", "func (s MockedRepository) Get(ctx context.Context, albumId string) (*Album, error) 
{\n\treturn s.GetFn(ctx, albumId)\n}", "func SubscribeTag() {\n\ttime.Sleep(2 * time.Second)\n\trequest := gorequest.New()\n\turl := api + \"subscriptions\"\n\n\tm := map[string]interface{}{\n\t\t\"client_id\": cfg.Instagram.ClientID,\n\t\t\"client_secret\": cfg.Instagram.ClientSecret,\n\t\t\"verify_token\": cfg.Instagram.Verify,\n\t\t\"object_id\": cfg.Instagram.TagName,\n\t\t//\"callback_url\": cfg.Instagram.CallbackUrl + \"/publish/photo\",\n\t\t\"callback_url\": callback_url + \"/publish/photo\",\n\t\t\"object\": \"tag\",\n\t\t\"aspect\": \"media\",\n\t}\n\n\tmJson, _ := json.Marshal(m)\n\tfmt.Println(string(mJson))\n\t//contentReader := bytes.NewReader(mJson)\n\t//req, _ := http.NewRequest(\"POST\", url, contentReader)\n\t////req.Header.Set(\"Content-Type\", \"application/json\")\n\t//client := &http.Client{}\n\t//resp, _ := client.Do(req)\n\t//fmt.Println(resp)\n\n\tresp, body, errs := request.Post(url).\n\t\t//Send(`{\"client_id\":\"7839c51c2a324f46a51c77c91711c8c3\",\"client_secret\":\"9fbfea5eab08476a88c56f825175501e\",\"verify_token\":\"hihi\",\"object_id\":\"catsofinstagram\",\"callback_url\":\"http://b099b464.ngrok.io/publish/photo\",\"object\":\"tag\",\"aspect\":\"media\"}`).End()\n\t\tSet(\"Header\", \"application/x-www-form-urlencoded\").\n\t\tSend(string(mJson)).End()\n\n\tif errs != nil {\n\t\tlog.Println(errs)\n\t\tfmt.Println(resp.StatusCode)\n\t\tfmt.Println(body)\n\t} else {\n\t\tlog.Println(\"Sucessfully subscribe.\")\n\t\tfmt.Println(resp.StatusCode)\n\t\tfmt.Println(body)\n\t}\n\tdefer resp.Body.Close()\n}", "func AddSong(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"Received Request: /AddSong\")\n\tparams := mux.Vars(r)\n\tencodedTrackURI := params[\"songURI\"]\n\ttrackURI, err := url.QueryUnescape(encodedTrackURI)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(\"Track URI: \" + trackURI)\n\n\tvar URL *url.URL\n\tURL, err = url.Parse(\"https://api.spotify.com\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tURL.Path += \"v1/playlists/\" + sCurrentPlaylist.PlaylistID + \"/tracks\"\n\n\treqBody := make(map[string][]string)\n\treqBody[\"uris\"] = append(reqBody[\"uris\"], trackURI)\n\treqBody[\"uris\"] = append(reqBody[\"uris\"], \"spotify:track:3e9HZxeyfWwjeyPAMmWSSQ\")\n\treqBody[\"uris\"] = append(reqBody[\"uris\"], \"spotify:track:5uIRujGRZv5t4fGKkUTv4n\")\n\treqBody[\"uris\"] = append(reqBody[\"uris\"], \"spotify:track:6zWU7YALeEDMcPGhKKZJhV\")\n\treqBody[\"uris\"] = append(reqBody[\"uris\"], \"spotify:track:6XQHlsNu6so4PdglFkJQRJ\")\n\tbytesRepresentation, err := json.Marshal(reqBody)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(\"POST\", URL.String(), bytes.NewBuffer(bytesRepresentation))\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Set(\"Authorization\", \"Bearer \"+sAcsTok.AccessToken)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// resp body contains playlist \"snapshot_id\"\n\t_, err = client.Do(req)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func resourceAliyunDatahubProjectCreate(d *schema.ResourceData, meta interface{}) error {\n\tdh := meta.(*AliyunClient).dhconn\n\n\tprojectName := d.Get(\"name\").(string)\n\tprojectComment := d.Get(\"comment\").(string)\n\n\terr := dh.CreateProject(projectName, projectComment)\n\tif err != nil {\n\t\tif NotFoundError(err) {\n\t\t\td.SetId(\"\")\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"failed to create project '%s' with error: %s\", projectName, 
err)\n\t}\n\n\td.SetId(projectName)\n\treturn resourceAliyunDatahubProjectUpdate(d, meta)\n}", "func getAlbumByID(c *gin.Context) {\n\tid := c.Param(\"id\")\n\n\t// Loop over the list of albums, looking for\n\t// an album whose ID value matches the parameter.\n\tfor _, a := range albums {\n\t\tif a.ID == id {\n\t\t\tc.IndentedJSON(http.StatusOK, a)\n\t\t\treturn\n\t\t}\n\t}\n\tc.IndentedJSON(http.StatusNotFound, gin.H{\"message\": \"album not found\"})\n}", "func NewAlbumDiscogService(s *Session) AlbumDiscogService {\n\tservice := AlbumDiscogService{session: s}\n\treturn service\n}", "func UploadPhoto(req UploadPhotoRequest) error {\n\tvar err error\n\trow := configure.SQL.QueryRow(`\n\t\tSELECT year, title FROM Album WHERE id = ?\n\t`, req.AlbumID)\n\tvar (\n\t\tyear int\n\t\ttitle string\n\t)\n\tif err = row.Scan(&year, &title); err != nil {\n\t\tlog.Println(\"Error while scanning row\")\n\t\treturn err\n\t}\n\talbumPath := fmt.Sprintf(\"%s/album/%d/%s\", configure.AppProperties.StaticFilePath, year, title)\n\tinsertPhoto := `INSERT INTO Photo (album_id, path) VALUES (?, ?)`\n\tfor _, photoBase64 := range req.PhotoList {\n\t\tphotoName := strconv.FormatInt(time.Now().Unix(), 10)\n\t\tphotoPath := albumPath + string(os.PathSeparator) + photoName\n\t\t// TODO: Transaction management\n\t\tif photoPath, err = utils.DecodeAndSaveBase64(photoPath, photoBase64, utils.ImageBase64); err != nil {\n\t\t\tlog.Println(\"Failed on DecodeAndSaveBase64\")\n\t\t\treturn err\n\t\t}\n\t\tif _, err = configure.SQL.Query(insertPhoto, req.AlbumID, photoPath); err != nil {\n\t\t\tlog.Println(\"Failed on inserting photo\")\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func albumByID(id int64, db *sql.DB) (Album, error) {\n\t// An album to hold data from the returned row.\n\tvar alb Album\n\n\trow := db.QueryRow(\"SELECT id,title,artist,price FROM album WHERE id = ?\", id)\n\tif err := row.Scan(&alb.ID, &alb.Title, &alb.Artist, &alb.Price); err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn alb, fmt.Errorf(\"albumsById %d: no such album\", id)\n\t\t}\n\t\treturn alb, fmt.Errorf(\"albumsById %d: %v\", id, err)\n\t}\n\treturn alb, nil\n}", "func (a *IamProjectApiService) IamProjectOwnershipCreate(ctx context.Context, projectId string) ApiIamProjectOwnershipCreateRequest {\n\treturn ApiIamProjectOwnershipCreateRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tprojectId: projectId,\n\t}\n}", "func (c MockedCache) InvalidateAlbum(ctx context.Context, title string) error {\n\treturn c.InvalidateAlbumFn(ctx, title)\n}", "func DeleteAlbum(albumID int) error {\n\tdeleteAlbum := `DELETE FROM Album WHERE id = ?`\n\tif _, err := configure.SQL.Query(deleteAlbum, albumID); err != nil {\n\t\tlog.Println(\"Failed to delete album\")\n\t\treturn err\n\t}\n\treturn nil\n}", "func (m *MailgunImpl) CreateCampaign(name, id string) error {\n\tr := simplehttp.NewHTTPRequest(generateApiUrl(m, campaignsEndpoint))\n\tr.SetClient(m.Client())\n\tr.SetBasicAuth(basicAuthUser, m.ApiKey())\n\n\tpayload := simplehttp.NewUrlEncodedPayload()\n\tpayload.AddValue(\"name\", name)\n\tif id != \"\" {\n\t\tpayload.AddValue(\"id\", id)\n\t}\n\t_, err := makePostRequest(r, payload)\n\treturn err\n}", "func (a *IAMApiService) CreateGroup(ctx context.Context, gid string, iamGroupCreate IamGroupCreate) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = http.MethodPut\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map 
variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/acs/api/v1/groups/{gid}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"gid\"+\"}\", fmt.Sprintf(\"%v\", gid), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &iamGroupCreate\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 0 {\n\t\t\tvar v IamError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (l *Lidarr) GetAlbumContext(ctx context.Context, mbID string) ([]*Album, error) {\n\treq := starr.Request{Query: make(url.Values), URI: bpAlbum}\n\tif mbID != \"\" {\n\t\treq.Query.Add(\"ForeignAlbumId\", mbID)\n\t}\n\n\tvar output []*Album\n\n\tif err := l.GetInto(ctx, req, &output); err != nil {\n\t\treturn nil, fmt.Errorf(\"api.Get(%s): %w\", &req, err)\n\t}\n\n\treturn output, nil\n}", "func (client IdentityClient) createRegionSubscription(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/tenancies/{tenancyId}/regionSubscriptions\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateRegionSubscriptionResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (r *artistResource) create(c *gin.Context) {\n\tvar model models.Artist\n\tif err := c.Bind(&model); err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": 
err})\n\t\treturn\n\t}\n\tresponse, err := r.service.Create(c, &model)\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err})\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusCreated, gin.H{\n\t\t\"id\": response.ID,\n\t\t\"error\": \"\",\n\t\t\"message\": response,\n\t})\n}", "func (l *Lidarr) GetAlbum(mbID string) ([]*Album, error) {\n\treturn l.GetAlbumContext(context.Background(), mbID)\n}", "func (c *Client) CreateAsset(asset *Asset) (*Asset, error) {\n\tvar out AssetItem\n\terr := c.WriteObject(\"/api/v2/assets\", \"POST\", asset, &out)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &out.Asset, nil\n}", "func (a *AuthorizationsService) Create(params interface{}) (auth *Authorization, result *Result) {\n\tresult = a.client.post(a.URL, params, &auth)\n\treturn\n}", "func (s *HMACAuthService) Create(ctx context.Context,\n\tconsumerUsernameOrID *string, hmacAuth *HMACAuth) (*HMACAuth, error) {\n\n\tcred, err := s.client.credentials.Create(ctx, \"hmac-auth\",\n\t\tconsumerUsernameOrID, hmacAuth)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar createdHMACAuth HMACAuth\n\terr = json.Unmarshal(cred, &createdHMACAuth)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &createdHMACAuth, nil\n}", "func getAlbums(c *gin.Context) {\n\talbums := GetAlbums()\n\n\tif albums == nil {\n\t\tc.IndentedJSON(http.StatusNotFound, gin.H{\"message\": \"Can not obtain list of albums\"})\n\t\treturn\n\t}\n\n\tc.IndentedJSON(http.StatusOK, albums)\n}", "func (s *SmartContract) CreateAuction(ctx contractapi.TransactionContextInterface, auctionID string, priceperkwh int, amount int, time_rem int) error { //amount = how many kwh\n\n\t// get ID of submitting client\n\tclientID, err := ctx.GetClientIdentity().GetID()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get client identity %v\", err)\n\t}\n\n\t// get org of submitting client\n\tclientOrgID, err := ctx.GetClientIdentity().GetMSPID()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get client identity %v\", err)\n\t}\n\n\t// Create auction\n\tbidders := make(map[string]BidHash)\n\trevealedBids := make(map[string]FullBid)\n\ttimestamp, err := ctx.GetStub().GetTxTimestamp()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get timestamp\")\n\t}\n\n\ttime := time.Unix(timestamp.Seconds, int64(timestamp.Nanos)) //.String()\n\n\tauction := Auction{\n\t\tType: \"auction\",\n\t\tItemSold: \"energy(KWh)\",\n\t\tAmount: amount,\n\t\tPricePerKWh: priceperkwh,\n\t\tTime_started: time,\n\t\tTime_remaining: time_rem,\n\t\tPrice: amount * priceperkwh,\n\t\tSeller: clientID,\n\t\tOrgs: []string{clientOrgID},\n\t\tPrivateBids: bidders,\n\t\tRevealedBids: revealedBids,\n\t\tWinner: \"\",\n\t\tStatus: \"open\",\n\t}\n\n\tauctionBytes, err := json.Marshal(auction)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// put auction into state\n\terr = ctx.GetStub().PutState(auctionID, auctionBytes)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to put auction in public data: %v\", err)\n\t}\n\n\t// set the seller of the auction as an endorser\n\terr = setAssetStateBasedEndorsement(ctx, auctionID, clientOrgID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed setting state based endorsement for new organization: %v\", err)\n\t}\n\n\treturn nil\n}", "func ShowAlbum() ([]string, *utils.ApplicationError) {\n\treturn model.ShowAlbum()\n}", "func NewGetImagesByAlbumApiController(s GetImagesByAlbumApiServicer) Router {\n\treturn &GetImagesByAlbumApiController{service: s}\n}", "func getAlbums(c *gin.Context) {\n\n\tvar albums 
[]album.Album\n\n\tdbClient.Select(&albums, \"SELECT id, title, artist, price FROM album;\")\n\n\tc.IndentedJSON(http.StatusOK, albums)\n}" ]
[ "0.64363945", "0.62115777", "0.6173034", "0.6124928", "0.60992676", "0.5975251", "0.5954263", "0.5921158", "0.5825856", "0.5802341", "0.57515854", "0.569839", "0.567057", "0.56693417", "0.5629028", "0.549317", "0.5457849", "0.5339119", "0.5260204", "0.52040607", "0.5150722", "0.5148682", "0.5148682", "0.5141615", "0.5136042", "0.5109292", "0.50992286", "0.50641507", "0.503226", "0.49828535", "0.49509084", "0.49187592", "0.4899237", "0.48813513", "0.48774365", "0.48690248", "0.48077455", "0.47746348", "0.47642434", "0.47620535", "0.4754492", "0.47437218", "0.47431257", "0.4684377", "0.4674475", "0.4670859", "0.46702117", "0.4663752", "0.46523276", "0.46442765", "0.46413806", "0.45849907", "0.45239866", "0.4511183", "0.4491285", "0.4475464", "0.44515118", "0.44479418", "0.44406876", "0.4425612", "0.4424284", "0.44202492", "0.44086227", "0.44074214", "0.4402792", "0.43958887", "0.4380291", "0.43764585", "0.43734863", "0.43726563", "0.43668857", "0.4363264", "0.4354216", "0.43498594", "0.4340413", "0.4334055", "0.4324377", "0.43225455", "0.43210387", "0.43020293", "0.42837873", "0.42430985", "0.42384556", "0.42345253", "0.42344356", "0.42302713", "0.42218813", "0.4208049", "0.41987544", "0.41851914", "0.41740572", "0.41689268", "0.4160339", "0.4149355", "0.41477525", "0.41452712", "0.41414747", "0.41379258", "0.4134384", "0.41310006" ]
0.7381877
0
GetAlbumImages is a function that is supposed to retrieve response images
func GetResponseImages(albumID string, clientID string) (imageLink string) { // removed: (imageLink interface{})

	// This hash is the albumID hash
	url := "https://api.imgur.com/3/album/" + albumID + "/images.json"
	method := "GET"

	payload := &bytes.Buffer{}
	writer := multipart.NewWriter(payload)
	err := writer.Close()
	if err != nil {
		fmt.Println(err)
	}

	client := &http.Client{}
	req, err := http.NewRequest(method, url, payload)
	if err != nil {
		fmt.Println(err)
	}
	req.Header.Add("Authorization", "Client-ID "+clientID)
	req.Header.Set("Content-Type", writer.FormDataContentType())

	res, err := client.Do(req)
	if err != nil {
		fmt.Println("[-] Error connecting:", err)
	}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)

	var results AlbumImages
	errr := json.Unmarshal([]byte(body), &results)
	if errr != nil {
		fmt.Println("[!] Error unmarshalling::", errr)
	}

	datavalues := results.Data
	if results.Success == true {
		for field := range datavalues {
			if strings.Contains(datavalues[field].Description, "response") {
				fmt.Println("[+] ImageID:", datavalues[field].ID)
				fmt.Println("[+] ImageTitle:", datavalues[field].Title)
				fmt.Println("[+] Description:", datavalues[field].Description)
				fmt.Println("[+] ImageLink:", datavalues[field].Link)
				fmt.Println(" ")
				responseURL = datavalues[field].Link
			}
			// fmt.Println("[+] Logic worked and got a response from a client: ", datavalues[field].Link)
		}
	}
	return responseURL
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetImagesForAlbumV1(w http.ResponseWriter, r *http.Request) {\n\tparams := mux.Vars(r)\n\tuuidParam := params[\"uuid\"]\n\n\timageModels := service.CreateDefaultImageService().GetAllImagesForAlbum(uuid.MustParse(uuidParam))\n\tdata, _ := json.Marshal(imageModels)\n\t_, _ = w.Write(data)\n\n}", "func showImagesInAlbum(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\",\"application/json\")\n\tparam := mux.Vars(r)\n\titer:=Session.Query(\"SELECT imagelist FROM albumtable WHERE albname=?;\",param[\"album\"]).Iter()\n\tvar data []string\n\tfor iter.Scan(&data){\n\t\tjson.NewEncoder(w).Encode(data)\n\t}\n\tif err := iter.Close(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (c *GetImagesByAlbumApiController) GetImagesByAlbum(w http.ResponseWriter, r *http.Request) {\n\tparams := mux.Vars(r)\n\tid := params[\"id\"]\n\tresult, err := c.service.GetImagesByAlbum(id)\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), constants.ErrorDBRecordNotFound) || strings.Contains(err.Error(), constants.ErrorDBNoSuchTable) {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tEncodeJSONResponse(result, http.StatusOK, w)\n}", "func getAlbums(c *gin.Context) {\n\tc.IndentedJSON(http.StatusOK, albums)\n}", "func getImages(hostBase string, organization string, application string) (*http.Response, []*server.Image, error) {\n\n\turl := getImagesURL(hostBase, organization, application)\n\n\tkiln.LogInfo.Printf(\"Invoking get at URL %s\", url)\n\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\treq.Header.Add(\"Accept\", \"application/json\")\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", \"e30K.e30K.e30K\"))\n\tclient := &http.Client{}\n\tresponse, err := client.Do(req)\n\n\timages := []*server.Image{}\n\n\tbytes, err := ioutil.ReadAll(response.Body)\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tbody := string(bytes)\n\n\tkiln.LogInfo.Printf(\"Response is %s\", body)\n\n\tjson.Unmarshal(bytes, &images)\n\n\treturn response, images, err\n\n}", "func getAlbums(c *gin.Context) {\n\t// serialize the struct into JSON and add it to the response.\n\tc.IndentedJSON(http.StatusOK, albums)\n}", "func getAlbums(c *gin.Context) {\n\talbums := GetAlbums()\n\n\tif albums == nil {\n\t\tc.IndentedJSON(http.StatusNotFound, gin.H{\"message\": \"Can not obtain list of albums\"})\n\t\treturn\n\t}\n\n\tc.IndentedJSON(http.StatusOK, albums)\n}", "func getAllPhotos(flickrOAuth FlickrOAuth, apiName string, setId string) map[string]Photo {\n\n\tvar err error\n\tvar body []byte\n\tphotos := map[string]Photo{}\n\tcurrentPage := 1\n\tpageSize := 500\n\n\tfor {\n\n\t\textras := map[string]string{\"page\": strconv.Itoa(currentPage)}\n\t\textras[\"per_page\"] = strconv.Itoa(pageSize)\n\t\textras[\"extras\"] = \"media,url_o\"\n\t\tif len(setId) > 0 {\n\t\t\textras[\"photoset_id\"] = setId\n\t\t}\n\n\t\tbody, err = makeGetRequest(func() string { return generateOAuthUrl(apiBaseUrl, apiName, flickrOAuth, extras) })\n\t\tif err != nil {\n\t\t\tlogMessage(\"Could not unmarshal body, check logs for body detail.\", true)\n\t\t\tlogMessage(string(body), false)\n\t\t\treturn map[string]Photo{}\n\t\t}\n\n\t\tresponsePhotos := []Photo{}\n\t\tvar err error\n\t\tif apiName == getPhotosNotInSetName {\n\t\t\tresponse := PhotosNotInSetResponse{}\n\t\t\terr = xml.Unmarshal(body, &response)\n\t\t\tif err == nil {\n\t\t\t\tresponsePhotos = response.Photos\n\t\t\t}\n\t\t} else {\n\t\t\tresponse := 
PhotosResponse{}\n\t\t\terr = xml.Unmarshal(body, &response)\n\t\t\tif err == nil {\n\t\t\t\tresponsePhotos = response.Set.Photos\n\t\t\t}\n\t\t}\n\n\t\tif err != nil {\n\n\t\t\t// We couldn't unmarshal the response as photos, but it might be the case\n\t\t\t// that we just ran out of photos, i.e. the set has a multiple of 500 photos in it\n\t\t\t// Lets try to unmarshal the response as an error, and if it is, error code \"1\" means\n\t\t\t// we're good and we can take what we've got and roll on.\n\t\t\terrorResponse := FlickrErrorResponse{}\n\t\t\terr = xml.Unmarshal(body, &errorResponse)\n\t\t\tif err != nil {\n\n\t\t\t\tlogMessage(\"Could not unmarshal body, check logs for body detail.\", true)\n\t\t\t\tlogMessage(string(body), false)\n\t\t\t\treturn map[string]Photo{}\n\t\t\t}\n\n\t\t\t// The \"good\" error code\n\t\t\tif errorResponse.Error.Code == \"1\" {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tlogMessage(\"An error occurred while getting photos for the set. Check the body in the logs.\", false)\n\t\t\tlogMessage(string(body), false)\n\t\t}\n\n\t\tfor _, v := range responsePhotos {\n\t\t\tphotos[v.Id] = v\n\t\t}\n\n\t\t// If we didn't get 500 photos, then we're done.\n\t\t// There are no more photos to get.\n\t\tif len(responsePhotos) < pageSize {\n\t\t\tbreak\n\t\t}\n\n\t\tcurrentPage++\n\t}\n\n\treturn photos\n}", "func getAlbums(c *gin.Context) {\n\n\tvar albums []album.Album\n\n\tdbClient.Select(&albums, \"SELECT id, title, artist, price FROM album;\")\n\n\tc.IndentedJSON(http.StatusOK, albums)\n}", "func GetUserImages(w http.ResponseWriter, r *http.Request) {\n\n\t//Get current Session\n\tsession, _ := store.Get(r, \"session\")\n\tname := session.Values[\"username\"].(string)\n\n\t//Get User\n\tuser, err := model.GetUserByUsername(name)\n\tif err != nil {\n\n\t\tw.WriteHeader(http.StatusConflict)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\n\t}\n\n\t//Get Images\n\timages, err := user.GetImages()\n\tif err != nil {\n\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\n\t}\n\n\t//Get like and comment counts for each Image\n\tfor i := 0; i < len(images); i++ {\n\n\t\timages[i].Likes, err = images[i].GetLikeCounts()\n\t\tif err != nil {\n\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\n\t\t}\n\t\tcomments, err := images[i].GetComments()\n\t\tif err != nil {\n\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\n\t\t}\n\t\timages[i].Comments = len(comments)\n\n\t}\n\n\t//Make Response JSON\n\tresponseModel := struct {\n\t\tImages []model.Image\n\t}{\n\t\tImages: images,\n\t}\n\tresponseJSON, err := json.Marshal(responseModel)\n\tif err != nil {\n\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\n\t}\n\n\t//Write response\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(responseJSON)\n\n}", "func ShowImagesInAlbum(albName string) ([]string, *utils.ApplicationError) {\n\treturn model.ShowImagesInAlbum(albName)\n}", "func GetPhotosByAlbumKeyHandler(w http.ResponseWriter, r *http.Request) {\n\tfuncTag := \"GetPhotosByAlbumKeyHandler\"\n\n\t// process request params\n\tmp, err := requester.GetRequestParams(r, nil, routeKeyAlbumID)\n\tif err != nil {\n\t\terr = apierr.Errorf(err, funcTag, \"process request params\")\n\t\tresponder.SendJSONError(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\t// get the photos\n\tps, err := 
photoDB.GetPhotosByAlbumKey(mp[routeKeyAlbumID])\n\tif err != nil {\n\t\terr = apierr.Errorf(err, funcTag, \"get photos by album key\")\n\t\tresponder.SendJSONError(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\t// give the photos their url from s3\n\t// TODO: have the client pass in a quality filter via query params (\"1024\" below)\n\tfor _, p := range ps {\n\t\trelativePath := fmt.Sprintf(\"%s/%s\", \"1024\", p.Src)\n\t\tp.Src = aws.S3PublicAssetURL(relativePath)\n\t}\n\n\t// build the return data\n\tres := &GetPhotosResponse{}\n\tres.Photos = ps\n\n\t// return\n\tresponder.SendJSON(w, res)\n}", "func getAlbums(c *gin.Context) {\n\tc.Header(\"Access-Control-Allow-Origin\", \"*\")\n\tc.Header(\"Access-Control-Allow-Headers\", \"Content-Type\")\n\tc.IndentedJSON(http.StatusOK, albums)\n\n\trows, err := db.Query(\"SELECT * FROM department\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tfmt.Println(string(1))\n\t\tvar department Department\n\t\tif err := rows.Scan(&department.id, &department.name); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tfmt.Println(\"Name: \", department.name)\n\t}\n}", "func getImages(app App) []docker.APIImages {\n\tpDebug(\"Getting images %s\", app.Image)\n\timgs, _ := client.ListImages(docker.ListImagesOptions{All: false, Filter: app.Image})\n\treturn imgs\n}", "func GetGalleryImages(db *sql.DB) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprintf(w, \"GET /galleries/\"+mux.Vars(r)[\"id\"]+\"/images\")\n\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(\"Not Implemented\"))\n\t}\n}", "func GetAlbums(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tname := vars[\"name\"]\n\n\tsite, err := database.GetSiteByName(name)\n\tif err != nil {\n\t\tutils.RespondWithJSON(w, http.StatusNotFound, \"not_found\", nil)\n\t\treturn\n\t}\n\n\talbums, err := database.GetAlbums(site.ID)\n\tif err != nil {\n\t\tutils.RespondWithJSON(w, http.StatusInternalServerError, \"error\", nil)\n\t\treturn\n\t}\n\n\tutils.RespondWithJSON(w, http.StatusOK, \"success\", albums)\n\treturn\n}", "func getImages() ([]types.ImageSummary, error) {\n\timages, err := client.ImageList(context.Background(), types.ImageListOptions{})\n\tif err != nil {\n\t\treturn []types.ImageSummary{}, err\n\t}\n\treturn images, nil\n}", "func (m *VirtualEndpoint) GetGalleryImages()([]CloudPcGalleryImageable) {\n val, err := m.GetBackingStore().Get(\"galleryImages\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]CloudPcGalleryImageable)\n }\n return nil\n}", "func (r QuayAdapter) GetImageNames() ([]string, error) {\n\tlog.Debug(\"QuayAdapter::GetImages\")\n\tlog.Debug(\"BundleSpecLabel: %s\", BundleSpecLabel)\n\tlog.Debug(\"Loading image list for quay.io Org: [ %v ]\", r.config.Org)\n\n\tvar imageList []string\n\n\t// check if we're configured for specific images\n\tif len(r.config.Images) > 0 {\n\t\tlog.Debugf(\"Configured to use images: %v\", r.config.Images)\n\t\timageList = append(imageList, r.config.Images...)\n\t}\n\n\t// discover images\n\treq, err := http.NewRequest(\"GET\", fmt.Sprintf(quayCatalogURL, r.config.URL, r.config.Org), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Accept\", \"application/json\")\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %v\", r.config.Token))\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to load catalog response at %s - %v\", fmt.Sprintf(quayCatalogURL, r.config.URL, 
r.config.Org), err)\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tcatalogResp := quayImageResponse{}\n\terr = json.NewDecoder(resp.Body).Decode(&catalogResp)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to decode Catalog response from '%s'\", fmt.Sprintf(quayCatalogURL, r.config.URL, r.config.Org))\n\t\treturn nil, err\n\t}\n\n\tfor _, repo := range catalogResp.Repositories {\n\t\timageList = append(imageList, repo.Name)\n\t}\n\n\tif len(imageList) == 0 {\n\t\tlog.Warn(\"image list is empty. No images were discovered\")\n\t\treturn imageList, nil\n\t}\n\n\tvar uniqueList []string\n\timageMap := make(map[string]struct{})\n\tfor _, image := range imageList {\n\t\timageMap[image] = struct{}{}\n\t}\n\n\t// create a unique image list\n\tfor key := range imageMap {\n\t\tuniqueList = append(uniqueList, key)\n\t}\n\treturn uniqueList, nil\n}", "func (s *Service) List(ctx context.Context) ([]Album, error) {\n\tvar result []Album\n\talbumsListCall := s.photos.List().PageSize(maxAlbumsPerPage).ExcludeNonAppCreatedData()\n\terr := albumsListCall.Pages(ctx, func(response *photoslibrary.ListAlbumsResponse) error {\n\t\tfor _, res := range response.Albums {\n\t\t\tresult = append(result, toAlbum(res))\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tvar emptyResult []Album\n\t\treturn emptyResult, fmt.Errorf(\"listing albums: %w\", err)\n\t}\n\treturn result, nil\n}", "func (h *Handler) GetImages(w http.ResponseWriter, r *http.Request) {\n\t// first list all the pools so that we can retrieve images from all pools\n\tpools, err := ceph.ListPoolSummaries(h.context, h.config.clusterInfo.Name)\n\tif err != nil {\n\t\tlogger.Errorf(\"failed to list pools: %+v\", err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tresult := []model.BlockImage{}\n\n\t// for each pool, get further details about all the images in the pool\n\tfor _, p := range pools {\n\t\timages, ok := h.getImagesForPool(w, p.Name)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\n\t\tresult = append(result, images...)\n\t}\n\n\tFormatJsonResponse(w, result)\n}", "func GetImages(w http.ResponseWriter, r *http.Request) {\n\n\t//Get Formdata\n\tqueryVar := mux.Vars(r)\n\tlastrecordedtime := queryVar[\"[0-9]+\"]\n\n\t//Get ImageIDs\n\timageIDs, err := model.GetImageIDs(lastrecordedtime)\n\tif err != nil {\n\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\n\t}\n\n\t//Make Response Model\n\tresponseModel := struct {\n\t\tImagesIDs []string\n\t}{\n\t\tImagesIDs: imageIDs,\n\t}\n\n\t//Make ResponeJSON\n\tresponseJSON, err := json.Marshal(responseModel)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t//Write response\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(responseJSON)\n\n}", "func (r *Client) GetAlbumImage(id int) (image string, hasImage bool) {\n\t// Get the first image in the dir if there is one\n\tlocation := r.Db.Entries[id].Location\n\tstrId := strconv.Itoa(id)\n\timagePath := \"/albums/a\" + strId + \".jpg\"\n\n\t// // Check if already exists\n\t// if _, err := os.Stat(\"public\" + imagePath); err == nil {\n\t// \t// File already exists\n\t// \timage = imagePath\n\t// \thasImage = true\n\t// \treturn\n\t// }\n\n\t// Remove the bits we dont want\n\tlocation = strings.TrimLeft(location, \"file:/\")\n\tlastSlash := strings.LastIndex(location, \"/\")\n\tlocation = location[:lastSlash+1]\n\t// fmt.Println(html.UnescapeString(location))\n\n\te, _ := 
url.QueryUnescape(location)\n\n\tfilepath.Walk(\"/\"+e, func(path string, _ os.FileInfo, _ error) error {\n\n\t\tlastFour := path[len(path)-4:]\n\t\tif lastFour == \".jpg\" || lastFour == \"jpeg\" || lastFour == \".png\" {\n\t\t\tCopy(\"public/albums/a\"+strId+\".jpg\", path)\n\t\t\timage = imagePath\n\t\t\thasImage = true\n\t\t\treturn nil\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn\n}", "func (f Flickr) PhotosetsGetPhotos(photosetID string, userID string, page int) jsonstruct.PhotosetsGetPhotos {\n\targs := make(map[string]string)\n\targs[\"method\"] = \"flickr.photosets.getPhotos\"\n\targs[\"photoset_id\"] = photosetID\n\targs[\"user_id\"] = userID\n\targs[\"per_page\"] = strconv.Itoa(perPage)\n\targs[\"page\"] = strconv.Itoa(page)\n\n\tjsonData := f.HTTPGet(utils.APIURL, args)\n\n\tvar data jsonstruct.PhotosetsGetPhotos\n\tif err := json.Unmarshal(jsonData, &data); err != nil {\n\t\tlog.Println(err)\n\t}\n\treturn data\n}", "func GetImages(w http.ResponseWriter, r *http.Request) {\n\ttools.SetHeader(w)\n\tif r.Method != \"GET\" {\n\t\treturn\n\t}\n\tvars := r.URL.Query()\n\tname := vars[\"name\"][0]\n\tfmt.Println(name)\n\t//find the images and return []byte\n\tfilepath := imgpath + name\n\ttemp, err := ioutil.ReadFile(filepath)\n\tif err != nil {\n\t\ttools.HandleError(\"GetImages readfile error :\", err, 1)\n\t\treturn\n\t}\n\tw.Write(temp)\n\treturn\n}", "func (c Album) GetAlbums() revel.Result {\n\tre := albumService.GetAlbums(c.GetUserId())\n\treturn c.RenderJSON(re)\n}", "func ListContainerImages(settings *playfab.Settings, postData *ListContainerImagesRequestModel, entityToken string) (*ListContainerImagesResponseModel, error) {\n if entityToken == \"\" {\n return nil, playfab.NewCustomError(\"entityToken should not be an empty string\", playfab.ErrorGeneric)\n }\n b, errMarshal := json.Marshal(postData)\n if errMarshal != nil {\n return nil, playfab.NewCustomError(errMarshal.Error(), playfab.ErrorMarshal)\n }\n\n sourceMap, err := playfab.Request(settings, b, \"/MultiplayerServer/ListContainerImages\", \"X-EntityToken\", entityToken)\n if err != nil {\n return nil, err\n }\n \n result := &ListContainerImagesResponseModel{}\n\n config := mapstructure.DecoderConfig{\n DecodeHook: playfab.StringToDateTimeHook,\n Result: result,\n }\n \n decoder, errDecoding := mapstructure.NewDecoder(&config)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n \n errDecoding = decoder.Decode(sourceMap)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n\n return result, nil\n}", "func (m *Group) GetPhotos()([]ProfilePhotoable) {\n return m.photos\n}", "func GetAllImages(sess *session.Session) (imageMap map[string]ImageDetailList, err error) {\n repos, err := GetRepositories(sess)\n if err != nil { return imageMap, err}\n\n imageMap = make(map[string]ImageDetailList, len(repos))\n for _, r := range repos {\n idl, err := GetImages(*r.RepositoryName, sess)\n if err != nil { return imageMap, err }\n sort.Sort(sort.Reverse(ByPushedAt(idl)))\n imageMap[*r.RepositoryName] = idl\n }\n return imageMap, err\n}", "func GetUploadedImages(ctx *gin.Context) {\n\n\tuserToken, _ := ctx.Get(\"user_token\")\n\tassertedUserToken := userToken.(models.UserToken)\n\n\tpaginator, _ := ctx.Get(\"paginator\")\n\tassertedPaginator := paginator.(inputs.Paginator)\n\n\tctx.JSON(http.StatusOK, services.GetUploadedImages(&assertedUserToken.User, &assertedPaginator))\n}", "func (a *ImageApiService) 
GetArtistImage(ctx _context.Context, name string, imageType ImageType, imageIndex int32, localVarOptionals *GetArtistImageOpts) (*os.File, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue *os.File\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/Artists/{name}/Images/{imageType}/{imageIndex}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", _neturl.QueryEscape(parameterToString(name, \"\")) , -1)\n\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"imageType\"+\"}\", _neturl.QueryEscape(parameterToString(imageType, \"\")) , -1)\n\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"imageIndex\"+\"}\", _neturl.QueryEscape(parameterToString(imageIndex, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif localVarOptionals != nil && localVarOptionals.Tag.IsSet() {\n\t\tlocalVarQueryParams.Add(\"tag\", parameterToString(localVarOptionals.Tag.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Format.IsSet() {\n\t\tlocalVarQueryParams.Add(\"format\", parameterToString(localVarOptionals.Format.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.MaxWidth.IsSet() {\n\t\tlocalVarQueryParams.Add(\"maxWidth\", parameterToString(localVarOptionals.MaxWidth.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.MaxHeight.IsSet() {\n\t\tlocalVarQueryParams.Add(\"maxHeight\", parameterToString(localVarOptionals.MaxHeight.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.PercentPlayed.IsSet() {\n\t\tlocalVarQueryParams.Add(\"percentPlayed\", parameterToString(localVarOptionals.PercentPlayed.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.UnplayedCount.IsSet() {\n\t\tlocalVarQueryParams.Add(\"unplayedCount\", parameterToString(localVarOptionals.UnplayedCount.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Width.IsSet() {\n\t\tlocalVarQueryParams.Add(\"width\", parameterToString(localVarOptionals.Width.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Height.IsSet() {\n\t\tlocalVarQueryParams.Add(\"height\", parameterToString(localVarOptionals.Height.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Quality.IsSet() {\n\t\tlocalVarQueryParams.Add(\"quality\", parameterToString(localVarOptionals.Quality.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.CropWhitespace.IsSet() {\n\t\tlocalVarQueryParams.Add(\"cropWhitespace\", parameterToString(localVarOptionals.CropWhitespace.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.AddPlayedIndicator.IsSet() {\n\t\tlocalVarQueryParams.Add(\"addPlayedIndicator\", parameterToString(localVarOptionals.AddPlayedIndicator.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Blur.IsSet() {\n\t\tlocalVarQueryParams.Add(\"blur\", parameterToString(localVarOptionals.Blur.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.BackgroundColor.IsSet() {\n\t\tlocalVarQueryParams.Add(\"backgroundColor\", parameterToString(localVarOptionals.BackgroundColor.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.ForegroundLayer.IsSet() 
{\n\t\tlocalVarQueryParams.Add(\"foregroundLayer\", parameterToString(localVarOptionals.ForegroundLayer.Value(), \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"image/_*\", \"application/json\", \"application/json; profile=CamelCase\", \"application/json; profile=PascalCase\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"X-Emby-Authorization\"] = key\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v ProblemDetails\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func GetAllImages(apiKey, search string, perPage uint64, page uint64) ([]byte, error) {\n\tattr := map[string]string{\n\t\t\"method\": searchMethod,\n\t\t\"api_key\": apiKey,\n\t\t\"text\": search,\n\t\t\"per_page\": strconv.FormatUint(perPage, 10),\n\t\t\"page\": strconv.FormatUint(page, 10),\n\t\t\"format\": \"json\",\n\t}\n\tsearchURL, err := buildURL(apiKey, attr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Printf(\"search query %s\", searchURL)\n\tresponse, err := http.Get(searchURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif response.StatusCode != http.StatusOK {\n\t\treturn nil, errors.New(response.Status)\n\t}\n\tdefer response.Body.Close()\n\tbody, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\timages, err := 
processResponse(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn json.Marshal(images)\n\n}", "func (a *ImageApiService) GetItemImageInfos(ctx _context.Context, itemId string) ([]ImageInfo, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue []ImageInfo\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/Items/{itemId}/Images\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"itemId\"+\"}\", _neturl.QueryEscape(parameterToString(itemId, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\", \"application/json; profile=CamelCase\", \"application/json; profile=PascalCase\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"X-Emby-Authorization\"] = key\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v ProblemDetails\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (imp *Importer) fetchImages() {\n err := downloadImages(\n imp.idPath,\n func(id string, bodyRdr 
io.Reader) error {\n img, err := jpeg.Decode(bodyRdr)\n if err == nil {\n imp.send(&imagedata.ImageData{Id: id, Data: &img})\n } else {\n log.Printf(\"Error decoding image %s to jpeg\\n\", id)\n }\n return nil\n },\n )\n\n if err != nil { imp.sendErr(err) }\n}", "func (imageService Service) Images() (image []Response, err error) {\n\treturn imageService.QueryImages(nil)\n}", "func (imageService Service) Images() (image []Response, err error) {\n\treturn imageService.QueryImages(nil)\n}", "func GetImages(\n\tstatus string, opts model.ImageOptions, categories []string,\n) ([]model.Image, error) {\n\n\tswitch status {\n\tcase \"unprocessed\":\n\t\treturn GetUnprocessedImages(opts)\n\tcase \"uncategorized\":\n\t\treturn mongodb.GetImages(opts, nil)\n\tcase \"autocategorized\":\n\t\treturn mongodb.GetImages(opts, &model.CategoryMap{\n\t\t\tProposed: categories,\n\t\t})\n\tcase \"categorized\":\n\t\treturn mongodb.GetImages(opts, &model.CategoryMap{\n\t\t\tAssigned: categories,\n\t\t})\n\tdefault:\n\t\treturn mongodb.GetImages(opts, &model.CategoryMap{})\n\t}\n}", "func apiImagesHandler(w http.ResponseWriter, r *http.Request, params map[string]string) {\n\tuserName := sessionHandler.GetUserName(r)\n\tif userName != \"\" {\n\t\tnumber := params[\"number\"]\n\t\tpage, err := strconv.Atoi(number)\n\t\tif err != nil || page < 1 {\n\t\t\thttp.Error(w, \"Not a valid api function!\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\timages := make([]string, 0)\n\t\t// Walk all files in images folder\n\t\terr = filepath.Walk(filenames.ImagesFilepath, func(filePath string, info os.FileInfo, err error) error {\n\t\t\tif !info.IsDir() && (strings.EqualFold(filepath.Ext(filePath), \".jpg\") || strings.EqualFold(filepath.Ext(filePath), \".jpeg\") || strings.EqualFold(filepath.Ext(filePath), \".gif\") || strings.EqualFold(filepath.Ext(filePath), \".png\") || strings.EqualFold(filepath.Ext(filePath), \".svg\")) {\n\t\t\t\t// Rewrite to file path on server\n\t\t\t\tfilePath = strings.Replace(filePath, filenames.ImagesFilepath, \"/images\", 1)\n\t\t\t\t// Make sure to always use \"/\" as path separator (to make a valid url that we can use on the blog)\n\t\t\t\tfilePath = filepath.ToSlash(filePath)\n\t\t\t\t// Prepend file to slice (thus reversing the order)\n\t\t\t\timages = append([]string{filePath}, images...)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\tif len(images) == 0 {\n\t\t\t// Write empty json array\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.Write([]byte(\"[]\"))\n\t\t\treturn\n\t\t}\n\t\timagesPerPage := 15\n\t\tstart := (page * imagesPerPage) - imagesPerPage\n\t\tend := page * imagesPerPage\n\t\tif start > (len(images) - 1) {\n\t\t\t// Write empty json array\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.Write([]byte(\"[]\"))\n\t\t\treturn\n\t\t}\n\t\tif end > len(images) {\n\t\t\tend = len(images)\n\t\t}\n\t\tjson, err := json.Marshal(images[start:end])\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(json)\n\t\treturn\n\t}\n\thttp.Error(w, \"Not logged in!\", http.StatusInternalServerError)\n}", "func showImage(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\",\"application/json\")\n\tparam := mux.Vars(r)\n\titer:=Session.Query(\"SELECT imagelist FROM albumtable WHERE albname='?';\",param[\"image\"]).Iter()\n\tvar data []string\n\tfor iter.Scan(&data){\n\t\tfor _, img := range data 
{\n\t\t\tif img == param[\"image\"] {\n\t\t\t\tjson.NewEncoder(w).Encode(img)\n\t\t\t}\n\t\t}\n\t}\n\tif err := iter.Close(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (imageService Service) QueryImages(queryParameters *QueryParameters) ([]Response, error) {\n\timagesContainer := imagesResponse{}\n\terr := imageService.queryImages(false /*includeDetails*/, &imagesContainer, queryParameters)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn imagesContainer.Images, nil\n}", "func (imageService Service) QueryImages(queryParameters *QueryParameters) ([]Response, error) {\n\timagesContainer := imagesResponse{}\n\terr := imageService.queryImages(false /*includeDetails*/, &imagesContainer, queryParameters)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn imagesContainer.Images, nil\n}", "func (v *IBM) GetImages(ctx *lepton.Context) ([]lepton.CloudImage, error) {\n\tclient := &http.Client{}\n\n\tc := ctx.Config()\n\tzone := c.CloudConfig.Zone\n\n\tregion := extractRegionFromZone(zone)\n\n\turi := \"https://\" + region + \".iaas.cloud.ibm.com/v1/images?version=2023-02-28&generation=2&visibility=private\"\n\n\treq, err := http.NewRequest(\"GET\", uri, nil)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Set(\"Authorization\", \"Bearer \"+v.iam)\n\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tdefer res.Body.Close()\n\n\tbody, err := io.ReadAll(res.Body)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tilr := &ImageListResponse{}\n\terr = json.Unmarshal(body, &ilr)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tvar images []lepton.CloudImage\n\n\tfor _, img := range ilr.Images {\n\t\timages = append(images, lepton.CloudImage{\n\t\t\tID: img.ID,\n\t\t\tName: img.Name,\n\t\t\tStatus: img.Status,\n\t\t\tPath: \"\",\n\t\t})\n\t}\n\n\treturn images, nil\n\n}", "func GetLinkClient(albumID string, clientID string) (imageLink string) {\n\n\t// This hash is the albumID hash\n\turl := \"https://api.imgur.com/3/album/\" + albumID + \"/images\"\n\tmethod := \"GET\"\n\n\tpayload := &bytes.Buffer{}\n\twriter := multipart.NewWriter(payload)\n\terr := writer.Close()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(method, url, payload)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treq.Header.Add(\"Authorization\", \"Client-ID \"+clientID)\n\n\treq.Header.Set(\"Content-Type\", writer.FormDataContentType())\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\tfmt.Println(\"[-] Error connecting:\", err)\n\t}\n\tdefer res.Body.Close()\n\tbody, err := ioutil.ReadAll(res.Body)\n\n\tvar results AlbumImages\n\terrr := json.Unmarshal([]byte(body), &results)\n\tif errr != nil {\n\t\tfmt.Println(\"[!] 
Error unmarshalling::\", errr)\n\t}\n\n\tdatavalues := results.Data\n\tif results.Success == true {\n\t\timageLink = datavalues[0].Link\n\t}\n\treturn imageLink\n\n}", "func NewGetImagesByAlbumApiController(s GetImagesByAlbumApiServicer) Router {\n\treturn &GetImagesByAlbumApiController{service: s}\n}", "func ListImages(client *httputil.ClientConn) ([]string, error) {\n\tbody, err := utils.Do(client, \"GET\", \"/1.0/images\", nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to list images: %v\", err)\n\t}\n\tvar res utils.ListContainerResponse\n\tif err := json.Unmarshal(body, &res); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal ListImages: %v\", err)\n\t}\n\treturn res.Metadata, nil\n}", "func ReadAllImages(output http.ResponseWriter, reader *http.Request) {\n\tLog(\"info\", \"Endpoint Hit: ReadAllImages\")\n\tresults, err := DB.Query(\"SELECT * FROM images\")\n\tErrorHandler(err)\n\n\tvar images Images\n\tfor results.Next() {\n\t\tvar img Image\n\t\t// for each row, scan the result into our image composite object\n\t\terr = results.Scan(&img.ID, &img.Image, &img.Thumbnail, &img.Caption)\n\t\tErrorHandler(err)\n\n\t\t// Append images to array\n\t\timages = append(images, img)\n\t}\n\n\tdefer results.Close()\n\toutput.Header().Set(\"Content-Type\", \"application/json\")\n\toutput.WriteHeader(http.StatusOK)\n\tJSON.NewEncoder(output).Encode(images)\n}", "func (m *Provider) getGenrePics(genre, url string) error {\n\toffset := 0\n\tfor {\n\t\tgenreurl := url + strconv.Itoa(offset)\n\t\tresp, err := soup.Get(genreurl)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdoc := soup.HTMLParse(resp)\n\t\tlinks := doc.FindAll(\"a\", \"class\", \"wallpaper\")\n\t\tif len(links) == 0 {\n\t\t\treturn nil\n\t\t}\n\n\t\tpics := []string{}\n\t\tfor _, l := range links {\n\t\t\tp := strings.Replace(strings.Split(l.Attrs()[\"href\"], \"-\")[0], \"wallpaper\", \"download\", 1)\n\t\t\tpics = append(pics, p)\n\t\t}\n\n\t\tm.addPicURL(genre, pics)\n\t\toffset += len(links)\n\t}\n}", "func GetAlbums() []*Album {\n\tlastFmDetails := GetLastFmConfiguration()\n\tvar albums []*Album\n\n\tapi := lastfm.New(lastFmDetails.ApiKey, lastFmDetails.ApiSecret)\n\tresult, _ := api.User.GetTopAlbums(lastfm.P{\"user\": \"jessicaward25\"}) //discarding error\n\n\tfor _, album := range result.Albums {\n\t\talbums = append(albums, NewAlbum(album.Name, album.Artist.Name, album.Rank, album.Images[0].Url, album.PlayCount))\n\t}\n\n\treturn albums\n}", "func GetImageDetail(request *model.RequestImageDetail) ([]model.ImageDetail, error) {\n\tlog.Printf(\"call GetImageDetail:%v\", request)\n\tvar err error\n\tvar imageTags []swagger.DetailedTag\n\tvar imageDetails []model.ImageDetail\n\tharborClient := client.GetHarborClient()\n\n\trepoName := request.ProjectName + \"/\" + request.ImageName\n\n\t// get tags\n\tif request.Tag == \"\" {\n\t\timageTags, _, err = harborClient.RepositoriesRepoNameTagsGet(repoName)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"Failed to get RepositoriesRepoNameTagsGet: %v\", err)\n\t\t\treturn imageDetails, errors.New(msg)\n\t\t}\n\t} else {\n\t\timageTag, _, err := harborClient.RepositoriesRepoNameTagsTagGet(repoName, request.Tag)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"Failed to get RepositoriesRepoNameTagsTagGet: %v\", err)\n\t\t\treturn imageDetails, errors.New(msg)\n\t\t}\n\t\timageTags = append(imageTags, *imageTag)\n\t}\n\tlog.Printf(\"imageTags:%#v\", imageTags)\n\n\t// get manifest\n\tfor _, imageTag := range imageTags {\n\t\tvar imageDetail 
model.ImageDetail\n\t\tmanifest, _, err := harborClient.RepositoriesRepoNameTagsTagManifestGet(repoName, imageTag.Name, \"\")\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"Failed to get RepositoriesRepoNameTagsTagManifestGet: %v\", err)\n\t\t\treturn imageDetails, errors.New(msg)\n\t\t}\n\t\timageDetail.Image = imageTag\n\t\timageDetail.Manifest = *manifest\n\t\timageDetails = append(imageDetails, imageDetail)\n\t}\n\n\tlog.Printf(\"imageDetails:%#v\", imageDetails)\n\treturn imageDetails, nil\n}", "func (m *User) GetPhotos()([]ProfilePhotoable) {\n return m.photos\n}", "func GetImageLists(c echo.Context) error {\n\treturn c.JSON(http.StatusOK, model.GetFilePaths())\n}", "func (a *ImageApiService) GetMusicGenreImage(ctx _context.Context, name string, imageType ImageType, imageIndex int32, localVarOptionals *GetMusicGenreImageOpts) (*os.File, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue *os.File\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/MusicGenres/{name}/Images/{imageType}/{imageIndex}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", _neturl.QueryEscape(parameterToString(name, \"\")) , -1)\n\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"imageType\"+\"}\", _neturl.QueryEscape(parameterToString(imageType, \"\")) , -1)\n\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"imageIndex\"+\"}\", _neturl.QueryEscape(parameterToString(imageIndex, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif localVarOptionals != nil && localVarOptionals.Tag.IsSet() {\n\t\tlocalVarQueryParams.Add(\"tag\", parameterToString(localVarOptionals.Tag.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Format.IsSet() {\n\t\tlocalVarQueryParams.Add(\"format\", parameterToString(localVarOptionals.Format.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.MaxWidth.IsSet() {\n\t\tlocalVarQueryParams.Add(\"maxWidth\", parameterToString(localVarOptionals.MaxWidth.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.MaxHeight.IsSet() {\n\t\tlocalVarQueryParams.Add(\"maxHeight\", parameterToString(localVarOptionals.MaxHeight.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.PercentPlayed.IsSet() {\n\t\tlocalVarQueryParams.Add(\"percentPlayed\", parameterToString(localVarOptionals.PercentPlayed.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.UnplayedCount.IsSet() {\n\t\tlocalVarQueryParams.Add(\"unplayedCount\", parameterToString(localVarOptionals.UnplayedCount.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Width.IsSet() {\n\t\tlocalVarQueryParams.Add(\"width\", parameterToString(localVarOptionals.Width.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Height.IsSet() {\n\t\tlocalVarQueryParams.Add(\"height\", parameterToString(localVarOptionals.Height.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Quality.IsSet() {\n\t\tlocalVarQueryParams.Add(\"quality\", parameterToString(localVarOptionals.Quality.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.CropWhitespace.IsSet() {\n\t\tlocalVarQueryParams.Add(\"cropWhitespace\", 
parameterToString(localVarOptionals.CropWhitespace.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.AddPlayedIndicator.IsSet() {\n\t\tlocalVarQueryParams.Add(\"addPlayedIndicator\", parameterToString(localVarOptionals.AddPlayedIndicator.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Blur.IsSet() {\n\t\tlocalVarQueryParams.Add(\"blur\", parameterToString(localVarOptionals.Blur.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.BackgroundColor.IsSet() {\n\t\tlocalVarQueryParams.Add(\"backgroundColor\", parameterToString(localVarOptionals.BackgroundColor.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.ForegroundLayer.IsSet() {\n\t\tlocalVarQueryParams.Add(\"foregroundLayer\", parameterToString(localVarOptionals.ForegroundLayer.Value(), \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"image/_*\", \"application/json\", \"application/json; profile=CamelCase\", \"application/json; profile=PascalCase\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"X-Emby-Authorization\"] = key\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v ProblemDetails\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (s *Client) Image(fileID string, page int) (file []byte, err error) 
{\n\tif page <= 0 {\n\t\tpage = 1\n\t}\n\tqueryParam := fmt.Sprintf(\"?page=%d\", page)\n\turl := strings.Join([]string{s.config.apiBaseURL, \"/result/image/\", fileID, queryParam}, \"\")\n\n\tlog.Printf(\"get image url %s\", url)\n\treq, err := http.NewRequest(\"GET\", url, strings.NewReader(\"\"))\n\tif err != nil {\n\t\treturn\n\t}\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"Authorization\", strings.Join([]string{\"Bearer \", s.getToken()}, \"\"))\n\n\tres, err := s.httpClient.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer res.Body.Close()\n\tfile, err = ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn\n}", "func GetNextAlbumsForArtist(url string) (pa SimpleAlbumsPaged, err error) {\n\tt := getAccessToken()\n\n\tr, err := http.NewRequest(\"GET\", url, nil)\n\tr.Header.Add(\"Authorization\", \"Bearer \"+t)\n\n\terr = makeRequest(r, &pa)\n\n\treturn pa, err\n}", "func (us *UsersService) Photos(username string, queryParams client.QueryParams) ([]client.Photo, error) {\n\tctx := context.Background()\n\treturn us.client.GetUserPhotos(ctx, username, queryParams)\n}", "func (l *Lidarr) GetAlbum(mbID string) ([]*Album, error) {\n\treturn l.GetAlbumContext(context.Background(), mbID)\n}", "func getImagespaces(hostBase string) (*http.Response, []*server.Organization, error) {\n\n\turl := fmt.Sprintf(\"%s/\", hostBase)\n\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\treq.Header.Add(\"Accept\", \"application/json\")\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", \"e30K.e30K.e30K\"))\n\tclient := &http.Client{}\n\tresponse, err := client.Do(req)\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\trepositories := []*server.Organization{}\n\n\tbytes, err := ioutil.ReadAll(response.Body)\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tjson.Unmarshal(bytes, &repositories)\n\n\treturn response, repositories, err\n\n}", "func GetMarsPhotos(date string, camera string) (*models.Photos, error) {\n\t// Create URL data\n\turlData := url.Values{}\n\turlData.Set(\"earth_date\", date)\n\tif camera != \"\" && camera != \"all\" {\n\t\turlData.Set(\"camera\", camera)\n\t}\n\turlData.Set(\"api_key\", config.Nasa.APIKey)\n\n\turl, _ := url.ParseRequestURI(config.Nasa.APIURL)\n\turl.Path = config.Nasa.MRPPath\n\turl.RawQuery = urlData.Encode()\n\tencodedUrl := fmt.Sprintf(\"%v\", url)\n\n\tfmt.Printf(\"URL: %v\\n\", encodedUrl)\n\n\t// Send a request to NASA API\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(\"GET\", encodedUrl, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\treq.Header.Add(\"Content-Length\", strconv.Itoa(len(urlData.Encode())))\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tjsonData, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse response from API\n\tphotos := &models.Photos{}\n\terr = json.Unmarshal(jsonData, photos)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Store in cache\n\terr = cache.SetMarsPhotos(date, camera, string(jsonData))\n\tif err != nil {\n\t\tlog.Printf(\"mars-rover-photos: failed to store in cache: %v\\n\", err)\n\t}\n\n\treturn photos, nil\n}", "func (s *API) ListImages(req *ListImagesRequest, opts ...scw.RequestOption) (*ListImagesResponse, error) {\n\tvar err error\n\n\tif req.Region == \"\" {\n\t\tdefaultRegion, _ := s.client.GetDefaultRegion()\n\t\treq.Region = 
defaultRegion\n\t}\n\n\tdefaultPageSize, exist := s.client.GetDefaultPageSize()\n\tif (req.PageSize == nil || *req.PageSize == 0) && exist {\n\t\treq.PageSize = &defaultPageSize\n\t}\n\n\tquery := url.Values{}\n\tparameter.AddToQuery(query, \"page\", req.Page)\n\tparameter.AddToQuery(query, \"page_size\", req.PageSize)\n\tparameter.AddToQuery(query, \"order_by\", req.OrderBy)\n\tparameter.AddToQuery(query, \"namespace_id\", req.NamespaceID)\n\tparameter.AddToQuery(query, \"name\", req.Name)\n\tparameter.AddToQuery(query, \"organization_id\", req.OrganizationID)\n\tparameter.AddToQuery(query, \"project_id\", req.ProjectID)\n\n\tif fmt.Sprint(req.Region) == \"\" {\n\t\treturn nil, errors.New(\"field Region cannot be empty in request\")\n\t}\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"GET\",\n\t\tPath: \"/registry/v1/regions/\" + fmt.Sprint(req.Region) + \"/images\",\n\t\tQuery: query,\n\t\tHeaders: http.Header{},\n\t}\n\n\tvar resp ListImagesResponse\n\n\terr = s.client.Do(scwReq, &resp, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}", "func (c *GalleryImageClient) Get(ctx context.Context, location, name string) (*[]compute.GalleryImage, error) {\n\treturn c.internal.Get(ctx, location, name)\n}", "func GetPhotos() http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tconn, err := middleware.ExtractSession(r)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), 500)\n\t\t\treturn\n\t\t}\n\t\tdao := model.PhotoDao{DB: conn}\n\t\tphotos, err := dao.Read()\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), 500)\n\t\t\treturn\n\t\t}\n\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tjson.NewEncoder(w).Encode(photos)\n\t})\n}", "func (p *parser) GetPictures(a *goquery.Selection) []string {\n\tv := picture{area: a, cleanImg: p.cleanImg}\n\tv.setDetail()\n\treturn v.data\n}", "func GetAlbum(urlArgs url.Values, db *neoism.Database) string {\n\t// Pull selection data from url arguments\n\tas := AlbumSelect{Name: urlArgs.Get(\"name\"), Year: urlArgs.Get(\"year\"), Genre: urlArgs.Get(\"genre\"), Artist: urlArgs.Get(\"artist\")}\n\n\t// Pull Neo4j nodes from DB matching selecton params\n\tres := []struct {\n\t\tN string `json:\"n.name\"`\n\t\tY string `json:\"n.year\"`\n\t\tS int32 `json:\"n.submitted\"`\n\t}{}\n\n\tcq := neoism.CypherQuery{\n\t\t// We use regex matches (=~) to gracefully account for\n\t\t// missing fields, so we can use .*\n\t\tStatement: `\n\t\t\tMATCH (n:Album)\n\t\t\tWHERE n.name =~ {name} AND n.year =~ {year}\n\t\t\tRETURN n.name, n.year, n.submitted;\n\t\t`,\n\t\t// DefMatch is substituting .* for us when necessary\n\t\tParameters: neoism.Props{\"name\": DefMatch(as.Name), \"year\": DefMatch(as.Year)},\n\t\tResult: &res,\n\t}\n\tdb.Cypher(&cq)\n\n\t// Turn the list of Nodes into a list of Albums\n\talbums := make([]Album, 1)\n\tfor _, el := range res {\n\t\tname := el.N\n\t\tyear := el.Y\n\t\tsubmitted := el.S\n\n\t\ta := Album{Name: name, Year: year, Submitted: submitted}\n\t\talbums = append(albums, a)\n\t}\n\n\t// Turn the list of albums into a json representation\n\tjsonReturn, err := json.Marshal(albums)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\treturn string(jsonReturn)\n}", "func GetUnsplashImages(rawurl string) []Image {\n\t_, err := url.ParseRequestURI(rawurl)\n\tCheck(err)\n\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(\"GET\", rawurl, nil)\n\tCheck(err)\n\n\treq.Header.Add(\"User-Agent\", \"uipgo\")\n\tresp, err := 
client.Do(req)\n\tCheck(err)\n\n\tdefer resp.Body.Close()\n\n\tret := [NOOFIMAGES]UnsplashImage{}\n\terr = json.NewDecoder(resp.Body).Decode(&ret)\n\tCheck(err)\n\n\t// type conversion for abiding to interface\n\tretImage := make([]Image, len(ret))\n\tfor i := range ret {\n\t\tretImage[i] = ret[i]\n\t}\n\n\treturn retImage\n}", "func (r *AlbumsService) List() *AlbumsListCall {\n\tc := &AlbumsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\treturn c\n}", "func listMyImages(w http.ResponseWriter, r *http.Request, parms martini.Params) {\r\n\tuid, _ := strconv.ParseInt(parms[\"id\"], 10, 64)\r\n\tvar i CRImage\r\n\tlogger.Println(uid)\r\n\timage := i.QuerybyUser(uid)\r\n\tlogger.Println(image)\r\n\tif err := json.NewEncoder(w).Encode(image); err != nil {\r\n\t\tlogger.Error(err)\r\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\r\n\t}\r\n}", "func ArtistAlbum(id string, page, limit int) (string, error) {\n\t_offset, _limit := formatParams(page, limit)\n\tpreParams := \"{\\\"offset\\\": \"+ _offset +\", \\\"limit\\\": \"+_limit +\", \\\"total\\\": true, \\\"csrf_token\\\": \\\"\\\"}\"\n\tparams, encSecKey, encErr := EncParams(preParams)\n\tif encErr != nil {\n\t\treturn \"\", encErr\n\t}\n\tres, resErr := post(\"http://music.163.com/weapi/artist/albums/\"+id, params, encSecKey)\n\tif resErr != nil {\n\t\treturn \"\", resErr\n\t}\n\treturn res, nil\n}", "func (a *API) GetImages(name string) (*ecs.DescribeImagesResponse, error) {\n\trequest := ecs.CreateDescribeImagesRequest()\n\trequest.Scheme = \"https\"\n\trequest.ImageName = name\n\treturn a.ecs.DescribeImages(request)\n}", "func (d *DockerClient) GetImages() ([]DockerImage, error) {\n\tresp, err := d.makeRequest(\"GET\", fmt.Sprintf(\"%s/images/json\", d.pathPrefix), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if resp.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"GetImages: error status code %s\", resp.StatusCode)\n\t}\n\tdefer resp.Body.Close()\n\tvar images []DockerImage\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = json.Unmarshal(body, &images)\n\treturn images, err\n}", "func (c *APODClient) FetchImageURLs(count int) ([]string, error) {\n\tvar urls []string\n\tdate := time.Now()\n\n\t// make the request\n\tfor i := 0; len(urls) < count; i++ {\n\t\tdate = date.AddDate(0, 0, -i)\n\t\tresp, err := http.Get(c.buildURL(date))\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"error fetching data from APOD API\")\n\t\t}\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\treturn nil, fmt.Errorf(\"Received non-200 status code %d\", resp.StatusCode)\n\t\t}\n\n\t\t// parse the response\n\t\tvar imageMeta APODImageMeta\n\t\tif err := json.NewDecoder(resp.Body).Decode(&imageMeta); err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"error parsing API response\")\n\t\t}\n\n\t\tif imageMeta.MediaType != APODTypeImage {\n\t\t\t// we only want images\n\t\t\tcontinue\n\t\t}\n\t\turls = append(urls, imageMeta.URL)\n\t}\n\n\treturn urls, nil\n}", "func (a *ImageApiService) GetGenreImage(ctx _context.Context, name string, imageType ImageType, imageIndex int32, localVarOptionals *GetGenreImageOpts) (*os.File, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue *os.File\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/Genres/{name}/Images/{imageType}/{imageIndex}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", _neturl.QueryEscape(parameterToString(name, \"\")) , -1)\n\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"imageType\"+\"}\", _neturl.QueryEscape(parameterToString(imageType, \"\")) , -1)\n\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"imageIndex\"+\"}\", _neturl.QueryEscape(parameterToString(imageIndex, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif localVarOptionals != nil && localVarOptionals.Tag.IsSet() {\n\t\tlocalVarQueryParams.Add(\"tag\", parameterToString(localVarOptionals.Tag.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Format.IsSet() {\n\t\tlocalVarQueryParams.Add(\"format\", parameterToString(localVarOptionals.Format.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.MaxWidth.IsSet() {\n\t\tlocalVarQueryParams.Add(\"maxWidth\", parameterToString(localVarOptionals.MaxWidth.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.MaxHeight.IsSet() {\n\t\tlocalVarQueryParams.Add(\"maxHeight\", parameterToString(localVarOptionals.MaxHeight.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.PercentPlayed.IsSet() {\n\t\tlocalVarQueryParams.Add(\"percentPlayed\", parameterToString(localVarOptionals.PercentPlayed.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.UnplayedCount.IsSet() {\n\t\tlocalVarQueryParams.Add(\"unplayedCount\", parameterToString(localVarOptionals.UnplayedCount.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Width.IsSet() {\n\t\tlocalVarQueryParams.Add(\"width\", parameterToString(localVarOptionals.Width.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Height.IsSet() {\n\t\tlocalVarQueryParams.Add(\"height\", parameterToString(localVarOptionals.Height.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Quality.IsSet() {\n\t\tlocalVarQueryParams.Add(\"quality\", parameterToString(localVarOptionals.Quality.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.CropWhitespace.IsSet() {\n\t\tlocalVarQueryParams.Add(\"cropWhitespace\", parameterToString(localVarOptionals.CropWhitespace.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.AddPlayedIndicator.IsSet() {\n\t\tlocalVarQueryParams.Add(\"addPlayedIndicator\", parameterToString(localVarOptionals.AddPlayedIndicator.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Blur.IsSet() {\n\t\tlocalVarQueryParams.Add(\"blur\", parameterToString(localVarOptionals.Blur.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.BackgroundColor.IsSet() {\n\t\tlocalVarQueryParams.Add(\"backgroundColor\", parameterToString(localVarOptionals.BackgroundColor.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.ForegroundLayer.IsSet() {\n\t\tlocalVarQueryParams.Add(\"foregroundLayer\", parameterToString(localVarOptionals.ForegroundLayer.Value(), \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept 
header\n\tlocalVarHTTPHeaderAccepts := []string{\"image/_*\", \"application/json\", \"application/json; profile=CamelCase\", \"application/json; profile=PascalCase\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"X-Emby-Authorization\"] = key\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v ProblemDetails\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func downloadImages(idPath string, respProc responseProcessor) (err error) {\n idFile, err := os.Open(idPath)\n if err != nil { return }\n defer idFile.Close()\n\n err = os.MkdirAll(STORE_DIR, STORE_PERM)\n if err != nil { return }\n\n scanner := bufio.NewScanner(idFile)\n for scanner.Scan() {\n id := scanner.Text()\n var resp *http.Response\n\n attempts := 0\n for {\n resp, err = http.Get(imageUrl(id))\n if err != nil {\n log.Print(err)\n time.Sleep(RETRY_DELAY)\n attempts++\n\n if attempts < RETRY_CUTOFF {\n continue\n } else {\n return err\n }\n }\n defer resp.Body.Close()\n break\n }\n\n err = respProc(id, resp.Body)\n if err != nil { return err }\n\n // break\n }\n\n return\n}", "func GetAllImages(cfg *v1.ClusterConfiguration, kubeadmCfg *kubeadmapi.ClusterConfiguration, operatorVersion string) []string {\n\timgs := images.GetControlPlaneImages(kubeadmCfg)\n\t//for _, component := range []string{\n\t//constants.OnecloudOperator,\n\t//} {\n\t//imgs = append(imgs, GetOnecloudImage(component, cfg, kubeadmCfg))\n\t//}\n\trepoPrefix := kubeadmCfg.ImageRepository\n\tfor img, version := range map[string]string{\n\t\tconstants.CalicoKubeControllers: constants.DefaultCalicoVersion,\n\t\tconstants.CalicoNode: 
constants.DefaultCalicoVersion,\n\t\tconstants.CalicoCNI: constants.DefaultCalicoVersion,\n\t\tconstants.RancherLocalPathProvisioner: constants.DefaultLocalProvisionerVersion,\n\t\tconstants.IngressControllerTraefik: constants.DefaultTraefikVersion,\n\t\tconstants.OnecloudOperator: operatorVersion,\n\t} {\n\t\timgs = append(imgs, GetGenericImage(repoPrefix, img, version))\n\t}\n\treturn imgs\n}", "func (client ListManagementImageClient) GetAllImageIdsResponder(resp *http.Response) (result ImageIds, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func GetAll(ctx *routing.Context) error {\n\tdb := ctx.Get(\"db\").(*gorm.DB)\n\tdata := []models.ImageModel{}\n\tdb.Model(&dbmodels.Image{}).Scan(&data)\n\n\tres := models.NewResponse(true, data, \"OK\")\n\n\treturn ctx.WriteData(res.MustMarshal())\n}", "func GetAlbums() *[]models.Album{\n\tvar albums []models.Album\n\tvar album models.Album\n\n\tdb, err := open()\n\tdefer db.Close()\n\tutil.CheckErr(\"GetAlbums\", err, true)\n\n\trows, err := db.Query(\"SELECT * FROM albums\")\n\n\tutil.CheckErr(\"GetAlbums\", err, true)\n\n\tfor rows.Next() {\n\t\terr = rows.Scan(&album.Id,\n\t\t\t&album.Name,\n\t\t\t&album.ArtistId,\n\t\t\t&album.Year)\n\t\tutil.CheckErr(\"GetAlbums\", err, true)\n\t\talbums = append(albums, album)\n\t}\n\n\terr = rows.Err()\n\tutil.CheckErr(\"GetAlbums\", err, true)\n\treturn & albums\n}", "func allImagesHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\tenableCors(&w)\n\t\tsession, err := mgo.Dial(mongodb_server)\n if err != nil {\n\t\t\tvar errorResponse ErrorResponse\n\t\t\terrorResponse.Message = \"Server Error\"\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, errorResponse)\n\t\t\tpanic(err)\n\t\t\treturn\n }\n defer session.Close()\n session.SetMode(mgo.Monotonic, true)\n conn := session.DB(mongodb_database).C(mongodb_collection)\n\t\tresult := make([]Image, 10, 10)\n\t\terr = conn.Find(nil).All(&result)\n if err != nil {\n\t\t\tlog.Print(err)\n\t\t\tvar errorResponse ErrorResponse\n\t\t\terrorResponse.Message = \"No image found\"\n\t\t\tformatter.JSON(w, http.StatusBadRequest, errorResponse)\n } else {\n\t\t\tformatter.JSON(w, http.StatusOK, result)\n\t\t}\n\t}\n}", "func GetAllPhotosByUser(username string) *[]Photo {\n\tvar photos []Photo\n\tvar photosFile []byte\n\n\tphotosFile, err := ioutil.ReadFile(packageTools.GetWD() + \"/static/data/photos_\" + username + \".json\")\n\n\tif err != nil {\n\t\tfmt.Println(\"Neue Datei anlegen: photos_\" + username + \".json\")\n\t}\n\n\terr = json.Unmarshal(photosFile, &photos)\n\n\tif err != nil {\n\t\t// panic(err)\n\t}\n\n\treturn &photos\n}", "func GetPhotosByMonthHandler(c *gin.Context) {\n\tif !service.VerifyAPIRequest(c, c.Request.Header[\"Token\"]) {\n\t\treturn\n\t}\n\n\toffset, err := strconv.Atoi(c.Param(\"offset\"))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tresult, err := MyStore.GetPhotosByMonth(offset)\n\n\tif err == nil {\n\t\tc.Header(\"Access-Control-Allow-Origin\", \"*\")\n\t\tc.Header(\"Content-Type\", \"application/json\")\n\t\tc.JSON(http.StatusOK, result)\n\t} else {\n\t\tpanic(err)\n\t}\n}", "func PostPhotoAlbumPhotoHandler(w http.ResponseWriter, r *http.Request) {\n\tfuncTag := \"PostPhotoAlbumPhotoHandler\"\n\n\t// TODO: Get data from body of the 
request\n\tphoto := &photoDB.Photo{\n\t\tAlbumID: 3,\n\t\tTitle: \"Yeah\",\n\t\tDescription: \"Man\",\n\t\tSrc: \"sam-shortline-candler-grandy-papa-daddy-with-train-12.jpg\",\n\t}\n\n\t// create a transaction\n\ttxo, err := photoDB.NewTxO(\"Test User\")\n\tif err != nil {\n\t\terr = apierr.Errorf(err, funcTag, \"open db transaction\")\n\t\tresponder.SendJSONError(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\t// create the photo\n\tphoto, err = photoDB.CreatePhoto(txo, photo)\n\tif errTxo := txo.RollbackOnError(err); errTxo != nil {\n\t\terr = apierr.Errorf(err, funcTag, \"create photo\")\n\t\tresponder.SendJSONError(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\t// commit transaction\n\terr = txo.Commit()\n\tif err != nil {\n\t\terr = apierr.Errorf(err, funcTag, \"commit db transaction\")\n\t\tresponder.SendJSONError(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\t// build the return data\n\tres := &GetPhotosResponse{}\n\tres.Photos = []*photoDB.Photo{photo}\n\n\t// return\n\tresponder.SendJSON(w, res)\n}", "func (picHdlr *PictureHandler) GetManyPictures(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tidRaw := ps.ByName(\"placeid\")\n\tid, err := strconv.Atoi(idRaw)\n\tif err != nil {\n\t\tw.Header().Set(\"Content-type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\n\tuser, err := picHdlr.picSrv.GetManyPictures(id)\n\tif err != nil {\n\t\tw.Header().Set(\"Content-type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\toutput, err := json.MarshalIndent(user, \"\", \"\\t\\t\")\n\tif err != nil {\n\t\tw.Header().Set(\"Content-type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-type\", \"application/json\")\n\tw.Write(output)\n\treturn\n}", "func GetImageMetaData(w http.ResponseWriter, r *http.Request) {\n\n\ttype ResponseModel struct {\n\t\tImageMetaData model.Image\n\t\tComments []model.Comment\n\t}\n\n\t//Response Parameter\n\tvars := mux.Vars(r)\n\timageID := vars[\"imageID\"]\n\n\t//Get Data and make Response\n\timage, err := model.GetImageMetaData(imageID)\n\tif err != nil {\n\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\n\t}\n\n\timage.Likes, err = image.GetLikeCounts()\n\tif err != nil {\n\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\n\t}\n\n\tcomments, err := image.GetComments()\n\tif err != nil {\n\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\n\t}\n\n\trepsoneModel := ResponseModel{\n\t\tImageMetaData: image,\n\t\tComments: comments,\n\t}\n\n\t//Create JSON\n\tresponseJSON, err := json.Marshal(repsoneModel)\n\tif err != nil {\n\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\n\t}\n\n\t//Write Response\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(responseJSON)\n\n}", "func (gcs *gcsPhotos) retrievePhotos() (photos []string) {\n qry := &storage.Query {\n MaxResults: 6,\n }\n\tfiles, err := gcs.bucket.List(gcs.ctx, qry)\n\t\tif err != nil {\n\t\t\tlog.Errorf(gcs.ctx, \"listBucketDirMode: unable to list bucket %q: %v\", gcsBucket, err)\n\t\t\treturn\n\t\t}\n\tfor _, name := range files.Results {\n\t\tphotos = append(photos, name.Name)\n\t}\n\treturn\n}", "func picturesMock(w http.ResponseWriter, r *http.Request) 
{\n\tjson := `{\"copyright\":\"Amir H. Abolfath\",\"date\":\"2019-12-06\",\"explanation\":\"This frame.\",\"hdurl\":\"https://apod.nasa.gov/apod/image/1912/TaurusAbolfath.jpg\",\"media_type\":\"image\",\"service_version\":\"v1\",\"title\":\"Pleiades to Hyades\",\"url\":\"https://apod.nasa.gov/apod/image/1912/TaurusAbolfath1024.jpg\"}`\n\tw.WriteHeader(200)\n\t_, _ = w.Write([]byte(json))\n}", "func (c Client) ListImages() ([]models.Image, error) {\n\tvar images []models.Image\n\tresp, err := c.get(\"/images\")\n\tif err != nil {\n\t\treturn images, err\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn images, parseError(resp.Body)\n\t}\n\n\tmaybeImages, err := jsonapi.UnmarshalManyPayload(resp.Body, reflect.TypeOf(images))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Convert from []interface{} to []Image\n\timages = make([]models.Image, 0)\n\tfor _, image := range maybeImages {\n\t\ti := image.(*models.Image)\n\t\timages = append(images, *i)\n\t}\n\n\treturn images, nil\n}", "func DecodeStorageImagesGetResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\tvar (\n\t\t\t\tbody *vm.Image\n\t\t\t\terr error\n\t\t\t)\n\t\t\terr = decoder(resp).Decode(&body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, goahttp.ErrDecodingError(\"spin-registry\", \"storage_images_get\", err)\n\t\t\t}\n\t\t\treturn body, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_images_get\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func (client ArtifactsClient) listContainerImages(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/container/images\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response ListContainerImagesResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\tapiReferenceLink := \"https://docs.oracle.com/iaas/api/#/en/registry/20160918/ContainerImageSummary/ListContainerImages\"\n\t\terr = common.PostProcessServiceError(err, \"Artifacts\", \"ListContainerImages\", apiReferenceLink)\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func GetImageByClothesId(c * gin.Context){\n\tdb := database.DBConn()\n\trows, err := db.Query(\"SELECT * FROM images where clothesId = \"+c.Param(\"id\"))\n\tif err != nil{\n\t\tc.JSON(500, gin.H{\n\t\t\t\"messages\" : \"Story not found\",\n\t\t});\n\t}\n\tpost := DTO.ImageDTO{}\n\tlist := [] DTO.ImageDTO{}\n\tfor rows.Next(){\n\t\tvar id, clothesId int\n\t\tvar link string\n\t\terr = rows.Scan(&id, &link, &clothesId)\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t\tpost.Id = id\n\t\tpost.ClothesId = clothesId\n\t\tpost.Link = link\n\t\tlist = 
append(list,post)\n\t}\n\tc.JSON(200, list)\n\tdefer db.Close()\n}", "func (a *api) h_GET_images_png_download(c *gin.Context) {\n\timgName := c.Param(\"imgName\")\n\ta.logger.Debug(\"GET /images/\", imgName)\n\n\timd := &image.ImgDesc{}\n\terr := imd.ParseFileName(imgName)\n\tif a.errorResponse(c, err) {\n\t\treturn\n\t}\n\n\taCtx := a.getAuthContext(c)\n\tif a.errorResponse(c, aCtx.AuthZCamAccess(imd.CamId, auth.AUTHZ_LEVEL_OA)) {\n\t\treturn\n\t}\n\n\tq := c.Request.URL.Query()\n\twdth, _ := parseInt64QueryParam2(\"width\", q, 0)\n\thght, _ := parseInt64QueryParam2(\"height\", q, 0)\n\n\trdr, err := a.ImageService.GetImageByFileName(imd, int(wdth), int(hght))\n\tif a.errorResponse(c, err) {\n\t\treturn\n\t}\n\n\tw := c.Writer\n\tr := c.Request\n\tw.Header().Set(\"Content-Disposition\", \"attachment; filename=\\\"\"+imgName+\"\\\"\")\n\thttp.ServeContent(w, r, imgName, time.Now(), rdr.(io.ReadSeeker))\n}", "func ListAlbum() ([]YearSchema, []AlbumSchema, error) {\n\tvar err error\n\tyearList := []YearSchema{}\n\tif err = configure.SQL.Select(&yearList, `\n\t\tSELECT a.year as year, COUNT(DISTINCT a.id) as event_number, COUNT(p.id) as photo_number\n\t\tFROM Album AS a LEFT JOIN Photo AS p\n\t\tON a.id = p.album_id \n\t\tGROUP BY a.year\n\t`); err != nil {\n\t\tlog.Println(\"Failed on YearSchema\")\n\t}\n\n\tlog.Println(\"yearList: \", yearList)\n\n\tphotoList := []PhotoSchema{}\n\tif err = configure.SQL.Select(&photoList, `\n\t\tSELECT a.id as id, a.year as year, a.title as title, a.date as date, p.path as path\n\t\tFROM Album AS a LEFT JOIN Photo AS p \n\t\tON a.id = p.album_id\n\t`); err != nil {\n\t\tlog.Println(\"Failed on PhotoSchema\")\n\t}\n\n\tlog.Println(\"photoList: \", photoList)\n\n\talbumMap := map[int]AlbumSchema{}\n\tfor _, photo := range photoList {\n\t\tif album, ok := albumMap[photo.AlbumID]; ok {\n\t\t\talbum.PhotoNumber++\n\t\t\talbum.Photos = append(album.Photos, photo.Path)\n\t\t\talbumMap[photo.AlbumID] = album\n\t\t} else {\n\t\t\tnewAlbum := AlbumSchema{\n\t\t\t\tphoto.AlbumID,\n\t\t\t\tphoto.AlbumYear,\n\t\t\t\tphoto.AlbumTitle,\n\t\t\t\tphoto.AlbumDate,\n\t\t\t\t1,\n\t\t\t\t[]string{photo.Path},\n\t\t\t}\n\t\t\talbumMap[newAlbum.ID] = newAlbum\n\t\t}\n\t}\n\talbumList := []AlbumSchema{}\n\tfor _, album := range albumMap {\n\t\talbumList = append(albumList, album)\n\t}\n\tlog.Println(\"albumList: \", albumList)\n\treturn yearList, albumList, err\n}", "func GetAlbumFromAPI(id dna.Int) (*Album, error) {\n\tvar album *Album = NewAlbum()\n\talbum.Id = id\n\tapialbum, err := GetAPIAlbum(id)\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tif apialbum.Response.MsgCode == 1 {\n\t\t\tif GetKey(apialbum.Id) != GetKey(album.Id) {\n\t\t\t\terrMes := dna.Sprintf(\"Resulted key and computed key are not match. 
%v =/= %v , id: %v =/= %v\", GetKey(apialbum.Id), GetKey(album.Id), id, apialbum.Id)\n\t\t\t\tpanic(errMes.String())\n\t\t\t}\n\n\t\t\talbum.Title = apialbum.Title\n\t\t\talbum.Artists = dna.StringArray(apialbum.Artists.Split(\" , \").Map(func(val dna.String, idx dna.Int) dna.String {\n\t\t\t\treturn val.Trim()\n\t\t\t}).([]dna.String)).SplitWithRegexp(\",\").Filter(func(v dna.String, i dna.Int) dna.Bool {\n\t\t\t\tif v != \"\" {\n\t\t\t\t\treturn true\n\t\t\t\t} else {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t})\n\n\t\t\talbum.Topics = dna.StringArray(apialbum.Topics.Split(\", \").Map(func(val dna.String, idx dna.Int) dna.String {\n\t\t\t\treturn val.Trim()\n\t\t\t}).([]dna.String)).SplitWithRegexp(\" / \").Unique().Filter(func(v dna.String, i dna.Int) dna.Bool {\n\t\t\t\tif v != \"\" {\n\t\t\t\t\treturn true\n\t\t\t\t} else {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t})\n\t\t\talbum.Plays = apialbum.Plays\n\t\t\t// album.Songids\n\t\t\t// album.Nsongs\n\t\t\t// album.EncodedKey\n\t\t\t// album.Coverart\n\t\t\t// album.DateCreated\n\t\t\talbum.YearReleased = apialbum.YearReleased\n\t\t\talbum.Description = apialbum.Description.RemoveHtmlTags(\"\")\n\n\t\t\talbum.ArtistIds = apialbum.ArtistIds.Split(\",\").ToIntArray()\n\t\t\talbum.IsAlbum = apialbum.IsAlbum\n\t\t\talbum.IsHit = apialbum.IsHit\n\t\t\talbum.IsOfficial = apialbum.IsOfficial\n\t\t\talbum.Likes = apialbum.Likes\n\t\t\talbum.StatusId = apialbum.StatusId\n\t\t\talbum.Comments = apialbum.Comments\n\t\t\talbum.Checktime = time.Now()\n\t\t\treturn album, nil\n\t\t} else {\n\t\t\treturn nil, errors.New(\"Message code invalid \" + apialbum.Response.MsgCode.ToString().String())\n\t\t}\n\t}\n}", "func (addon Addon) Images(clusterVersion *version.Version, imageTag string) []string {\n\timages := []string{}\n\tfor _, cb := range addon.getImageCallbacks {\n\t\timage := cb(clusterVersion, imageTag)\n\t\tif image != \"\" {\n\t\t\timages = append(images, image)\n\t\t}\n\t}\n\treturn images\n}", "func (c *APIContext) ImageGet(res web.ResponseWriter, req *web.Request) {\n\n\tid := req.PathParams[\"image_id\"]\n\timg, err := models.LoadImage(c.Database, req.PathParams[\"image_id\"])\n\tif err != nil {\n\t\tif err == mgo.ErrNotFound {\n\t\t\tres.WriteHeader(http.StatusNotFound)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Printf(\"Error while loading image %s from database for user %s (%s)\", id, c.User, err)\n\t\t\tres.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Write it out as json\n\tdata, err := json.Marshal(img)\n\tif err != nil {\n\t\tlog.Printf(\"Error while marshalling image %s to JSON for user %s (%s)\", id, c.User, err)\n\t\tres.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tres.WriteHeader(http.StatusOK)\n\tfmt.Fprint(res, string(data[:]))\n\n}", "func (in *Database) GetImages() ([]*types.Image, error) {\n\trec := []*types.Image{}\n\ttxn := in.db.Txn(false)\n\tdefer txn.Abort()\n\tit, err := txn.Get(\"image\", \"id\")\n\tif err != nil {\n\t\treturn rec, err\n\t}\n\tfor obj := it.Next(); obj != nil; obj = it.Next() {\n\t\trec = append(rec, obj.(*types.Image))\n\t}\n\treturn rec, nil\n}", "func (client *GalleryImagesClient) listByGalleryHandleResponse(resp *http.Response) (GalleryImagesClientListByGalleryResponse, error) {\n\tresult := GalleryImagesClientListByGalleryResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GalleryImageList); err != nil {\n\t\treturn GalleryImagesClientListByGalleryResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client 
*GalleryImagesClient) getHandleResponse(resp *http.Response) (GalleryImagesClientGetResponse, error) {\n\tresult := GalleryImagesClientGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GalleryImage); err != nil {\n\t\treturn GalleryImagesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func GetBookmarkedImages(ctx *gin.Context) {\n\n\tuserToken, _ := ctx.Get(\"user_token\")\n\tassertedUserToken := userToken.(models.UserToken)\n\n\tpaginator, _ := ctx.Get(\"paginator\")\n\tassertedPaginator := paginator.(inputs.Paginator)\n\n\tctx.JSON(http.StatusOK, services.GetBookmarkedImages(&assertedUserToken.User, &assertedPaginator))\n}", "func ListContainerImageTags(settings *playfab.Settings, postData *ListContainerImageTagsRequestModel, entityToken string) (*ListContainerImageTagsResponseModel, error) {\n if entityToken == \"\" {\n return nil, playfab.NewCustomError(\"entityToken should not be an empty string\", playfab.ErrorGeneric)\n }\n b, errMarshal := json.Marshal(postData)\n if errMarshal != nil {\n return nil, playfab.NewCustomError(errMarshal.Error(), playfab.ErrorMarshal)\n }\n\n sourceMap, err := playfab.Request(settings, b, \"/MultiplayerServer/ListContainerImageTags\", \"X-EntityToken\", entityToken)\n if err != nil {\n return nil, err\n }\n \n result := &ListContainerImageTagsResponseModel{}\n\n config := mapstructure.DecoderConfig{\n DecodeHook: playfab.StringToDateTimeHook,\n Result: result,\n }\n \n decoder, errDecoding := mapstructure.NewDecoder(&config)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n \n errDecoding = decoder.Decode(sourceMap)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n\n return result, nil\n}", "func CheckinPeopleImageGET(w http.ResponseWriter, r *http.Request) {\n\tid := r.URL.Query().Get(\"id\")\n image_id := r.URL.Query().Get(\"image_id\")\n\n var image string\n if image_id == \"\" {\n p, err := people.GetPerson(id)\n if err != nil {\n Error(w, err, http.StatusInternalServerError)\n return\n }\n image = p.Image\n } else {\n imgs, err := images.GetImages(id, image_id)\n if err != nil || len(imgs.Images) != 1 {\n Error(w, err, http.StatusInternalServerError)\n return\n }\n image = imgs.Images[0]\n }\n\n by, err := base64.StdEncoding.DecodeString(image)\n if err != nil {\n Error(w, err, http.StatusInternalServerError)\n return\n }\n\tw.Header().Set(\"Content-Type\", \"image/*\")\n\tw.Write(by)\n}", "func (r *AlbumsService) Get(albumId string) *AlbumsGetCall {\n\tc := &AlbumsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.albumId = albumId\n\treturn c\n}" ]
[ "0.78062147", "0.75835025", "0.7488234", "0.7022131", "0.69949764", "0.6842922", "0.68154794", "0.66601187", "0.6651162", "0.6643078", "0.652421", "0.65002805", "0.6458088", "0.64554036", "0.6439885", "0.64233345", "0.63966346", "0.6379913", "0.6376665", "0.6360784", "0.6358435", "0.63551986", "0.6300538", "0.62950176", "0.62835944", "0.6267008", "0.61756814", "0.61743057", "0.61183137", "0.6045625", "0.60350734", "0.60327876", "0.60312265", "0.60188895", "0.6007925", "0.6007925", "0.5997986", "0.59879875", "0.59631586", "0.59287333", "0.59287333", "0.5896582", "0.5893012", "0.5862044", "0.58408356", "0.5822884", "0.58051044", "0.57933056", "0.5765624", "0.5759973", "0.5747452", "0.5745877", "0.573164", "0.57291955", "0.57164234", "0.5692894", "0.5681963", "0.5671978", "0.5660546", "0.56568986", "0.56526154", "0.56485516", "0.56278515", "0.56221753", "0.5620395", "0.56174463", "0.5616907", "0.5594976", "0.5576256", "0.5568901", "0.5564481", "0.5561738", "0.5555675", "0.5553241", "0.5543053", "0.554079", "0.55346125", "0.55278033", "0.55231506", "0.5509099", "0.5500451", "0.5492749", "0.54821974", "0.5479397", "0.5462187", "0.54176164", "0.5416251", "0.53973705", "0.53921574", "0.53919435", "0.53912497", "0.5388015", "0.5384712", "0.53829587", "0.5378407", "0.53778833", "0.5368085", "0.5367954", "0.5363919", "0.5359552" ]
0.7720157
1
GetLinkClient is a function to grab the tasking link for the client
func GetLinkClient(albumID string, clientID string) (imageLink string) {\n\n\t// This hash is the albumID hash\n\turl := "https://api.imgur.com/3/album/" + albumID + "/images"\n\tmethod := "GET"\n\n\tpayload := &bytes.Buffer{}\n\twriter := multipart.NewWriter(payload)\n\terr := writer.Close()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(method, url, payload)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treq.Header.Add("Authorization", "Client-ID "+clientID)\n\n\treq.Header.Set("Content-Type", writer.FormDataContentType())\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\tfmt.Println("[-] Error connecting:", err)\n\t}\n\tdefer res.Body.Close()\n\tbody, err := ioutil.ReadAll(res.Body)\n\n\tvar results AlbumImages\n\terrr := json.Unmarshal([]byte(body), &results)\n\tif errr != nil {\n\t\tfmt.Println("[!] Error unmarshalling::", errr)\n\t}\n\n\tdatavalues := results.Data\n\tif results.Success == true {\n\t\timageLink = datavalues[0].Link\n\t}\n\treturn imageLink\n\n}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetLink(key string) (string, error) {\n\treturn RedisClient.Get(Ctx, key).Result()\n}", "func OIDCGetLink(discoveryURL, clientID, clientSecret, redirectURL string) (string, error) {\n\tctx := context.Background()\n\tprovider, err := oidc.NewProvider(ctx, discoveryURL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\toauth2Config := oauth2.Config{\n\t\tClientID: clientID,\n\t\tClientSecret: clientSecret,\n\t\tRedirectURL: redirectURL,\n\t\tEndpoint: provider.Endpoint(),\n\t\tScopes: []string{oidc.ScopeOpenID},\n\t}\n\n\treturn fmt.Sprintf(\"{\\\"url\\\": \\\"%s\\\"}\", oauth2Config.AuthCodeURL(\"\", oauth2.AccessTypeOffline, oauth2.ApprovalForce)), nil\n}", "func (a *Client) GetLink(params *GetLinkParams) (*GetLinkOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetLinkParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getLink\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/links/{itemName}/{channelUID}\",\n\t\tProducesMediaTypes: []string{\"\"},\n\t\tConsumesMediaTypes: []string{\"\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &GetLinkReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetLinkOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for getLink: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func ExampleLinkerClient_Get() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armservicelinker.NewClientFactory(cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewLinkerClient().Get(ctx, \"subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.Web/sites/test-app\", \"linkName\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.LinkerResource = armservicelinker.LinkerResource{\n\t// \tName: to.Ptr(\"linkName\"),\n\t// \tType: to.Ptr(\"Microsoft.ServiceLinker/links\"),\n\t// \tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.Web/sites/test-app/providers/Microsoft.ServiceLinker/links/linkName\"),\n\t// \tProperties: &armservicelinker.LinkerProperties{\n\t// \t\tAuthInfo: &armservicelinker.SecretAuthInfo{\n\t// \t\t\tAuthType: to.Ptr(armservicelinker.AuthTypeSecret),\n\t// \t\t\tName: to.Ptr(\"name\"),\n\t// \t\t},\n\t// \t\tTargetService: &armservicelinker.AzureResource{\n\t// \t\t\tType: to.Ptr(armservicelinker.TargetServiceTypeAzureResource),\n\t// \t\t\tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.DocumentDb/databaseAccounts/test-acc/mongodbDatabases/test-db\"),\n\t// \t\t},\n\t// \t},\n\t// \tSystemData: &armservicelinker.SystemData{\n\t// \t\tCreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2020-07-12T22:05:09Z\"); return t}()),\n\t// \t},\n\t// }\n}", "func (i IntransitiveActivity) GetLink() IRI {\n\treturn IRI(i.ID)\n}", "func (mc *mgmtClient) getLinkWithoutLock(ctx context.Context) (RPCLink, error) {\n\tif mc.rpcLink != nil {\n\t\treturn mc.rpcLink, nil\n\t}\n\n\tvar err error\n\tmc.rpcLink, err = mc.ns.NewRPCLink(ctx, mc.links.ManagementPath())\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn mc.rpcLink, nil\n}", "func getLink(header http.Header, rel string) string {\n\tlinks := getLinks(header, rel)\n\tif len(links) < 1 {\n\t\treturn \"\"\n\t}\n\n\treturn links[0]\n}", "func GetClientURL(localEndpoint *kubeadmapi.APIEndpoint) string {\n\treturn \"https://\" + net.JoinHostPort(localEndpoint.AdvertiseAddress, strconv.Itoa(constants.EtcdListenClientPort))\n}", "func GetLink(ctx context.Context, client dynamic.Interface, namespace, name string) (Link, error) {\n\tunstructured, err := client.Resource(LinkGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn Link{}, err\n\t}\n\treturn NewLink(*unstructured)\n}", "func (ac *azureClient) GetLink(ctx context.Context, resourceGroupName, zoneName, vnetLinkName string) (privatedns.VirtualNetworkLink, error) {\n\tctx, _, done := tele.StartSpanWithLogger(ctx, \"privatedns.AzureClient.GetLink\")\n\tdefer done()\n\tvnetLink, err := ac.vnetlinks.Get(ctx, resourceGroupName, zoneName, vnetLinkName)\n\tif err != nil {\n\t\treturn privatedns.VirtualNetworkLink{}, err\n\t}\n\treturn vnetLink, nil\n}", "func getClient(url string, groupID uint) (*client.Client) {\n\t// RPC API\n\tc, err := client.Dial(url, groupID) // change to your RPC and groupID\n\tif err != nil {\n fmt.Println(\"can not dial to the RPC API, please check the config file gobcos_config.yaml: \", err)\n os.Exit(1)\n\t}\n\treturn c\n}", "func (s *StanServer) ClientURL() string {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tif s.providedServerURL != \"\" {\n\t\treturn s.providedServerURL\n\t} else if s.natsServer != nil {\n\t\treturn s.natsServer.ClientURL()\n\t} else {\n\t\treturn \"\"\n\t}\n}", "func ExamplePrivateLinkResourcesClient_Get() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armpowerbiprivatelinks.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err 
!= nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewPrivateLinkResourcesClient().Get(ctx, \"resourceGroup\", \"azureResourceName\", \"tenant\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.PrivateLinkResource = armpowerbiprivatelinks.PrivateLinkResource{\n\t// \tName: to.Ptr(\"tenant\"),\n\t// \tType: to.Ptr(\"Microsoft.PowerBI/{resourceType}/privateLinkResources\"),\n\t// \tID: to.Ptr(\"subscriptions/a0020869-4d28-422a-89f4-c2413130d73c/resourceGroups/resourceGroup/providers/Microsoft.PowerBI/privateLinkServicesForPowerBI/azureResourceName/privateLinkResources/tenant\"),\n\t// \tProperties: &armpowerbiprivatelinks.PrivateLinkResourceProperties{\n\t// \t\tGroupID: to.Ptr(\"tenant\"),\n\t// \t\tRequiredMembers: []*string{\n\t// \t\t\tto.Ptr(\"tenant\"),\n\t// \t\t\tto.Ptr(\"capacity:3df897a4f10b49e9bddb0e9cf062adba\")},\n\t// \t\t\tRequiredZoneNames: []*string{\n\t// \t\t\t\tto.Ptr(\"privatelink.powerbi.com\")},\n\t// \t\t\t},\n\t// \t\t}\n}", "func (generator *Generator) GetRedirectLink(userId string) (string, error) {\n\tu := uuid.New().String()\n\thash := getHashOf(u)\n\turl := fmt.Sprintf(\"%s/api/auth/link/%s?u=%s&h=%s\", generator.baseAddress, userId, u, hash)\n\treturn url, nil\n}", "func (_UpkeepRegistrationRequests *UpkeepRegistrationRequestsCallerSession) LINK() (common.Address, error) {\n\treturn _UpkeepRegistrationRequests.Contract.LINK(&_UpkeepRegistrationRequests.CallOpts)\n}", "func GetLink(path string) (string, error) {\n\t// Prepare data\n\tdata := url.Values{}\n\tdata.Set(\"path\", path)\n\tdata.Add(\"read\", \"1\")\n\tdata.Add(\"list\", \"1\")\n\n\t// Prepare the request (URL, auth, headers)\n\turl := viper.GetString(\"smartFile.url\") + viper.GetString(\"smartFile.api.link\")\n\tbody := strings.NewReader(data.Encode())\n\tr, err := http.NewRequest(\"POST\", url, body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tr.Close = true\n\tr.SetBasicAuth(viper.GetString(\"smartFile.keys.public\"), viper.GetString(\"smartFile.keys.private\"))\n\tr.Header.Add(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\tr.Header.Add(\"Content-Length\", strconv.Itoa(len(data.Encode())))\n\n\t// Execute the upload\n\tc := &http.Client{Timeout: time.Second * 20}\n\tresp, err := c.Do(r)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tl := structs.SmartFileLink{}\n\terr = json.Unmarshal(b, &l)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn l.Href, nil\n}", "func (c *Collector) Link() string {\n\treturn URLForResults(c.referenceID, c.config)\n}", "func (opa *client) getURL(path string) string {\n\treturn fmt.Sprintf(\"%s/%s\", opa.Host, path)\n}", "func (d *directory) getLink(typ Link, provisionerName string, abs bool, inputs ...string) string {\n\tvar link string\n\tswitch typ {\n\tcase NewNonceLink, NewAccountLink, NewOrderLink, NewAuthzLink, DirectoryLink, KeyChangeLink, RevokeCertLink:\n\t\tlink = fmt.Sprintf(\"/%s/%s\", provisionerName, typ.String())\n\tcase AccountLink, OrderLink, AuthzLink, ChallengeLink, CertificateLink:\n\t\tlink = 
fmt.Sprintf(\"/%s/%s/%s\", provisionerName, typ.String(), inputs[0])\n\tcase OrdersByAccountLink:\n\t\tlink = fmt.Sprintf(\"/%s/%s/%s/orders\", provisionerName, AccountLink.String(), inputs[0])\n\tcase FinalizeLink:\n\t\tlink = fmt.Sprintf(\"/%s/%s/%s/finalize\", provisionerName, OrderLink.String(), inputs[0])\n\t}\n\tif abs {\n\t\treturn fmt.Sprintf(\"https://%s/%s%s\", d.dns, d.prefix, link)\n\t}\n\treturn link\n}", "func (m *Reminder) GetEventWebLink()(*string) {\n val, err := m.GetBackingStore().Get(\"eventWebLink\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func ExamplePrivateLinkResourcesClient_Get() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armcosmosforpostgresql.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewPrivateLinkResourcesClient().Get(ctx, \"TestGroup\", \"testcluster\", \"plr\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.PrivateLinkResource = armcosmosforpostgresql.PrivateLinkResource{\n\t// \tName: to.Ptr(\"plr\"),\n\t// \tType: to.Ptr(\"Microsoft.DBforPostgreSQL/serverGroupsv2/privateLinkResources\"),\n\t// \tID: to.Ptr(\"/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/TestResourceGroup/providers/Microsoft.DBforPostgreSQL/serverGroupsv2/testcluster/privateLinkResources/plr\"),\n\t// \tSystemData: &armcosmosforpostgresql.SystemData{\n\t// \t\tCreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2022-01-01T17:18:19.1234567Z\"); return t}()),\n\t// \t\tCreatedBy: to.Ptr(\"user1\"),\n\t// \t\tCreatedByType: to.Ptr(armcosmosforpostgresql.CreatedByTypeUser),\n\t// \t\tLastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2022-01-02T17:18:19.1234567Z\"); return t}()),\n\t// \t\tLastModifiedBy: to.Ptr(\"user2\"),\n\t// \t\tLastModifiedByType: to.Ptr(armcosmosforpostgresql.CreatedByTypeUser),\n\t// \t},\n\t// \tProperties: &armcosmosforpostgresql.PrivateLinkResourceProperties{\n\t// \t\tGroupID: to.Ptr(\"coordinator\"),\n\t// \t\tRequiredMembers: []*string{\n\t// \t\t\tto.Ptr(\"coordinator\")},\n\t// \t\t\tRequiredZoneNames: []*string{\n\t// \t\t\t\tto.Ptr(\"privatelink.testcluster.postgres.database.azure.com\")},\n\t// \t\t\t},\n\t// \t\t}\n}", "func (lm LinksManager) HTTPClient() *http.Client {\n\treturn lm.Client\n}", "func getLinkPage(title string, targetAPI *Config, plcontinue *string) (*LinkResponse, error) {\n\tu, _ := url.Parse(targetAPI.APIRoot)\n\tu.Scheme = targetAPI.Protocol\n\n\tq := u.Query()\n\tq.Set(\"action\", \"query\")\n\tq.Set(\"titles\", title)\n\tq.Set(\"prop\", \"links\")\n\tq.Set(\"pllimit\", \"max\")\n\tq.Set(\"format\", \"json\")\n\n\tif plcontinue != nil {\n\t\tq.Set(\"plcontinue\", *plcontinue)\n\t}\n\n\tu.RawQuery = q.Encode()\n\n\tres, reqErr := http.Get(u.String())\n\n\tif reqErr != nil {\n\t\tfmt.Println(\"Request failed!\")\n\t\treturn nil, reqErr\n\t}\n\n\tdefer res.Body.Close()\n\n\tbody, readBodyErr 
:= ioutil.ReadAll(res.Body)\n\tif readBodyErr != nil {\n\t\tfmt.Println(\"Can't read response body!\")\n\t\treturn nil, readBodyErr\n\t}\n\n\tdata := LinkResponse{}\n\tjsonParseErr := json.Unmarshal(body, &data)\n\tif jsonParseErr != nil {\n\t\tfmt.Println(\"Invalid json!\")\n\t\treturn nil, jsonParseErr\n\t}\n\n\treturn &data, nil\n}", "func ExampleLinkerClient_BeginCreateOrUpdate_putLink() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armservicelinker.NewClientFactory(cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpoller, err := clientFactory.NewLinkerClient().BeginCreateOrUpdate(ctx, \"subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.Web/sites/test-app\", \"linkName\", armservicelinker.LinkerResource{\n\t\tProperties: &armservicelinker.LinkerProperties{\n\t\t\tAuthInfo: &armservicelinker.SecretAuthInfo{\n\t\t\t\tAuthType: to.Ptr(armservicelinker.AuthTypeSecret),\n\t\t\t\tName: to.Ptr(\"name\"),\n\t\t\t\tSecretInfo: &armservicelinker.ValueSecretInfo{\n\t\t\t\t\tSecretType: to.Ptr(armservicelinker.SecretTypeRawValue),\n\t\t\t\t\tValue: to.Ptr(\"secret\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tTargetService: &armservicelinker.AzureResource{\n\t\t\t\tType: to.Ptr(armservicelinker.TargetServiceTypeAzureResource),\n\t\t\t\tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.DBforPostgreSQL/servers/test-pg/databases/test-db\"),\n\t\t\t},\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\tres, err := poller.PollUntilDone(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.LinkerResource = armservicelinker.LinkerResource{\n\t// \tName: to.Ptr(\"linkName\"),\n\t// \tType: to.Ptr(\"Microsoft.ServiceLinker/links\"),\n\t// \tProperties: &armservicelinker.LinkerProperties{\n\t// \t\tAuthInfo: &armservicelinker.SecretAuthInfo{\n\t// \t\t\tAuthType: to.Ptr(armservicelinker.AuthTypeSecret),\n\t// \t\t\tName: to.Ptr(\"name\"),\n\t// \t\t},\n\t// \t\tTargetService: &armservicelinker.AzureResource{\n\t// \t\t\tType: to.Ptr(armservicelinker.TargetServiceTypeAzureResource),\n\t// \t\t\tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.DBforPostgreSQL/servers/test-pg/databases/test-db\"),\n\t// \t\t},\n\t// \t},\n\t// }\n}", "func OptClientLinkLayerAddress(ht iana.HWType, lla net.HardwareAddr) *optClientLinkLayerAddress {\n\treturn &optClientLinkLayerAddress{LinkLayerType: ht, LinkLayerAddress: lla}\n}", "func (a *Client) Link(params *LinkParams) (*LinkOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewLinkParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"link\",\n\t\tMethod: \"PUT\",\n\t\tPathPattern: \"/links/{itemName}/{channelUID}\",\n\t\tProducesMediaTypes: []string{\"\"},\n\t\tConsumesMediaTypes: []string{\"\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &LinkReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*LinkOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for link: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (n *Node) GetClient() *avalanchegoclient.Client {\n\treturn n.client\n}", "func ClientGet(c models.Client, m *models.Message) {\n\tif c.ID <= 0 {\n\t\tm.Code = http.StatusBadRequest\n\t\tm.Message = \"especifique cliente\"\n\t\treturn\n\t}\n\tdb := configuration.GetConnection()\n\tdefer db.Close()\n\terr := getClient(&c, db)\n\tif err != nil {\n\t\tm.Code = http.StatusBadRequest\n\t\tm.Message = \"no se encontró cliente\"\n\t\treturn\n\t}\n\tc.ClientTel = []models.ClientTel{{CodClient: c.ID}}\n\terr = getClientTelList(&c.ClientTel, db)\n\tc.Loan = []models.Loan{{CodCollection: c.CodCollection, CodClient: c.ID}}\n\terr = getLoanList(&c.Loan, db)\n\tc.User.ID = c.CodUser\n\terr = getUserShort(&c.User, db)\n\tm.Code = http.StatusOK\n\tm.Message = \"cliente encontrado\"\n\tm.Data = c\n}", "func getTargetLink(resp map[string]interface{}) (targetLink string, hasTargetLink bool) {\n\ttargetLink, hasTargetLink = resp[\"targetLink\"].(string)\n\treturn\n}", "func (client DatabasesClient) GetReplicationLinkSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req)\n}", "func (_UpkeepRegistrationRequests *UpkeepRegistrationRequestsSession) LINK() (common.Address, error) {\n\treturn _UpkeepRegistrationRequests.Contract.LINK(&_UpkeepRegistrationRequests.CallOpts)\n}", "func (b *Bot) GetFriendLink() string {\n\trequest, _ := http.NewRequest(\"GET\", fmt.Sprintf(\"https://admin-official.line.me/%v/account/\", b.BotId), nil)\n\tresponse, _ := b.client.Do(request)\n\tdefer response.Body.Close()\n\tdoc, _ := goquery.NewDocumentFromResponse(response)\n\tsrc, _ := doc.Find(\"div.mdCMN08Img\").Eq(1).Find(\"a\").Attr(\"href\")\n\treturn src\n}", "func generateLink(lh *linkHandler) tea.Cmd {\n\treturn func() tea.Msg {\n\t\tselect {\n\t\tcase err := <-lh.err:\n\t\t\treturn errMsg{err}\n\t\tcase tok := <-lh.token:\n\t\t\treturn linkTokenCreatedMsg(tok)\n\t\t}\n\t}\n}", "func ExampleLinkerClient_BeginDelete() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armservicelinker.NewClientFactory(cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpoller, err := clientFactory.NewLinkerClient().BeginDelete(ctx, \"subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.Web/sites/test-app\", \"linkName\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t_, err = poller.PollUntilDone(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n}", "func (m *MockC2Client) LinkClient(arg0 context.Context, arg1 *pb.LinkClientRequest, arg2 ...grpc.CallOption) (*pb.LinkClientResponse, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"LinkClient\", varargs...)\n\tret0, _ := ret[0].(*pb.LinkClientResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func ExampleLinkerClient_BeginValidate() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armservicelinker.NewClientFactory(cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", 
err)\n\t}\n\tpoller, err := clientFactory.NewLinkerClient().BeginValidate(ctx, \"subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.Web/sites/test-app\", \"linkName\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\tres, err := poller.PollUntilDone(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.ValidateOperationResult = armservicelinker.ValidateOperationResult{\n\t// \tProperties: &armservicelinker.ValidateResult{\n\t// \t\tAuthType: to.Ptr(armservicelinker.AuthTypeSecret),\n\t// \t\tIsConnectionAvailable: to.Ptr(true),\n\t// \t\tLinkerName: to.Ptr(\"linkName\"),\n\t// \t\tReportEndTimeUTC: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2020-07-12T22:06:09Z\"); return t}()),\n\t// \t\tReportStartTimeUTC: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2020-07-12T22:05:09Z\"); return t}()),\n\t// \t\tSourceID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.DocumentDb/databaseAccounts/test-acc/mongodbDatabases/test-db\"),\n\t// \t\tTargetID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.DocumentDb/databaseAccounts/test-acc/mongodbDatabases/test-db\"),\n\t// \t\tValidationDetail: []*armservicelinker.ValidationResultItem{\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"TargetExistence\"),\n\t// \t\t\t\tDescription: to.Ptr(\"The target existence is validated\"),\n\t// \t\t\t\tResult: to.Ptr(armservicelinker.ValidationResultStatusSuccess),\n\t// \t\t\t},\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"TargetNetworkAccess\"),\n\t// \t\t\t\tDescription: to.Ptr(\"Deny public network access is set to yes. 
Please confirm you are using private endpoint connection to access target resource.\"),\n\t// \t\t\t\tResult: to.Ptr(armservicelinker.ValidationResultStatusWarning),\n\t// \t\t}},\n\t// \t},\n\t// }\n}", "func (_UpkeepRegistrationRequests *UpkeepRegistrationRequestsCaller) LINK(opts *bind.CallOpts) (common.Address, error) {\n\tvar out []interface{}\n\terr := _UpkeepRegistrationRequests.contract.Call(opts, &out, \"LINK\")\n\n\tif err != nil {\n\t\treturn *new(common.Address), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)\n\n\treturn out0, err\n\n}", "func (m *WorkforceIntegration) GetUrl()(*string) {\n    return m.url\n}", "func GetLink(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *LinkState, opts ...pulumi.ResourceOption) (*Link, error) {\n\tvar resource Link\n\terr := ctx.ReadResource(\"azure-native:customerinsights:Link\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (a *Action) GetCommentLink() string {\n\treturn a.getCommentLink(db.DefaultContext)\n}", "func get_link(cache *cache.Cache, db *dropbox.Dropbox, path string) string {\n\n\t// Use caching to reduce calls to the Dropbox API\n\tcache_path := strings.Join([]string{\"link\", path}, \":\")\n\tdata, found := cache.Get(cache_path)\n\tif found {\n\t\tif cached, ok := data.(string); ok {\n\t\t\treturn cached\n\t\t} else {\n\t\t\tlog.Println(\"Error: Unable to retrieve from cache\")\n\t\t}\n\t}\n\n\tlink, err := db.Shares(path, false)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn \"\"\n\t}\n\tcache.Set(cache_path, link.URL, 0)\n\treturn link.URL\n}", "func (p Processor) GetLoginLink(ctx context.Context, in *api.GetLoginLinkRequest) (*api.LoginLinkResponse, error) {\n\ttelegramId := in.TelegramId\n\tuser, err := p.repository.GetByTelegramId(ctx, int(telegramId))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\turl, err := p.linkGenerator.GetRedirectLink(user.Id.Hex())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\texpiration := time.Now().Add(time.Minute * 15)\n\terr = p.repository.UpdateLoginLink(ctx, user.Id, url, expiration)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &api.LoginLinkResponse{\n\t\tUrl: url,\n\t\tExpiration: expiration.Unix(),\n\t}, nil\n}", "func (a *Api) getDeleteLink(w http.ResponseWriter, r *http.Request) {\n\taid, ok := r.Context().Value(\"account_id\").(string)\n\tif !ok {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tvars := mux.Vars(r)\n\taccountId, ok := vars[\"id\"]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif accountId != aid {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tlinkId, ok := vars[\"link_id\"]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\terr := a.LinkUseCases.LoggerDeleteLink(a.LinkUseCases.DeleteLink)(linkId, aid)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n}", "func RelLink(resp *http.Response, lnk string) string {\n\tfor _, link := range resp.Header.Values(\"Link\") {\n\t\tfor _, match := range relLinkExp.FindAllStringSubmatch(link, -1) {\n\t\t\tif match[2] == lnk {\n\t\t\t\treturn match[1]\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}", "func (id *HashChainLinkIdentity) ClientID() (string, error) {\n\tif id == nil {\n\t\treturn \"\", errors.New(errors.KsiInvalidArgumentError)\n\t}\n\treturn id.clientID, nil\n}", "func (a *Action) GetClient() models.Client {\n\treturn a.Raptor.GetClient()\n}", "func 
clientRequest(client *httpbakery.Client, serverEndpoint string) (string, error) {\n\t// The Do function implements the mechanics\n\t// of actually gathering discharge macaroons\n\t// when required, and retrying the request\n\t// when necessary.\n\treq, err := http.NewRequest(\"GET\", serverEndpoint, nil)\n\tif err != nil {\n\t\treturn \"\", errgo.Notef(err, \"cannot make new HTTP request\")\n\t}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn \"\", errgo.NoteMask(err, \"GET failed\", errgo.Any)\n\t}\n\tdefer resp.Body.Close()\n\t// TODO(rog) unmarshal error\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"cannot read response: %v\", err)\n\t}\n\treturn string(data), nil\n}", "func makeLink(t *testing.T) (client *Client) {\n\t// start a server\n\tserver := NewServer()\n\terr := server.Register(new(StreamingArith))\n\tif err != nil {\n\t\tt.Fatal(\"Register failed\", err)\n\t}\n\n\t// listen and handle queries\n\tvar l net.Listener\n\tl, serverAddr = listenTCP()\n\tlog.Println(\"Test RPC server listening on\", serverAddr)\n\tgo server.Accept(l)\n\n\t// dial the client\n\tclient, err = Dial(\"tcp\", serverAddr)\n\tif err != nil {\n\t\tt.Fatal(\"dialing\", err)\n\t}\n\n\treturn\n}", "func GetLinks(ctx context.Context, client dynamic.Interface) ([]Link, error) {\n\tlist, err := client.Resource(LinkGVR).List(ctx, metav1.ListOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlinks := []Link{}\n\terrs := []string{}\n\tfor _, u := range list.Items {\n\t\tlink, err := NewLink(u)\n\t\tif err != nil {\n\t\t\terrs = append(errs, fmt.Sprintf(\"failed to parse Link %s: %s\", u.GetName(), err))\n\t\t} else {\n\t\t\tlinks = append(links, link)\n\t\t}\n\t}\n\tif len(errs) > 0 {\n\t\treturn nil, errors.New(strings.Join(errs, \"\\n\"))\n\t}\n\treturn links, nil\n}", "func (e *ExportedStoryLink) GetLink() (value string) {\n\tif e == nil {\n\t\treturn\n\t}\n\treturn e.Link\n}", "func (m *ManagementTemplateStep) GetPortalLink()(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.ActionUrlable) {\n val, err := m.GetBackingStore().Get(\"portalLink\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.ActionUrlable)\n }\n return nil\n}", "func (c *Collector) Link() string {\n\treturn \"\"\n}", "func GetLinkForMessage(s string) string {\n\tif len(strings.Split(s, \" \")) > 1 && strings.Split(s, \" \")[1] != \"\" {\n\t\tif strings.Split(s, \" \")[1] == \"help\" {\n\t\t\tlinks := ReadAllLinks()\n\t\t\tkeys := make([]string, 0, len(links))\n\t\t\tfor _, link := range links {\n\t\t\t\tkeys = append(keys, link.Descrip)\n\t\t\t}\n\n\t\t\treturn fmt.Sprintf(\"```Available links are: \\n!link %s ```\", strings.Join(keys, \"\\n!link \"))\n\t\t}\n\t\tlinks := ReadAllLinks(strings.Split(s, \" \")[1])\n\t\tif len(links) > 0 {\n\t\t\tretString := \"Heres what I found!\\n\"\n\t\t\tfor _, link := range links {\n\t\t\t\tretString += fmt.Sprintf(\"%s : %s \\n\", link.Descrip, link.Link)\n\t\t\t}\n\t\t\treturn retString\n\t\t}\n\t}\n\treturn \"```Invalid argument for command !link for valid options try \\n try !link help ```\"\n}", "func (m *SocialIdentityProvider) GetClientId()(*string) {\n return m.clientId\n}", "func ExampleLinkerClient_BeginCreateOrUpdate_putLinkWithServiceEndpoint() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := 
context.Background()\n\tclientFactory, err := armservicelinker.NewClientFactory(cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpoller, err := clientFactory.NewLinkerClient().BeginCreateOrUpdate(ctx, \"subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.Web/sites/test-app\", \"linkName\", armservicelinker.LinkerResource{\n\t\tProperties: &armservicelinker.LinkerProperties{\n\t\t\tAuthInfo: &armservicelinker.SecretAuthInfo{\n\t\t\t\tAuthType: to.Ptr(armservicelinker.AuthTypeSecret),\n\t\t\t\tName: to.Ptr(\"name\"),\n\t\t\t\tSecretInfo: &armservicelinker.KeyVaultSecretURISecretInfo{\n\t\t\t\t\tSecretType: to.Ptr(armservicelinker.SecretTypeKeyVaultSecretURI),\n\t\t\t\t\tValue: to.Ptr(\"https://vault-name.vault.azure.net/secrets/secret-name/00000000000000000000000000000000\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tTargetService: &armservicelinker.AzureResource{\n\t\t\t\tType: to.Ptr(armservicelinker.TargetServiceTypeAzureResource),\n\t\t\t\tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.DBforPostgreSQL/servers/test-pg/databases/test-db\"),\n\t\t\t},\n\t\t\tVNetSolution: &armservicelinker.VNetSolution{\n\t\t\t\tType: to.Ptr(armservicelinker.VNetSolutionTypeServiceEndpoint),\n\t\t\t},\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\tres, err := poller.PollUntilDone(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.LinkerResource = armservicelinker.LinkerResource{\n\t// \tName: to.Ptr(\"linkName\"),\n\t// \tType: to.Ptr(\"Microsoft.ServiceLinker/links\"),\n\t// \tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.Web/sites/test-app/providers/Microsoft.ServiceLinker/links/linkName\"),\n\t// \tProperties: &armservicelinker.LinkerProperties{\n\t// \t\tAuthInfo: &armservicelinker.SecretAuthInfo{\n\t// \t\t\tAuthType: to.Ptr(armservicelinker.AuthTypeSecret),\n\t// \t\t\tName: to.Ptr(\"name\"),\n\t// \t\t},\n\t// \t\tTargetService: &armservicelinker.AzureResource{\n\t// \t\t\tType: to.Ptr(armservicelinker.TargetServiceTypeAzureResource),\n\t// \t\t\tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.DBforPostgreSQL/servers/test-pg/databases/test-db\"),\n\t// \t\t},\n\t// \t\tVNetSolution: &armservicelinker.VNetSolution{\n\t// \t\t\tType: to.Ptr(armservicelinker.VNetSolutionTypeServiceEndpoint),\n\t// \t\t},\n\t// \t},\n\t// }\n}", "func (a *Action) GetRepoLink() string {\n\t// path.Join will skip empty strings\n\treturn path.Join(setting.AppSubURL, \"/\", url.PathEscape(a.GetRepoUserName()), url.PathEscape(a.GetRepoName()))\n}", "func (amv *AMV) Link() string {\n\treturn \"/amv/\" + amv.ID\n}", "func (ctl *taskController) GetClient(timeout time.Duration) (*http.Client, error) {\n\t// TODO(vadimsh): Use per-project service accounts, not a global cron service\n\t// account.\n\tctx, _ := clock.WithTimeout(ctl.ctx, timeout)\n\ttransport, err := client.Transport(ctx, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn 
&http.Client{Transport: transport}, nil\n}", "func (m *Mockclient) GetLink(arg0 context.Context, arg1, arg2, arg3 string) (privatedns.VirtualNetworkLink, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetLink\", arg0, arg1, arg2, arg3)\n\tret0, _ := ret[0].(privatedns.VirtualNetworkLink)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func Readlink(name string) (string, error)", "func RetrieveLink(w http.ResponseWriter, r *http.Request) {\n\tvar obj Link = Link{Hash: mux.Vars(r)[\"hash\"]}\n\tglobals.Database.Take(&obj, \"hash = ?\", obj.Hash)\n\tjson.NewEncoder(w).Encode(obj)\n\tif !globals.Prod {\n\t\tfmt.Println(\"request: retrieve link\")\n\t}\n\treturn\n}", "func (o *OAuthApp) GetLink() string {\n\tif o == nil || o.Link == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Link\n}", "func (rfi *RemoteFeedItem) DownloadLink(extractor FeedExtractor) string {\n\treturn extractor.Extract(rfi)\n}", "func URLToClient(session *network.Session) {\n\tvar cash_url = g_ServerConfig.CashWeb_URL\n\tvar cash_odc_url = g_ServerConfig.CashWeb_Odc_URL\n\tvar cash_charge_url = g_ServerConfig.CashWeb_Charge_URL\n\tvar guildweb_url = g_ServerConfig.GuildWeb_URL\n\tvar sns_url = g_ServerConfig.Sns_URL\n\n\tvar dataLen = len(cash_url) + 4\n\tdataLen += len(cash_odc_url) + 4\n\tdataLen += len(cash_charge_url) + 4\n\tdataLen += len(guildweb_url) + 4\n\tdataLen += len(sns_url) + 4\n\n\tvar packet = network.NewWriter(URLTOCLIENT)\n\tpacket.WriteInt16(dataLen + 2)\n\tpacket.WriteInt16(dataLen)\n\tpacket.WriteInt32(len(cash_url))\n\tpacket.WriteString(cash_url)\n\tpacket.WriteInt32(len(cash_odc_url))\n\tpacket.WriteString(cash_odc_url)\n\tpacket.WriteInt32(len(cash_charge_url))\n\tpacket.WriteString(cash_charge_url)\n\tpacket.WriteInt32(len(guildweb_url))\n\tpacket.WriteString(guildweb_url)\n\tpacket.WriteInt32(len(sns_url))\n\tpacket.WriteString(sns_url)\n\n\tsession.Send(packet)\n}", "func (p *Pool) getClient() *ClientConn {\n\tvar bestConn *ClientConn\n\tfor i, client := range p.clients {\n\t\tif client.inUse < p.opt.UsedPreConn && (bestConn == nil || client.inUse < bestConn.inUse) {\n\t\t\tif client.active() {\n\t\t\t\tbestConn = p.clients[i]\n\t\t\t} else {\n\t\t\t\tgo client.waitForReady()\n\t\t\t}\n\t\t}\n\t}\n\n\tif bestConn != nil {\n\t\treturn bestConn\n\t}\n\n\tnewConn := p.addUsedClient()\n\tif newConn != nil {\n\t\tif newConn.active() {\n\t\t\treturn newConn\n\t\t}\n\t\tgo newConn.waitForReady()\n\t}\n\n\treturn nil\n}", "func checklink(link string, c chan string) {\n\tresp, err := http.Get(link)\n\n\tif err != nil {\n\t\tfmt.Println(link, \"might be down! because of the Reason : \", err, \"at time: \", time.Now().Format(\"2006-01-02 3:4:5 pm\"))\n\t\tc <- link\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\tfmt.Println(\"Hurrah\", link, \"is up! 
at time: \", time.Now().Format(\"2006-01-02 3:4:5 pm\"))\n\tc <- link\n}", "func (client DatabasesClient) GetReplicationLinkResponder(resp *http.Response) (result ReplicationLink, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (c client) URL() string {\n\treturn c.url\n}", "func (s *service) FetchLink(id uint) (*domain.Link, error) {\n\treturn s.linkRepo.FindByID(id)\n}", "func (m *MultiDynamicClient) GetClient() dynamic.Interface {\n\tm.lock.Lock()\n\tdefer m.lock.Unlock()\n\tm.current = (m.current + 1) % len(m.clients)\n\treturn m.clients[m.current]\n}", "func (thread *Thread) Link() string {\n\treturn \"/thread/\" + thread.ID\n}", "func (o ConnectionAzureOutput) ClientId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ConnectionAzure) *string { return v.ClientId }).(pulumi.StringPtrOutput)\n}", "func ExampleLinkerClient_BeginUpdate() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armservicelinker.NewClientFactory(cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpoller, err := clientFactory.NewLinkerClient().BeginUpdate(ctx, \"subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.Web/sites/test-app\", \"linkName\", armservicelinker.LinkerPatch{\n\t\tProperties: &armservicelinker.LinkerProperties{\n\t\t\tAuthInfo: &armservicelinker.ServicePrincipalSecretAuthInfo{\n\t\t\t\tAuthType: to.Ptr(armservicelinker.AuthTypeServicePrincipalSecret),\n\t\t\t\tClientID: to.Ptr(\"name\"),\n\t\t\t\tPrincipalID: to.Ptr(\"id\"),\n\t\t\t\tSecret: to.Ptr(\"secret\"),\n\t\t\t},\n\t\t\tTargetService: &armservicelinker.AzureResource{\n\t\t\t\tType: to.Ptr(armservicelinker.TargetServiceTypeAzureResource),\n\t\t\t\tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.DocumentDb/databaseAccounts/test-acc/mongodbDatabases/test-db\"),\n\t\t\t},\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\tres, err := poller.PollUntilDone(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.LinkerResource = armservicelinker.LinkerResource{\n\t// \tName: to.Ptr(\"linkName\"),\n\t// \tType: to.Ptr(\"Microsoft.ServiceLinker/links\"),\n\t// \tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.Web/sites/test-app/providers/Microsoft.ServiceLinker/links/linkName\"),\n\t// \tProperties: &armservicelinker.LinkerProperties{\n\t// \t\tAuthInfo: &armservicelinker.ServicePrincipalSecretAuthInfo{\n\t// \t\t\tAuthType: to.Ptr(armservicelinker.AuthTypeServicePrincipalSecret),\n\t// \t\t\tClientID: to.Ptr(\"name\"),\n\t// \t\t\tPrincipalID: to.Ptr(\"id\"),\n\t// \t\t},\n\t// \t\tTargetService: &armservicelinker.AzureResource{\n\t// \t\t\tType: to.Ptr(armservicelinker.TargetServiceTypeAzureResource),\n\t// \t\t\tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.DocumentDb/databaseAccounts/test-acc/mongodbDatabases/test-db\"),\n\t// \t\t},\n\t// \t},\n\t// }\n}", "func RandomLink(b *testing.B, numLinks, i int) *cs.Link {\n\treturn cstesting.RandomLink()\n}", "func (c Client) getURL(url string) string {\n\treturn fmt.Sprintf(\"%s%s\", c.baseAPI, url)\n}", "func fetchLeaderboardLink(link requestable, options *LeaderboardOptions, embeds string) (*Leaderboard, *Error) {\n\tif !link.exists() {\n\t\treturn nil, nil\n\t}\n\n\treturn fetchLeaderboard(link.request(options, nil, embeds))\n}", "func (ep *EpisodeParser) getLink(eachEp *goquery.Selection) string {\n\tlink, _ := eachEp.Find(\".episode-video a\").First().Attr(\"href\")\n\treturn link\n}", "func (m *MockDatabase) LinkClient(arg0, arg1 Client) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"LinkClient\", arg0, arg1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func gitHubLink(s *discordgo.Session, m *discordgo.MessageCreate, _ []string) {\n\ts.ChannelMessageSend(\n\t\tm.ChannelID,\n\t\t\"Check out what's under the hood here:\"+\n\t\t\t\" https://github.com/Tkdefender88/cephBot\"+\n\t\t\t\"\\nLeave a star and make my day! 
:star:\")\n}", "func (o *NiaapiNewReleaseDetailAllOf) GetLink() string {\n\tif o == nil || o.Link == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Link\n}", "func getClient(c net.Conn, s *server) *Client {\n\tclient := clientPool.Get().(*Client)\n\tclient.Reset(c, s)\n\treturn client\n}", "func (m *Application) GetPublicClient()(PublicClientApplicationable) {\n return m.publicClient\n}", "func (dm *DMMasterClient) GetURL(addr string) string {\n\thttpPrefix := \"http\"\n\tif dm.tlsEnabled {\n\t\thttpPrefix = \"https\"\n\t}\n\treturn fmt.Sprintf(\"%s://%s\", httpPrefix, addr)\n}", "func GetHTTPClient() *http.Client { return httpClientPool.Get().(*http.Client) }", "func (t *Link) GetId() (v *url.URL) {\n\treturn t.id\n\n}", "func (o *InlineResponse20026) GetLink() InlineResponse20026Link {\n\tif o == nil || o.Link == nil {\n\t\tvar ret InlineResponse20026Link\n\t\treturn ret\n\t}\n\treturn *o.Link\n}", "func (o *ShortenBitlinkBodyAllOf) GetLink() string {\n\tif o == nil || o.Link == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Link\n}", "func (r *RepoInfo) Link() string {\n\tswitch r.RepoHost {\n\tcase GoogleCode:\n\t\treturn fmt.Sprintf(\"https://code.google.com/p/%s\", r.FullName)\n\tdefault:\n\t\treturn (&url.URL{Scheme: \"https\", Host: string(r.RepoHost), Path: \"/\" + r.FullName}).String()\n\t}\n}", "func linkOpen(URL string) (io.ReadCloser, error) {\n\tresp, err := http.Get(URL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"HTTP Get failed: %v\", resp.StatusCode)\n\t}\n\treturn resp.Body, nil\n}", "func (d *Dynamicd) GetLinkMessages(receiver, sender string) (*[]GetMessageReturnJSON, error) {\n\tvar ret []GetMessageReturnJSON\n\tcmd := \"dynamic-cli link getaccountmessages \" + receiver + \" \" + sender + \" bridge\"\n\treq, err := NewRequest(cmd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres := <-d.ExecCmdRequest(req)\n\tvar messagesGeneric map[string]interface{}\n\terr = json.Unmarshal([]byte(res), &messagesGeneric)\n\tif err != nil {\n\t\tutil.Error.Println(\"GetLinkMessages messagesGeneric error\", err)\n\t\treturn nil, err\n\t}\n\tfor _, v := range messagesGeneric {\n\t\tb, err := json.Marshal(v)\n\t\tif err != nil {\n\t\t\tutil.Error.Println(\"GetLinkMessages json.Marshal error\", err)\n\t\t} else {\n\t\t\tvar message GetMessageReturnJSON\n\t\t\terr := json.Unmarshal(b, &message)\n\t\t\tif err != nil {\n\t\t\t\tutil.Error.Println(\"GetLinkMessages json.Unmarshal error\", err)\n\t\t\t} else {\n\t\t\t\tret = append(ret, message)\n\t\t\t}\n\t\t}\n\t}\n\treturn &ret, nil\n}", "func (t *Task) ClientID() int {\n\treturn t.clientID\n}", "func (c *ComicClient) Get(link, hostname string) (*http.Response, error) {\n\trequest, err := c.PrepareRequest(link, hostname)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c.Client.Do(request)\n}", "func GetClientURLByIP(ip string) string {\n\treturn \"https://\" + net.JoinHostPort(ip, strconv.Itoa(constants.EtcdListenClientPort))\n}", "func getURL(u *UseCase, ctx context.Context, shortCode string) (*database.URL, error) {\n\n\tgetPopularURL, err := u.RedisRepo.Get(ctx, shortCode)\n\tif err != nil {\n\t\treturn nil, errors.New(ErrorGeneric)\n\t}\n\n\tif getPopularURL != nil {\n\t\tresponse := new(database.URL)\n\t\terr = json.Unmarshal([]byte(*getPopularURL), response)\n\t\tif err != nil {\n\t\t\treturn nil, errors.New(ErrorGeneric)\n\t\t}\n\t\treturn response, nil\n\t}\n\n\tresponse, err := u.DatabaseRepo.GetURL(shortCode)\n\tif err 
!= nil {\n\t\treturn nil, errors.New(ErrorGeneric)\n\t}\n\n\tif response == nil {\n\t\treturn nil, errors.New(ErrorRecordNotFound)\n\t}\n\n\treturn response, nil\n\n}", "func getDeployerLink(value string) string {\n\tvar deployerLink string\n\n\tslug := getProjectSlug(value)\n\tif len(slug) > 0 {\n\t\tdeployerLink = fmt.Sprintf(\"%s%s\", config.C.DeployerLink, slug)\n\t}\n\treturn deployerLink\n}", "func ExampleLinkerClient_BeginCreateOrUpdate_putLinkWithSecretStore() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armservicelinker.NewClientFactory(cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpoller, err := clientFactory.NewLinkerClient().BeginCreateOrUpdate(ctx, \"subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.Web/sites/test-app\", \"linkName\", armservicelinker.LinkerResource{\n\t\tProperties: &armservicelinker.LinkerProperties{\n\t\t\tAuthInfo: &armservicelinker.SecretAuthInfo{\n\t\t\t\tAuthType: to.Ptr(armservicelinker.AuthTypeSecret),\n\t\t\t},\n\t\t\tSecretStore: &armservicelinker.SecretStore{\n\t\t\t\tKeyVaultID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.KeyVault/vaults/test-kv\"),\n\t\t\t},\n\t\t\tTargetService: &armservicelinker.AzureResource{\n\t\t\t\tType: to.Ptr(armservicelinker.TargetServiceTypeAzureResource),\n\t\t\t\tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.DocumentDb/databaseAccounts/test-acc/mongodbDatabases/test-db\"),\n\t\t\t},\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\tres, err := poller.PollUntilDone(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.LinkerResource = armservicelinker.LinkerResource{\n\t// \tName: to.Ptr(\"linkName\"),\n\t// \tType: to.Ptr(\"Microsoft.ServiceLinker/links\"),\n\t// \tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.Web/sites/test-app/providers/Microsoft.ServiceLinker/links/linkName\"),\n\t// \tProperties: &armservicelinker.LinkerProperties{\n\t// \t\tAuthInfo: &armservicelinker.SecretAuthInfo{\n\t// \t\t\tAuthType: to.Ptr(armservicelinker.AuthTypeSecret),\n\t// \t\t},\n\t// \t\tSecretStore: &armservicelinker.SecretStore{\n\t// \t\t\tKeyVaultID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.KeyVault/vaults/test-kv\"),\n\t// \t\t},\n\t// \t\tTargetService: &armservicelinker.AzureResource{\n\t// \t\t\tType: to.Ptr(armservicelinker.TargetServiceTypeAzureResource),\n\t// \t\t\tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.DocumentDb/databaseAccounts/test-acc/mongodbDatabases/test-db\"),\n\t// \t\t},\n\t// \t},\n\t// }\n}", "func (c *Config) client(httpClient *http.Client) *github.Client {\n\t{\n\t\t// Avoid modifying httpClient.\n\t\ttmp := *httpClient\n\t\ttmp.Transport = c.applyAppdash(tmp.Transport)\n\t\thttpClient = &tmp\n\t}\n\n\tg := github.NewClient(httpClient)\n\tif c.BaseURL != nil {\n\t\tg.BaseURL = c.BaseURL\n\t}\n\treturn g\n}", "func (app *application) Link(additional uint) error {\n\tendpoint := fmt.Sprintf(\"%s%d\", \"/links/\", additional)\n\turl := fmt.Sprintf(baseFormat, app.url, endpoint)\n\tresp, err := app.client.R().\n\t\tSetHeader(shared.TokenHeadKeyname, app.token).\n\t\tPost(url)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif resp.StatusCode() == http.StatusOK {\n\t\treturn nil\n\t}\n\n\treturn errors.New(string(resp.Body()))\n}", "func (m *CommsNotification) GetResourceUrl()(*string) {\n return m.resourceUrl\n}", "func GetClient(accessKey string) Client {\n\treturn Client{\n\t\tAccKey: accessKey,\n\t\tMethod: bingAPIMethodPost,\n\t\tUserAgent: bingAPIUserAgent,\n\t\tReqTimeout: bingAPIReqTimeout,\n\t}\n}" ]
[ "0.62175614", "0.6116539", "0.60092264", "0.5966208", "0.5790656", "0.5755509", "0.5704431", "0.5681741", "0.5604399", "0.560293", "0.55288225", "0.5463221", "0.544955", "0.54428774", "0.5407513", "0.5407395", "0.5405211", "0.5400451", "0.539615", "0.53931314", "0.53883725", "0.53785247", "0.53634566", "0.53584224", "0.53261954", "0.5311501", "0.53089243", "0.5291336", "0.52807575", "0.5273892", "0.52619964", "0.5258246", "0.52491164", "0.52460754", "0.5234811", "0.52263594", "0.519534", "0.51580435", "0.51552874", "0.51454157", "0.5138913", "0.51369286", "0.51367867", "0.51131034", "0.5112881", "0.5108525", "0.51006705", "0.5081005", "0.5067496", "0.5067316", "0.5044395", "0.5030446", "0.50200737", "0.5017451", "0.50111854", "0.50020903", "0.49870062", "0.49769634", "0.49749446", "0.49721155", "0.49672458", "0.4960893", "0.49601096", "0.49576044", "0.49572647", "0.4954772", "0.49535432", "0.4952806", "0.49524263", "0.49510366", "0.49457037", "0.49418536", "0.4940034", "0.4939676", "0.49384743", "0.4932617", "0.49271256", "0.49218026", "0.4918615", "0.49054778", "0.4900429", "0.4885086", "0.4884687", "0.48804608", "0.48716083", "0.48659682", "0.4862639", "0.4856267", "0.48538283", "0.48521057", "0.4842479", "0.4839247", "0.48343468", "0.48337328", "0.4832639", "0.4831651", "0.48315018", "0.48275858", "0.48246405", "0.48231608" ]
0.6794283
0
DeleteAlbum will delete a given album
func DeleteAlbum(albumDeleteHash string, clientID string) {
	url := "https://api.imgur.com/3/album/" + albumDeleteHash
	method := "DELETE"

	payload := &bytes.Buffer{}
	writer := multipart.NewWriter(payload)
	err := writer.Close()
	if err != nil {
		fmt.Println(err)
	}

	client := &http.Client{}
	req, err := http.NewRequest(method, url, payload)
	if err != nil {
		fmt.Println(err)
		return
	}
	req.Header.Add("Authorization", "Client-ID "+clientID)
	req.Header.Set("Content-Type", writer.FormDataContentType())

	res, err := client.Do(req)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer res.Body.Close()

	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		fmt.Println(err)
	}

	if strings.Contains(string(body), "200") {
		fmt.Println(color.GreenString("[+]"), "Delete was a success")
	}
}
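// Usage sketch (hypothetical values): the delete hash and Client-ID below are
// placeholders for illustration, not real Imgur credentials.
//
//	DeleteAlbum("aBcDeFgHiJk", "546c25a59c58ad7")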
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func deleteAlbum(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\",\"application/json\")\n\tparam := mux.Vars(r)\n\t//CQL Operation\n\tif err:= Session.Query(`DELETE FROM albumtable WHERE albname=? IF EXISTS;`,param[\"album\"]).Exec();err!=nil {\n\t\tfmt.Println(err)\n\t} else {\n\t\tfmt.Fprintf(w, \"Album deleted\")\n\t}\n}", "func DeleteAlbum(albName string) *utils.ApplicationError {\n\treturn model.DeleteAlbum(albName)\n}", "func DeleteAlbum(albumID int) error {\n\tdeleteAlbum := `DELETE FROM Album WHERE id = ?`\n\tif _, err := configure.SQL.Query(deleteAlbum, albumID); err != nil {\n\t\tlog.Println(\"Failed to delete album\")\n\t\treturn err\n\t}\n\treturn nil\n}", "func (ar AlbumDbRepository) Delete(entity *domain.Album) (err error) {\n\t// Delete all tracks.\n\tif len(entity.Tracks) == 0 {\n\t\tar.populateTracks(entity)\n\t}\n\ttracksRepo := TrackDbRepository{AppContext: ar.AppContext}\n\tfor i := range entity.Tracks {\n\t\ttracksRepo.Delete(&entity.Tracks[i])\n\t}\n\n\t// Then delete album.\n\t_, err = ar.AppContext.DB.Delete(entity)\n\treturn\n}", "func TestAlbumAddDeletePhoto(t *testing.T) {\n\talbum := api.Album{\n\t\tAlbumTitle: WellKnownAlbumTitle,\n\t}\n\n\tnewAlbum, err := Client.V1().CreateAlbum(album)\n\tif err != nil {\n\t\tt.Errorf(\"expected success creating album: %v\", err)\n\t\tt.FailNow()\n\t}\n\n\t// Add Photos\n\tphotos := []string{\n\t\tWellKnownPhotoID,\n\t}\n\terr = Client.V1().AddPhotosToAlbum(newAlbum.AlbumUID, photos)\n\tif err != nil {\n\t\tt.Errorf(\"expected to add photos to album: %v\", err)\n\t\t// Note: We do NOT FailNow() here because we want to clean up\n\t}\n\n\t// Get the photos by album\n\tupdatedPhotos, err := Client.V1().GetPhotos(&api.PhotoOptions{\n\t\tCount: 100,\n\t\tAlbumUID: newAlbum.AlbumUID,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"expecting to list photos by album: %v\", err)\n\t\t// Note: We do NOT FailNow() here because we want to clean up\n\t}\n\n\tvar updatedPhotoIDs []string\n\tfor _, photo := range updatedPhotos {\n\t\tupdatedPhotoIDs = append(updatedPhotoIDs, photo.PhotoUID)\n\t}\n\tif len(updatedPhotos) != 2 {\n\t\tt.Errorf(\"expecting 2 well known photo in album, found: %d\", len(updatedPhotos))\n\t}\n\n\terr = Client.V1().DeletePhotosFromAlbum(newAlbum.AlbumUID, updatedPhotoIDs)\n\tif err != nil {\n\t\tt.Errorf(\"expected to delete newly created photos from album: %v\", err)\n\t\t// Note: We do NOT FailNow() here because we want to clean up\n\t}\n\n\t// Get the photos by album\n\tupdatedPhotos, err = Client.V1().GetPhotos(&api.PhotoOptions{\n\t\tCount: 100,\n\t\tAlbumUID: newAlbum.AlbumUID,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"expecting to list photos by album: %v\", err)\n\t\t// Note: We do NOT FailNow() here because we want to clean up\n\t}\n\n\tif len(updatedPhotos) != 0 {\n\t\tt.Errorf(\"expected empty album, found %d photos\", len(updatedPhotos))\n\t\t// Note: We do NOT FailNow() here because we want to clean up\n\t}\n\n\terr = Client.V1().DeleteAlbums([]string{newAlbum.AlbumUID})\n\tif err != nil {\n\t\tt.Errorf(\"expected delete album %s, album not deleted: %v\", newAlbum.AlbumUID, err)\n\t\tt.FailNow()\n\t}\n\n\t// put the album back\n\tCreateWellKnownAlbum()\n}", "func (l *Lidarr) DeleteAlbum(albumID int64, deleteFiles, addImportExclusion bool) error {\n\treturn l.DeleteAlbumContext(context.Background(), albumID, deleteFiles, addImportExclusion)\n}", "func (l *Lidarr) DeleteAlbumContext(ctx context.Context, albumID int64, deleteFiles, addImportExclusion bool) error {\n\treq := 
starr.Request{URI: path.Join(bpAlbum, fmt.Sprint(albumID)), Query: make(url.Values)}\n\treq.Query.Set(\"deleteFiles\", fmt.Sprint(deleteFiles))\n\treq.Query.Set(\"addImportListExclusion\", fmt.Sprint(addImportExclusion))\n\n\tif err := l.DeleteAny(ctx, req); err != nil {\n\t\treturn fmt.Errorf(\"api.Delete(%s): %w\", &req, err)\n\t}\n\n\treturn nil\n}", "func (a API) DeleteSong(c *gin.Context) (int, interface{}, error) {\n\tname := c.Param(\"name\")\n\te := a.err.Fn(\"DeleteSong\").Tag(\"name\", name)\n\tinvalid, err := a.s.deleteSong(name)\n\tif err != nil {\n\t\treturn 500, nil, e.UK(err)\n\t}\n\tif invalid {\n\t\treturn 400, nil, e.DB(err)\n\t}\n\treturn 201, nil, nil\n}", "func DeleteArtistSong(id int,o orm.Ormer) (err error) {\n\tsql := \"DELETE FROM artist_song WHERE artist_song.artist_id = \" + strconv.Itoa(id)\n\t_, err = o.Raw(sql).Exec()\n\treturn err\n}", "func CreateAlbum(title string, clientID string) (albumID, deleteHash interface{}) {\n\n\tapiURL := \"https://api.imgur.com\"\n\tresource := \"/3/album/\"\n\tdata := url.Values{}\n\tdata.Set(\"title\", title)\n\n\tu, _ := url.ParseRequestURI(apiURL)\n\tu.Path = resource\n\turlStr := u.String() // \"https://api.com/user/\"\n\n\tclient := &http.Client{}\n\tr, _ := http.NewRequest(\"POST\", urlStr, strings.NewReader(data.Encode())) // URL-encoded payload\n\tr.Header.Add(\"Authorization\", \"Client-ID \"+clientID)\n\tr.Header.Add(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\tr.Header.Add(\"Content-Length\", strconv.Itoa(len(data.Encode())))\n\n\tresp, _ := client.Do(r)\n\tvar result map[string]interface{}\n\n\tjson.NewDecoder(resp.Body).Decode(&result)\n\n\tnestedMap := result[\"data\"]\n\tnewMap, _ := nestedMap.(map[string]interface{})\n\n\talbumID = newMap[\"id\"]\n\tdeleteHash = newMap[\"deletehash\"]\n\n\tfmt.Println(color.GreenString(\"\\n[+]\"), \"Successfully created an album with the following values:\")\n\tfmt.Println(color.GreenString(\"albumID:\"), albumID, color.GreenString(\"Album DeleteHash:\"), deleteHash)\n\tfmt.Println(\" \")\n\n\treturn albumID, deleteHash\n\n}", "func (ar AlbumDbRepository) CleanUp() error {\n\t_, err := ar.AppContext.DB.Exec(\"DELETE FROM albums WHERE NOT EXISTS (SELECT id FROM tracks WHERE tracks.album_id = albums.id)\")\n\treturn err\n}", "func DeleteSong(songID int) error {\n\tdb := database.ConnectToDatabase()\n\tdefer db.Close()\n\n\t_, err := db.Model(&structs.SongInstrument{}).Where(\"song_id = ?\", songID).Delete()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = db.Model(&structs.Song{}).Where(\"id = ?\", songID).Delete()\n\treturn err\n}", "func PutAlbum(jsonData string, db *neoism.Database) string {\n\t// TODO: Write a data verification method\n\n\t// Parse the json data into an album struct\n\tvar a Album\n\terr := json.Unmarshal([]byte(jsonData), &a)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn \"{ \\\"err\\\": \\\"Unable to parse json request\\\" }\"\n\t}\n\n\t// Set the submitted date to the current time\n\ta.Submitted = int32(time.Now().Unix())\n\tfmt.Println(a.Submitted)\n\n\t// Create a new node in Neo4j DB\n\tres := []struct {\n\t\tN neoism.Node\n\t}{}\n\n\tcq := neoism.CypherQuery{\n\t\tStatement: \"CREATE (n:Album {name: {name}, year: {year}, submitted: {submitted}}) RETURN n\",\n\t\tParameters: neoism.Props{\"name\": a.Name, \"year\": a.Year, \"submitted\": a.Submitted},\n\t\tResult: &res,\n\t}\n\tdb.Cypher(&cq)\n\n\t// TODO: Create relationships to artist, genre\n\n\treturn \"\"\n}", "func (t *SimpleChaincode) deleteArtwork(stub 
shim.ChaincodeStubInterface, args []string) pb.Response {\n\tvar jsonResp string\n\tvar ArtworkJSON Artwork\n\tif len(args) != 1 {\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting TokenID.\")\n\t}\n\ttokenID := args[0]\n\n\tvalAsbytes, err := stub.GetState(tokenID) //get the Artwork from chaincode state\n\tif err != nil {\n\t\tjsonResp = \"{\\\"Error\\\":\\\"Failed to get state for \" + tokenID + \"\\\"}\"\n\t\treturn shim.Error(jsonResp)\n\t} else if valAsbytes == nil {\n\t\tjsonResp = \"{\\\"Error\\\":\\\"Artwork does not exist: \" + tokenID + \"\\\"}\"\n\t\treturn shim.Error(jsonResp)\n\t}\n\n\terr = json.Unmarshal([]byte(valAsbytes), &ArtworkJSON)\n\tif err != nil {\n\t\tjsonResp = \"{\\\"Error\\\":\\\"Failed to decode JSON of: \" + tokenID + \"\\\"}\"\n\t\treturn shim.Error(jsonResp)\n\t}\n\n\terr = stub.DelState(tokenID) //remove the Artwork from chaincode state\n\tif err != nil {\n\t\treturn shim.Error(\"Failed to delete state:\" + err.Error())\n\t}\n\n\treturn shim.Success(nil)\n}", "func DeleteBook(isbn string) {\n    delete(books, isbn)\n}", "func deleteImage(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\",\"application/json\")\n\tparam := mux.Vars(r)\n\t//CQL Operation\n\tif err:= Session.Query(`UPDATE albumtable SET imagelist=imagelist-? WHERE albname=?;`,[]string{param[\"image\"]},param[\"album\"]).Exec();err!=nil {\n\t\tfmt.Println(err)\n\t} else {\n\t\tfmt.Fprintf(w, \"Image deleted\")\n\t}\n}", "func (s *Service) deleteSong(name string) (invalid bool, err error) {\n\te := s.err.Fn(\"deleteSong\")\n\tok, err := s.db.delete(name)\n\tif err != nil {\n\t\treturn true, e.Wrap(err, \"deleting song\")\n\t} else if !ok {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}", "func (a *Media_ArtistsApiService) DeleteArtist(ctx context.Context, id int64) ( *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/media/artists/{id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", fmt.Sprintf(\"%v\", id), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t localVarHttpResponse, err := a.client.callAPI(r)\n\t if err != nil || localVarHttpResponse == nil {\n\t\t return localVarHttpResponse, err\n\t }\n\t defer localVarHttpResponse.Body.Close()\n\t if localVarHttpResponse.StatusCode >= 300 {\n\t\treturn localVarHttpResponse, reportError(localVarHttpResponse.Status)\n\t 
}\n\n\treturn localVarHttpResponse, err\n}", "func ArtistAlbum(id string, page, limit int) (string, error) {\n\t_offset, _limit := formatParams(page, limit)\n\tpreParams := "{\"offset\": "+ _offset +", \"limit\": "+_limit +", \"total\": true, \"csrf_token\": \"\"}"\n\tparams, encSecKey, encErr := EncParams(preParams)\n\tif encErr != nil {\n\t\treturn \"\", encErr\n\t}\n\tres, resErr := post(\"http://music.163.com/weapi/artist/albums/\"+id, params, encSecKey)\n\tif resErr != nil {\n\t\treturn \"\", resErr\n\t}\n\treturn res, nil\n}", "func (c MockedCache) InvalidateAlbum(ctx context.Context, title string) error {\n\treturn c.InvalidateAlbumFn(ctx, title)\n}", "func DeletePhoto(id string) error {\n\tclient, ctx, cancel := getDBConnection()\n\tdefer cancel()\n\tdefer client.Disconnect(ctx)\n\n\tcol := client.Database(\"cat-scribers\").Collection(\"photos\")\n\n\toid, _ := primitive.ObjectIDFromHex(id)\n\n\t_, err := col.DeleteOne(ctx, bson.M{\"_id\": oid})\n\n\treturn err\n}", "func deleteAsset(c client.Client, asset kabanerov1alpha1.RepositoryAssetStatus, assetOwner metav1.OwnerReference) error {\n\tu := &unstructured.Unstructured{}\n\tu.SetGroupVersionKind(schema.GroupVersionKind{\n\t\tGroup: asset.Group,\n\t\tVersion: asset.Version,\n\t\tKind: asset.Kind,\n\t})\n\n\terr := c.Get(context.Background(), client.ObjectKey{\n\t\tNamespace: asset.Namespace,\n\t\tName: asset.Name,\n\t}, u)\n\n\tif err != nil {\n\t\tif errors.IsNotFound(err) == false {\n\t\t\tlog.Error(err, fmt.Sprintf(\"Unable to check asset name %v\", asset.Name))\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\t// Get the owner references. See if we're the last one.\n\t\townerRefs := u.GetOwnerReferences()\n\t\tnewOwnerRefs := []metav1.OwnerReference{}\n\t\tfor _, ownerRef := range ownerRefs {\n\t\t\tif ownerRef.UID != assetOwner.UID {\n\t\t\t\tnewOwnerRefs = append(newOwnerRefs, ownerRef)\n\t\t\t}\n\t\t}\n\n\t\tif len(newOwnerRefs) == 0 {\n\t\t\terr = c.Delete(context.TODO(), u)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err, fmt.Sprintf(\"Unable to delete asset name %v\", asset.Name))\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tu.SetOwnerReferences(newOwnerRefs)\n\t\t\terr = c.Update(context.TODO(), u)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err, fmt.Sprintf(\"Unable to delete owner reference from %v\", asset.Name))\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (k Keeper) DeleteArtist(ctx sdk.Context, key string) {\n\tstore := ctx.KVStore(k.storeKey)\n\tstore.Delete([]byte(types.ArtistPrefix + key))\n}", "func Delete(w http.ResponseWriter, r *http.Request) {\n\t// get user info\n\tusername := r.Context().Value(\"username\")\n\tif username == nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// retrieve photo id from api call\n\tvar requestedPhoto Photo\n\terr := json.NewDecoder(r.Body).Decode(&requestedPhoto)\n\tif err != nil {\n\t\tw.Write([]byte(\"Missing PhotoID or IsPublic attribute\"))\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif requestedPhoto.ID == \"\" {\n\t\tw.Write([]byte(\"PhotoID not provided in request body\"))\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// make sure photo exists\n\tvar photos []Photo\n\tDB.Where(&Photo{ID: requestedPhoto.ID}).Find(&photos)\n\n\tif len(photos) > 1 {\n\t\tw.Write([]byte(\"Multiple photos returned\"))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif len(photos) == 0 {\n\t\tw.Write([]byte(\"No photos 
returned\"))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tphoto := photos[0]\n\n\t// Make sure photo belongs to user\n\tuserID, err := GetUserGUID(username.(string))\n\tif photo.UserID != *userID {\n\t\tw.Write([]byte(\"photo does not belong to user\"))\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// delete photo from photos table\n\tDB.Delete(&photo)\n\n\t// delete file from bucket\n\timageFile := Client.Bucket(getBucketForPhoto(photo)).Object(photo.ID)\n\tif err = imageFile.Delete(r.Context()); err != nil {\n\t\terr = fmt.Errorf(\"Object(%q).Delete: %v\", photo.ID, err)\n\t\tw.Write([]byte(err.Error()))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Write([]byte(\"photo deleted\"))\n\tw.WriteHeader(http.StatusOK)\n}", "func (c *Client) DeleteAsset(displayID int) error {\n\tvar out AssetItem\n\terr := c.WriteObject(fmt.Sprintf(\"/api/v2/assets/%v\", displayID), \"DELETE\", \"\", &out)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (handle ArtistHandler) Delete(w http.ResponseWriter, req *http.Request) {\n\tparams := mux.Vars(req)\n\n\tif err := model.NewArtistDAO(handle.db).Delete(params[\"id\"]); err != nil {\n\t\tlog.Println(err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n\n\tlog.Printf(\"%s - %s - %s\\n\", req.Method, req.Host, req.URL.Path)\n\tw.Write([]byte(\"success!\"))\n}", "func (controller controller) DeleteBook() {\n\tgroup := controller.createNewGroup()\n\tgroup.Delete(\"/:id\", func(c *fiber.Ctx) {\n\t\tid := c.Params(\"id\")\n\t\tbooksRepo := BooksRepository{DB: controller.db}\n\t\terr := booksRepo.Delete(id)\n\t\tresponse := core.Response{Status: \"OK\", Code: 200}\n\t\tif err != nil {\n\t\t\tresponse.Status = \"NOTOK\"\n\t\t\tresponse.Err = err.Error()\n\t\t\tc.Status(200).JSON(response)\n\t\t\treturn\n\t\t}\n\t\tc.Status(200).JSON(response)\n\t})\n}", "func (s service) DeleteBook(id int) error {\n\tsentence := `DELETE FROM library WHERE id=?;`\n\t_, err := s.db.Exec(sentence, id)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func postAlbums(c *gin.Context) {\n\n\tvar newAlbum album.Album\n\n\tif err := c.BindJSON(&newAlbum); err != nil {\n\t\treturn\n\t}\n\n\tres, err := dbClient.Exec(\"INSERT INTO album (id, title, artist, price) VALUES (?, ?, ?, ?);\",\n\t\tnewAlbum.ID, newAlbum.Title, newAlbum.Artist, newAlbum.Price)\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tid, err := res.LastInsertId()\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tnewAlbum.ID = int(id)\n\n\tc.IndentedJSON(http.StatusCreated, newAlbum)\n}", "func (s *Song) Delete() error {\n\treturn DB.DeleteSong(s)\n}", "func (mdb MongoDBConnection) Delete(a Agent) error {\n\tmdb.session = mdb.GetSession()\n\tdefer mdb.session.Close()\n\tdb := mdb.session.DB(\"dockmaster\").C(\"containers\")\n\terr := db.Remove(bson.M{\"agentid\": a.AgentID})\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *SmartContract) DeleteAsset(ctx contractapi.TransactionContextInterface, id string) error {\n\n\tasset, err := s.ReadAsset(ctx, id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclientID, err := s.GetSubmittingClientIdentity(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif clientID != asset.Owner {\n\t\treturn fmt.Errorf(\"submitting client not authorized to update asset, does not own asset\")\n\t}\n\n\treturn ctx.GetStub().DelState(id)\n}", "func (dao ArtistDAO) Delete(id string) error {\n\treturn dao.getCollection().Remove(bson.M{\"_id\": bson.ObjectIdHex(id)})\n}", "func 
DeleteImage(c * gin.Context){\n\tdb := database.DBConn()\n\tid:= c.Param(\"id\")\n\t_, err := db.Query(\"Delete FROM images WHERE id = \" + id)\n\tif err != nil{\n\t\tc.JSON(500, gin.H{\n\t\t\t\"messages\" : \"Story not found\",\n\t\t});\n\t\tpanic(\"error delte clothes\")\n\t}\n\tc.JSON(200, gin.H{\n\t\t\"messages\": \"deleted\",\n\t})\n\tdefer db.Close()\n}", "func (s *Song) Delete(res http.ResponseWriter, req *http.Request) error {\n\t// See BeforeAPIDelete above, how we have checked the request for some\n\t// form of auth. This could be done here instead, but if it is done once\n\t// above, it means the request is valid here too.\n\treturn nil\n}", "func (c *TestClient) DeleteSnapshot(project, name string) error {\n\tif c.DeleteSnapshotFn != nil {\n\t\treturn c.DeleteSnapshotFn(project, name)\n\t}\n\treturn c.client.DeleteSnapshot(project, name)\n}", "func DeleteAsset(settings *playfab.Settings, postData *DeleteAssetRequestModel, entityToken string) (*EmptyResponseModel, error) {\n if entityToken == \"\" {\n return nil, playfab.NewCustomError(\"entityToken should not be an empty string\", playfab.ErrorGeneric)\n }\n b, errMarshal := json.Marshal(postData)\n if errMarshal != nil {\n return nil, playfab.NewCustomError(errMarshal.Error(), playfab.ErrorMarshal)\n }\n\n sourceMap, err := playfab.Request(settings, b, \"/MultiplayerServer/DeleteAsset\", \"X-EntityToken\", entityToken)\n if err != nil {\n return nil, err\n }\n \n result := &EmptyResponseModel{}\n\n config := mapstructure.DecoderConfig{\n DecodeHook: playfab.StringToDateTimeHook,\n Result: result,\n }\n \n decoder, errDecoding := mapstructure.NewDecoder(&config)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n \n errDecoding = decoder.Decode(sourceMap)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n\n return result, nil\n}", "func (q *BookQueries) DeleteBook(id uuid.UUID) error {\n\t// Define query string.\n\tquery := `DELETE FROM books WHERE id = $1`\n\n\t// Send query to database.\n\t_, err := q.Exec(query, id)\n\tif err != nil {\n\t\t// Return only error.\n\t\treturn err\n\t}\n\n\t// This query returns nothing.\n\treturn nil\n}", "func (b *book) deleteBook(db *sql.DB) error {\n\t_, err := db.Exec(\"DELETE FROM books WHERE id=$1\", b.ID)\n\n\treturn err\n}", "func DeleteVault(c *gin.Context) {\n\tdbmap := c.MustGet(\"DBmap\").(*gorp.DbMap)\n\tid := c.Params.ByName(\"id\")\n\n\tvar vault Vault\n\terr := dbmap.SelectOne(&vault, \"SELECT * FROM vault WHERE id=?\", id)\n\n\tif err == nil {\n\t\t_, err = dbmap.Delete(&vault)\n\n\t\tif err == nil {\n\t\t\tc.JSON(200, gin.H{\"id #\" + id: \"deleted\"})\n\t\t} else {\n\t\t\tcheckErr(err, \"Delete failed\")\n\t\t}\n\n\t} else {\n\t\tc.JSON(404, gin.H{\"error\": \"vault not found\"})\n\t}\n\n\t// curl -i -X DELETE http://localhost:8080/api/v1/vaults/1\n}", "func NewAlbum() *Album {\n\talbum := new(Album)\n\talbum.Id = 0\n\talbum.Key = \"\"\n\talbum.Title = \"\"\n\talbum.Artists = dna.StringArray{}\n\talbum.Plays = 0\n\talbum.Songids = dna.IntArray{}\n\talbum.Nsongs = 0\n\talbum.Description = \"\"\n\talbum.Coverart = \"\"\n\talbum.DateCreated = time.Time{}\n\talbum.Checktime = time.Time{}\n\treturn album\n}", "func DeletePhoto(photoID int) error {\n\tdeletePhoto := `DELETE FROM Photo WHERE id = ?`\n\tif _, err := configure.SQL.Query(deletePhoto, photoID); err != nil {\n\t\tlog.Println(\"Failed to delete photo\")\n\t\treturn err\n\t}\n\treturn nil\n}", "func 
(client *Client) DeleteSnapshot(names ...string) (*Response, *ResponseStatus, error) {\n\treturn client.FormattedRequest(\"/delete/snapshot/%q\", strings.Join(names, \",\"))\n}", "func addAlbum(album Album) (int64, error) {\n\tresult, err := db.Exec(\"INSERT INTO album (title, artist, price) VALUES (?, ?, ?)\", album.Title, album.Artist, album.Price)\n\tif nil != err {\n\t\treturn 0, fmt.Errorf(\"addAlbum: %v\", err)\n\t}\n\tid, err := result.LastInsertId()\n\tif nil != err {\n\t\treturn 0, fmt.Errorf(\"addAlbum: %v\", err)\n\t}\n\treturn id, nil\n}", "func (c *CachitaCache) InvalidateAlbum(ctx context.Context, title string) error {\n\treturn c.store.Invalidate(c.albumKey(title))\n}", "func delete_asset(stub shim.ChaincodeStubInterface, args []string) ([]byte, error) {\n\tfmt.Println(\"starting delete_asset\")\n\n\tif len(args) != 1 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting 2\")\n\t}\n\n\tid := args[0]\n\t// get the asset\n\t_, err := get_asset(stub, id)\n\tif err != nil {\n\t\tfmt.Println(\"Failed to find asset by id \" + id)\n\t\treturn nil, errors.New(err.Error())\n\t}\n\n\t// remove the asset\n\terr = stub.DelState(id) //remove the key from chaincode state\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to delete state\")\n\t}\n\n\tfmt.Println(\"- end delete_asset\")\n\treturn nil, nil\n}", "func DeleteBook(c *fiber.Ctx) {\n\t//c.Send(\"Delete a book \")\n\tid := c.Params(\"id\")\n\tdb := DBConn\n\tvar book Book\n\tdb.First(&book, id)\n\tif book.Title == \"\" {\n\t\tc.Status(500).Send(\"No Book Found\")\n\t\treturn\n\t}\n\tdb.Delete(&book, id)\n\tc.Send(\"Book deleted successfully\")\n}", "func apiArchiveDeleteDocument(\n\tctx *ApiContext, req *http.Request, params httprouter.Params,\n) *ApiResponse {\n\tcollectionId := params.ByName(\"collection\")\n\tcollection, err := ctx.Repository.Use(collectionId)\n\tif err != nil {\n\t\treturn JsonError(err, 500)\n\t}\n\tarchiveId, err := strconv.ParseUint(params.ByName(\"id\"), 10, 64)\n\n\tkey := params.ByName(\"key\")\n\tif key == \"\" {\n\t\treturn JsonError(\"Missing parameter: key\", 500)\n\t}\n\n\tarchive, err := collection.Find(uint64(archiveId))\n\tif err != nil {\n\t\treturn JsonError(\"archive not found\", 404)\n\t}\n\n\t// Remove document from archive\n\terr = archive.Remove(key, \"removed via api\")\n\tif err != nil {\n\t\treturn JsonError(err, 500)\n\t}\n\n\treturn JsonSuccess(\"OK\")\n}", "func DeleteSession(urlPrefix, id string) error {\n\tu, err := url.Parse(urlPrefix)\n\tif err != nil {\n\t\treturn err\n\t}\n\tu.Path = path.Join(u.Path, \"session\", id)\n\treturn voidCommand(\"DELETE\", u.String(), nil)\n}", "func (a *HouseholdApiService) DeleteHouseholdUsingDelete(ctx context.Context, householdId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\ta.client = NewAPIClient(&Configuration{\n\t\tBasePath: ctx.Value(\"BasePath\").(string),\n\t\tDefaultHeader: make(map[string]string),\n\t\tUserAgent: \"Swagger-Codegen/1.0.0/go\",\n\t})\n\tlocalVarPath := a.client.cfg.BasePath + \"/nucleus/v1/household/{household_id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"household_id\"+\"}\", fmt.Sprintf(\"%v\", householdId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type 
header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"*/*\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (r *repository) Delete(id uint) error {\n\tif err := r.db.Where(\"id = ?\", id).Delete(&models.Upload{}).Error; err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func DeleteCar(w http.ResponseWriter, r *http.Request) {\n\tcarID := chi.URLParam(r, \"carID\")\n\tsqlQ := \"SELECT fk_user FROM cars WHERE id=$1\"\n\n\trow := Database.QueryRow(sqlQ, carID)\n\n\tvar cars Car\n\terr := row.Scan(&cars.Fkuser)\n\n\tswitch err {\n\tcase sql.ErrNoRows:\n\t\thttp.Error(w, \"requested car no longer exists\", http.StatusNotFound)\n\t\treturn\n\tcase nil:\n\tdefault:\n\t\tpanic(err)\n\t}\n\n\tclaims := GetToken(jwtauth.TokenFromHeader(r))\n\tif claims[\"id\"] != cars.Fkuser && claims[\"role\"] != \"admin\" {\n\t\thttp.Error(w, \"Unauthorized action\", http.StatusUnauthorized)\n\t\tpanic(err)\n\t}\n\tsql := \"DELETE FROM public.cars WHERE id=$1;\"\n\n\terr = Database.QueryRow(sql, carID).Err()\n\tif err != nil {\n\t\thttp.Error(w, \"wrong body structure\", http.StatusBadRequest)\n\t\tpanic(err)\n\t}\n\n\tGetCarList(w, r)\n}", "func DeleteBook(c *gin.Context) {\n\tdb := c.MustGet(\"db\").(*gorm.DB)\n\n\tvar book models.Book\n\tif err := db.Where(\"id = ?\", c.Param(\"id\")).First(&book).Error; err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": \"Record not found!\"})\n\t\treturn\n\t}\n\n\tdb.Delete(&book)\n\n\tc.JSON(http.StatusOK, gin.H{\"data\": true})\n}", "func (u *App) Delete(c echo.Context, id string) error {\n\tif err := u.rbac.EnforceRole(c, model.AdminRole); err != nil {\n\t\treturn err\n\t}\n\n\tpost, err := u.udb.View(u.db, id)\n\tif err = zaplog.ZLog(err); err != nil {\n\t\treturn err\n\t}\n\n\tif post.Status != model.StatusDraft {\n\t\treturn zaplog.ZLog(errors.New(\"Apenas é possível eliminar artigos em rascunho\"))\n\t}\n\n\treturn u.udb.Delete(u.db, id)\n}", "func DeleteBook(c *fiber.Ctx) {\n\tid := c.Params(\"id\")\n\tdb := database.DBConn\n\n\tvar book Book\n\n\tdb.First(&book, id)\n\tif book.Title == \"\" {\n\t\tres := Response{Status: 500, Message: \"No book with given id found\"}\n\t\tc.Status(500).JSON(res)\n\t\treturn\n\t}\n\n\tdb.Delete(&book)\n\n\tres := Response{Status: 200, Message: \"Book deleted 
successfully\"}\n\tc.JSON(res)\n}", "func (service *AlbumDiscogService) CreateAlbumDiscog(attributes *library.AlbumAttributes) error {\n\tif service.insert == nil {\n\t\tstmt, err := service.prepareInsert()\n\t\tif err != nil {\n\t\t\tservice.session.Logger.Println(err)\n\t\t\treturn err\n\t\t}\n\t\tservice.insert = stmt\n\t}\n\n\t_, err := service.insert.Exec(\n\t\tattributes.ArtistName, attributes.ArtistSort,\n\t\tattributes.Name,\n\t\tattributes.Sort,\n\t\tattributes.ReleaseDate,\n\t\tattributes.ArtistName, attributes.ArtistSort,\n\t\tattributes.GenreName)\n\n\tif err != nil {\n\t\tservice.session.Logger.Println(err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func (r *artistResource) delete(c *gin.Context) {\n\tid, err := strconv.Atoi(c.Param(\"artistID\"))\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err})\n\t\treturn\n\t}\n\n\tresponse, err := r.service.Delete(c, uint(id))\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err})\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"id\": response.ID,\n\t\t\"error\": \"\",\n\t\t\"message\": response,\n\t})\n}", "func NewAlbum() *Album {\n\talbum := new(Album)\n\talbum.Key = \"\"\n\talbum.Id = 0\n\talbum.EncodedKey = \"\"\n\talbum.Title = \"\"\n\talbum.Artists = dna.StringArray{}\n\talbum.Coverart = \"\"\n\talbum.Topics = dna.StringArray{}\n\talbum.Plays = 0\n\talbum.Songids = dna.IntArray{}\n\talbum.YearReleased = \"\"\n\talbum.Nsongs = 0\n\talbum.Description = \"\"\n\talbum.DateCreated = time.Time{}\n\talbum.Checktime = time.Time{}\n\t// add more 6 fields\n\talbum.IsAlbum = 0\n\talbum.IsHit = 0\n\talbum.IsOfficial = 0\n\talbum.Likes = 0\n\talbum.StatusId = 0\n\talbum.Comments = 0\n\talbum.ArtistIds = dna.IntArray{}\n\treturn album\n}", "func (o *BraceletPhoto) Delete(exec boil.Executor) error {\n\tif o == nil {\n\t\treturn errors.New(\"models: no BraceletPhoto provided for delete\")\n\t}\n\n\targs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), braceletPhotoPrimaryKeyMapping)\n\tsql := \"DELETE FROM `bracelet_photo` WHERE `id`=?\"\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, sql)\n\t\tfmt.Fprintln(boil.DebugWriter, args...)\n\t}\n\n\t_, err := exec.Exec(sql, args...)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"models: unable to delete from bracelet_photo\")\n\t}\n\n\treturn nil\n}", "func GetAlbumFromAPI(id dna.Int) (*Album, error) {\n\tvar album *Album = NewAlbum()\n\talbum.Id = id\n\tapialbum, err := GetAPIAlbum(id)\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tif apialbum.Response.MsgCode == 1 {\n\t\t\tif GetKey(apialbum.Id) != GetKey(album.Id) {\n\t\t\t\terrMes := dna.Sprintf(\"Resulted key and computed key are not match. 
%v =/= %v , id: %v =/= %v\", GetKey(apialbum.Id), GetKey(album.Id), id, apialbum.Id)\n\t\t\t\tpanic(errMes.String())\n\t\t\t}\n\n\t\t\talbum.Title = apialbum.Title\n\t\t\talbum.Artists = dna.StringArray(apialbum.Artists.Split(\" , \").Map(func(val dna.String, idx dna.Int) dna.String {\n\t\t\t\treturn val.Trim()\n\t\t\t}).([]dna.String)).SplitWithRegexp(\",\").Filter(func(v dna.String, i dna.Int) dna.Bool {\n\t\t\t\tif v != \"\" {\n\t\t\t\t\treturn true\n\t\t\t\t} else {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t})\n\n\t\t\talbum.Topics = dna.StringArray(apialbum.Topics.Split(\", \").Map(func(val dna.String, idx dna.Int) dna.String {\n\t\t\t\treturn val.Trim()\n\t\t\t}).([]dna.String)).SplitWithRegexp(\" / \").Unique().Filter(func(v dna.String, i dna.Int) dna.Bool {\n\t\t\t\tif v != \"\" {\n\t\t\t\t\treturn true\n\t\t\t\t} else {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t})\n\t\t\talbum.Plays = apialbum.Plays\n\t\t\t// album.Songids\n\t\t\t// album.Nsongs\n\t\t\t// album.EncodedKey\n\t\t\t// album.Coverart\n\t\t\t// album.DateCreated\n\t\t\talbum.YearReleased = apialbum.YearReleased\n\t\t\talbum.Description = apialbum.Description.RemoveHtmlTags(\"\")\n\n\t\t\talbum.ArtistIds = apialbum.ArtistIds.Split(\",\").ToIntArray()\n\t\t\talbum.IsAlbum = apialbum.IsAlbum\n\t\t\talbum.IsHit = apialbum.IsHit\n\t\t\talbum.IsOfficial = apialbum.IsOfficial\n\t\t\talbum.Likes = apialbum.Likes\n\t\t\talbum.StatusId = apialbum.StatusId\n\t\t\talbum.Comments = apialbum.Comments\n\t\t\talbum.Checktime = time.Now()\n\t\t\treturn album, nil\n\t\t} else {\n\t\t\treturn nil, errors.New(\"Message code invalid \" + apialbum.Response.MsgCode.ToString().String())\n\t\t}\n\t}\n}", "func GetAlbum(id dna.Int) (*Album, error) {\n\tapiAlbum, err := GetAPIAlbum(id)\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\talbum := apiAlbum.ToAlbum()\n\t\tif album.Id == 0 {\n\t\t\treturn nil, errors.New(dna.Sprintf(\"Keeng - Album ID: %v not found\", id).String())\n\t\t} else {\n\t\t\treturn album, nil\n\t\t}\n\t}\n}", "func (s *Song) AfterAPIDelete(res http.ResponseWriter, req *http.Request) error {\n\taddr := req.RemoteAddr\n\tlog.Println(\"Song deleted by:\", addr, \"id:\", req.URL.Query().Get(\"id\"))\n\n\treturn nil\n}", "func deleteImage(c *cli.Context) error {\n\terr := checkArgCount(c, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\tid := c.Args().First()\n\n\tif confirmed(c) {\n\t\tclient.Photonclient, err = client.GetClient(c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdeleteTask, err := client.Photonclient.Images.Delete(id)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = waitOnTaskOperation(deleteTask.ID, c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tfmt.Println(\"OK, canceled\")\n\t}\n\n\treturn nil\n}", "func (c MockedCache) PutAlbum(ctx context.Context, album Album) error {\n\treturn c.PutAlbumFn(ctx, album)\n}", "func (r *Repository) Delete(db *gorm.DB, i interface{}) error {\n\treturn db.Omit(clause.Associations).Delete(i).Error\n}", "func (s *AvatarsService) Delete (ctx context.Context, entityType string, owningObjectID string, avatarID int64) (*http.Response, error) {\n\tendpoint := fmt.Sprintf(\"universal_avatar/type/%v/owner/%v/avatar/%v\", entityType, owningObjectID, avatarID)\n\treq, err := s.client.NewRequest(\"DELETE\", endpoint, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := s.client.Do(ctx, req, nil)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\n\treturn resp, err\n}", "func (api *APIClient) DeleteVault(vaultName string) error 
{\n\tdeleteVaultRequest := graphql.NewRequest(deleteVaultRequestString)\n\tdeleteVaultInput := DeleteVaultInput{\n\t\tAffiliationName: api.Affiliation,\n\t\tVaultName: vaultName,\n\t}\n\tdeleteVaultRequest.Var(\"deleteVaultInput\", deleteVaultInput)\n\n\tvar deleteVaultResponse DeleteVaultResponse\n\tif err := api.RunGraphQlMutation(deleteVaultRequest, &deleteVaultResponse); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (m *ManagerMock) Delete(ctx context.Context, s *hub.Subscription) error {\n\targs := m.Called(ctx, s)\n\treturn args.Error(0)\n}", "func (db *DB) DeleteBook(id uint) error {\n\tb := &Book{\n\t\tID: id,\n\t}\n\tif err := db.client.Delete(b).Error; err != nil {\n\t\treturn fmt.Errorf(\"DB: Delete: %v\", err)\n\t}\n\treturn nil\n}", "func deleteDoc(c *gin.Context) {\n\tkey := c.Params.ByName(\"id\")\n\terr := os.Remove(dataDir + \"/\" + key)\n\tif err != nil {\n\t\tlog.Printf(\"Error removing document: %s\", err.Error())\n\t\tc.JSON(statusErr, newErrorResp(key, \"error removing document\", err))\n\t} else {\n\t\terr = deleteMetadata(key)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error removing metadata: %s\", err.Error())\n\t\t\tc.JSON(statusErr, newErrorResp(key, \"error removing metadata\", err))\n\t\t} else {\n\t\t\tc.JSON(statusOk, newSuccessResp(key, \"removed document\"))\n\t\t}\n\t}\n}", "func deleteVolume(vol string) {\n\tclient.RemoveVolume(vol)\n}", "func addAlbum(w http.ResponseWriter, r *http.Request){\n\tw.Header().Set(\"Content-Type\",\"application/json\")\n\tparam := mux.Vars(r)\n\tif err:= Session.Query(`INSERT INTO albumtable (albname) VALUES (?) IF NOT EXISTS;`,param[\"album\"]).Exec();err!=nil {\n\t\tfmt.Println(err)\n\t} else {\n\t\tfmt.Fprintf(w, \"New album added\")\n\t}\n}", "func (juke *Jukebox) RemoveSong(name string, position int) {\n\tjuke.mux.Lock()\n\tdefer juke.mux.Unlock()\n\tjuke.remove(name, position)\n}", "func DeleteCampaign(w http.ResponseWriter, r *http.Request) {\n\tparams := mux.Vars(r)\n\tcampaignID, err := strconv.Atoi(params[\"id\"])\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tcampaign, err := db.DeleteCampaign(campaignID)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tjson.NewEncoder(w).Encode(campaign)\n}", "func (n *Repository) deleteHash(ar *Artifact, h string) error {\n\thashedPom := n.generateURL(ar, fmt.Sprint(suffixPom, dot, h))\n\tif err := n.delete(hashedPom); err != nil {\n\t\treturn err\n\t}\n\n\thashedFile := n.generateURL(ar, fmt.Sprint(ar.extension(), dot, h))\n\treturn n.delete(hashedFile)\n}", "func apiArchiveDelete(\n\tctx *ApiContext, req *http.Request, params httprouter.Params,\n) *ApiResponse {\n\tcollectionId := params.ByName(\"collection\")\n\tcollection, err := ctx.Repository.Use(collectionId)\n\tif err != nil {\n\t\treturn JsonError(err, 500)\n\t}\n\tarchiveId, err := strconv.ParseUint(params.ByName(\"id\"), 10, 64)\n\n\tarchive, err := collection.Find(uint64(archiveId))\n\tif err != nil {\n\t\treturn JsonError(\"archive not found\", 404)\n\t}\n\n\terr = archive.Destroy(\"destroyed via api\")\n\tif err != nil {\n\t\treturn JsonError(\"archive delete error\", 500)\n\t}\n\n\treturn JsonSuccess(\"OK\")\n}", "func (r *AlbumsService) Get(albumId string) *AlbumsGetCall {\n\tc := &AlbumsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.albumId = albumId\n\treturn c\n}", "func DeleteBook(book_id int32, session db.Session) error {\n\tif session == nil {\n\t\treturn errors.New(\"session is 
nil\")\n\t}\n\n\tbook := Book{Id: int32(book_id)}\n\terr := book.Delete(session)\n\treturn err\n}", "func PostDeleteTrack(w http.ResponseWriter, r *http.Request) {\n\n\treqData := map[string]string{}\n\n\t// Parse JSON Data\n\tdec := json.NewDecoder(r.Body)\n\terr := dec.Decode(&reqData)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tt := reqData[\"track_id\"]\n\n\tcontext.tq.remove(t)\n\n\tw.WriteHeader(204)\n\tw.Write([]byte(`{\"status\":\"deleted\", \"track\":\"` + t + `\"}`))\n}", "func (m *Microservice) DeleteMicroserviceAgent() error {\n\tif m.AgentID == \"\" {\n\t\treturn nil\n\t}\n\tzap.S().Infof(\"Deleting microservice's agent managed object [id=%s]\", m.AgentID)\n\n\t_, err := m.Client.Inventory.Delete(\n\t\tm.WithServiceUser(),\n\t\tm.AgentID,\n\t)\n\tif err != nil {\n\t\tzap.S().Errorf(\"Could not delete microservice's agent managed object. %s\", err)\n\t}\n\treturn err\n}", "func handlerAdminDeleteTrack(w http.ResponseWriter, r *http.Request) {\n\tsession, err := mgo.Dial(db.HostURL)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer session.Close()\n\t//Deletes all tracks in db\n\t_, err = session.DB(db.Databasename).C(db.TrackCollectionName).RemoveAll(bson.M{})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n}", "func (s *Setup) DeleteGenre(w http.ResponseWriter, r *http.Request) {\n\tid, err := s.IDParser(chi.URLParam(r, \"id\"))\n\tif err != nil {\n\t\terrhandler.DecodeError(w, r, s.logger, errParseInt, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif err := s.model.DeleteGenre(r.Context(), id); err != nil {\n\t\terrhandler.DecodeError(w, r, s.logger, errDelete, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\ts.ToJSON(w, http.StatusOK, &errhandler.APIMessage{Message: \"Genre deleted successfully!\"})\n}", "func (a *ApiDB) DeleteContract(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\n\tvars := mux.Vars(r)\n\tidContract, err := strconv.Atoi(vars[\"id\"])\n\n\tif err != nil {\n\t\t//w.WriteHeader(http.StatusBadRequest)\n\t\tio.WriteString(w, `{\"message\":\"can not convert id as int\"}`)\n\n\t\treturn\n\t}\n\n\tres, _ := BUSINESS.DeleteContract(a.Db, idContract)\n\n\tif res {\n\t\tio.WriteString(w, `{\n\t\t\t\t\t\t\"status\": 200,\n\t\t\t\t\t\t\"message\": \"Delete contract success\",\n\t\t\t\t\t\t\"data\": {\n\t\t\t\t\t\t\t\"status\": 1\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}`)\n\t\treturn\n\t}\n\tio.WriteString(w, `{\"message\" : \"Can’t delete contract\"}`)\n}", "func (b *PhotosSaveBuilder) AlbumID(v int) *PhotosSaveBuilder {\n\tb.Params[\"album_id\"] = v\n\treturn b\n}", "func (client *GalleryImageVersionsClient) delete(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, options *GalleryImageVersionsBeginDeleteOptions) (*azcore.Response, error) {\n\treq, err := client.deleteCreateRequest(ctx, resourceGroupName, galleryName, galleryImageName, galleryImageVersionName, options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := client.con.Pipeline().Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !resp.HasStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent) {\n\t\treturn nil, client.deleteHandleError(resp)\n\t}\n\treturn resp, nil\n}", "func (s *Sonar) DeleteProject(url, token string, projectKey string) error {\n\turl = strings.TrimSuffix(url, \"/\")\n\tpath := fmt.Sprintf(\"%s/api/projects/delete?project=%s\", url, projectKey)\n\n\tlog.Infof(\"test path:%s\", path)\n\treq, err := 
http.NewRequest(http.MethodPost, path, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// -u your-token: , colon(:) is needed.\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Basic %s\", base64.StdEncoding.EncodeToString([]byte(token+\":\"))))\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tlog.Errorf(\"Fail to delete sonarqube project as %s\", err.Error())\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Errorf(\"Fail to delete sonarqube project as %s\", err.Error())\n\t\treturn err\n\t}\n\n\tif resp.StatusCode/100 == 2 {\n\t\treturn nil\n\t}\n\n\terr = fmt.Errorf(\"Fail to delete sonarqube project as %s, resp code: %v \", body, resp.StatusCode)\n\treturn err\n}", "func resourceAliyunDatahubProjectDelete(d *schema.ResourceData, meta interface{}) error {\n\tdh := meta.(*AliyunClient).dhconn\n\n\tprojectName := d.Id()\n\treturn resource.Retry(3*time.Minute, func() *resource.RetryError {\n\t\t_, err := dh.GetProject(projectName)\n\t\tif err != nil && !NotFoundError(err) {\n\t\t\treturn resource.RetryableError(fmt.Errorf(\"when deleting project '%s', failed to access it with error: %s\", projectName, err))\n\t\t}\n\n\t\terr = dh.DeleteProject(projectName)\n\t\tif err == nil || NotFoundError(err) {\n\t\t\treturn nil\n\t\t}\n\t\tif IsExceptedErrors(err, []string{\"AuthFailed\", \"InvalidStatus\", \"ValidationFailed\"}) {\n\t\t\treturn resource.RetryableError(fmt.Errorf(\"Deleting project '%s' timeout and got an error: %#v.\", projectName, err))\n\t\t}\n\n\t\treturn resource.RetryableError(fmt.Errorf(\"Deleting project '%s' timeout.\", projectName))\n\t})\n\n}", "func NewAlbumDiscogService(s *Session) AlbumDiscogService {\n\tservice := AlbumDiscogService{session: s}\n\treturn service\n}", "func (d *ceph) Delete(op *operations.Operation) error {\n\t// Test if the pool exists.\n\tpoolExists := d.osdPoolExists()\n\tif !poolExists {\n\t\td.logger.Warn(\"Pool does not exist\", logger.Ctx{\"pool\": d.config[\"ceph.osd.pool_name\"], \"cluster\": d.config[\"ceph.cluster_name\"]})\n\t}\n\n\t// Check whether we own the pool and only remove in this case.\n\tif shared.IsTrue(d.config[\"volatile.pool.pristine\"]) {\n\t\t// Delete the osd pool.\n\t\tif poolExists {\n\t\t\terr := d.osdDeletePool()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// If the user completely destroyed it, call it done.\n\tif !shared.PathExists(GetPoolMountPath(d.name)) {\n\t\treturn nil\n\t}\n\n\t// On delete, wipe everything in the directory.\n\terr := wipeDirectory(GetPoolMountPath(d.name))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (c *TestClient) DeleteImage(project, name string) error {\n\tif c.DeleteImageFn != nil {\n\t\treturn c.DeleteImageFn(project, name)\n\t}\n\treturn c.client.DeleteImage(project, name)\n}", "func (c *CompositeProfileClient) DeleteCompositeProfile(cpf string, p string, ca string, v string) error {\n\tkey := CompositeProfileKey{\n\t\tName: cpf,\n\t\tProject: p,\n\t\tCompositeApp: ca,\n\t\tVersion: v,\n\t}\n\n\terr := db.DBconn.Remove(c.storeName, key)\n\tif err != nil {\n\t\treturn pkgerrors.Wrap(err, \"Delete CompositeProfile entry;\")\n\t}\n\treturn nil\n}", "func AgbotDelete(urlSuffix, credentials string, goodHttpCodes []int) (httpCode int) {\n\t// get message printer\n\tmsgPrinter := i18n.GetMessagePrinter()\n\n\t// check the agbot url\n\tagbot_url := GetAgbotSecureAPIUrlBase()\n\tif agbot_url == \"\" {\n\t\tFatal(HTTP_ERROR, msgPrinter.Sprintf(\"HZN_AGBOT_URL is 
not defined\"))\n\t}\n\n\t// query the agbot secure api\n\thttpCode = ExchangeDelete(\"Agbot\", agbot_url, urlSuffix, credentials, goodHttpCodes)\n\n\t// ExchangeDelete checks the http code, so we can just directly return\n\treturn httpCode\n}", "func CreateAlbum(req CreateAlbumRequest) error {\n\tinsertAlbum := `INSERT INTO Album (year, title, date) VALUES (?, ?, ?)`\n\tif _, err := configure.SQL.Query(insertAlbum, req.Year, req.AlbumTitle, req.AlbumDate); err != nil {\n\t\tlog.Println(\"Failed on inserting album\")\n\t\treturn err\n\t}\n\treturn nil\n}", "func DeletePerson(db *sql.DB) {}", "func (n *Repository) DeleteArtifact(ar *Artifact, hashs ...string) error {\n\tpomURL := n.generateURL(ar, suffixPom)\n\tif err := n.delete(pomURL); err != nil {\n\t\treturn err\n\t}\n\n\tfileURL := n.generateURL(ar, ar.extension())\n\tif err := n.delete(fileURL); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, h := range hashs {\n\t\t//if we can't delete hash we continue\n\t\tn.deleteHash(ar, h)\n\t}\n\treturn nil\n}", "func (svc *FibreChannelInitiatorAliasService) DeleteFibreChannelInitiatorAlias(id string) error {\n\tif len(id) == 0 {\n\t\treturn fmt.Errorf(\"DeleteFibreChannelInitiatorAlias: invalid parameter specified, %s\", id)\n\t}\n\terr := svc.objectSet.DeleteObject(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (pubManager PublicationManager) Delete(id int64) error {\n\n\tvar title string\n\trow := pubManager.dbGetMasterFile.QueryRow(id)\n\terr := row.Scan(&title)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// delete all purchases relative to this publication\n\t_, err = pubManager.db.Exec(`DELETE FROM purchase WHERE publication_id=?`, id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// delete the publication\n\t_, err = pubManager.db.Exec(\"DELETE FROM publication WHERE id = ?\", id)\n\treturn err\n}", "func delete_repo(repoUrl string) error {\n\tvar arg0 = \"rm\"\n\tvar arg1 = \"-rf\"\n\tvar arg2 = CLONES_DIR + \"/\" + UrlToDir(repoUrl)\n\n\tcmd := exec.Command(arg0, arg1, arg2)\n\terr := cmd.Run()\n\n\tif err != nil {\n\t\tfmt.Print(\"an error occured during the deletion 'rm -rf' of: \" + arg2 + \". \")\n\t\tfmt.Printf(\"error is: %s\\n\", err)\n\t\treturn err\n\t}\n\n\t//fmt.Println(\"local git repo deleted successfully\")\n\treturn nil\n}", "func (b *Book) Delete(db XODB) error {\n\tvar err error\n\n\t// if doesn't exist, bail\n\tif !b._exists {\n\t\treturn nil\n\t}\n\n\t// if deleted, bail\n\tif b._deleted {\n\t\treturn nil\n\t}\n\n\t// sql query\n\tconst sqlstr = `DELETE FROM booktest.books WHERE book_id = :1`\n\n\t// run query\n\tXOLog(sqlstr, b.BookID)\n\t_, err = db.Exec(sqlstr, b.BookID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// set deleted\n\tb._deleted = true\n\n\treturn nil\n}", "func ShowAlbum() ([]string, *utils.ApplicationError) {\n\treturn model.ShowAlbum()\n}" ]
[ "0.7706165", "0.7658952", "0.75001156", "0.6861333", "0.64253026", "0.63472575", "0.6216661", "0.5676933", "0.55586964", "0.544517", "0.543593", "0.5422393", "0.52709895", "0.52666813", "0.51891136", "0.5181241", "0.5172769", "0.5160368", "0.5138855", "0.51251847", "0.5108209", "0.50666946", "0.50512946", "0.5047464", "0.5029657", "0.500955", "0.49977583", "0.49861637", "0.49639642", "0.49535805", "0.49424893", "0.49417233", "0.49336073", "0.4915441", "0.48977542", "0.4884921", "0.48577598", "0.4855134", "0.4851362", "0.4847973", "0.48476648", "0.4846355", "0.48385423", "0.48370814", "0.48365575", "0.48337284", "0.48306006", "0.48304182", "0.48179093", "0.48164213", "0.48118034", "0.480939", "0.48087507", "0.4802889", "0.47982424", "0.47898358", "0.47861525", "0.47857153", "0.4784528", "0.4769443", "0.4769186", "0.4755861", "0.47507352", "0.47482038", "0.4746729", "0.4743518", "0.47400784", "0.47378427", "0.4730068", "0.47217014", "0.47192198", "0.47096926", "0.470414", "0.46974045", "0.46871275", "0.4686305", "0.46829498", "0.46791306", "0.46665055", "0.46664882", "0.46636042", "0.46516392", "0.46508414", "0.46425557", "0.46420577", "0.46309438", "0.4628057", "0.46248123", "0.4621196", "0.46196705", "0.46142566", "0.4598182", "0.45955852", "0.45925444", "0.45877966", "0.45830384", "0.4582962", "0.45799357", "0.45794174", "0.4579143" ]
0.77250826
0
NewCreateStorageV1CSINodeOK creates CreateStorageV1CSINodeOK with default headers values
func NewCreateStorageV1CSINodeOK() *CreateStorageV1CSINodeOK { return &CreateStorageV1CSINodeOK{} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *CreateStorageV1CSINodeOK) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *CreateStorageV1CSINodeOK {\n\to.Payload = payload\n\treturn o\n}", "func CreateBucket(w http.ResponseWriter, r *http.Request) *appError {\n decoder := json.NewDecoder(r.Body)\n var ecsBucket ECSBucket\n err := decoder.Decode(&ecsBucket)\n if err != nil {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Can't decode JSON data\"}\n }\n headers := make(map[string][]string)\n if ecsBucket.ReplicationGroup != \"\" {\n headers[\"x-emc-vpool\"] = []string{ecsBucket.ReplicationGroup}\n }\n if ecsBucket.MetadataSearch != \"\" {\n headers[\"x-emc-metadata-search\"] = []string{ecsBucket.MetadataSearch}\n }\n if ecsBucket.EnableADO {\n headers[\"x-emc-is-stale-allowed\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-is-stale-allowed\"] = []string{\"false\"}\n }\n if ecsBucket.EnableFS {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableCompliance {\n headers[\"x-emc-compliance-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-compliance-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableEncryption {\n headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"false\"}\n }\n var bucketCreateResponse Response\n if ecsBucket.Api == \"s3\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = s3Request(s3, ecsBucket.Name, \"PUT\", \"/\", headers, \"\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n if bucketCreateResponse.Code == 200 {\n rendering.JSON(w, http.StatusOK, ecsBucket.Name)\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"swift\" {\n bucketCreateResponse, err = swiftRequest(ecsBucket.Endpoint, ecsBucket.User, ecsBucket.Password, ecsBucket.Name, \"PUT\", \"/\", headers, \"\")\n log.Print(bucketCreateResponse)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 {\n rendering.JSON(w, http.StatusOK, ecsBucket.Name)\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"atmos\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = atmosRequest(ecsBucket.Endpoint, s3.AccessKey, s3.SecretKey, \"\", \"PUT\", \"/rest/subtenant\", headers, \"\")\n if err != nil {\n log.Print(err)\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 {\n rendering.JSON(w, http.StatusOK, bucketCreateResponse.ResponseHeaders[\"Subtenantid\"][0])\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n }\n\n return nil\n}", "func CreateBucket(w 
http.ResponseWriter, r *http.Request) *appError {\n decoder := json.NewDecoder(r.Body)\n var ecsBucket ECSBucket\n err := decoder.Decode(&ecsBucket)\n if err != nil {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Can't decode JSON data\"}\n }\n headers := make(map[string][]string)\n if ecsBucket.ReplicationGroup != \"\" {\n headers[\"x-emc-vpool\"] = []string{ecsBucket.ReplicationGroup}\n }\n if ecsBucket.MetadataSearch != \"\" {\n headers[\"x-emc-metadata-search\"] = []string{ecsBucket.MetadataSearch}\n }\n if ecsBucket.EnableADO {\n headers[\"x-emc-is-stale-allowed\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-is-stale-allowed\"] = []string{\"false\"}\n }\n if ecsBucket.EnableFS {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableCompliance {\n headers[\"x-emc-compliance-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-compliance-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableEncryption {\n headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"false\"}\n }\n retentionEnabled := false\n headers[\"x-emc-retention-period\"] = []string{\"0\"}\n if ecsBucket.Retention != \"\" {\n days, err := strconv.ParseInt(ecsBucket.Retention, 10, 64)\n if err == nil {\n if days > 0 {\n seconds := days * 24 * 3600\n headers[\"x-emc-retention-period\"] = []string{int64toString(seconds)}\n retentionEnabled = true\n }\n }\n }\n var expirationCurrentVersions int64\n expirationCurrentVersions = 0\n if ecsBucket.ExpirationCurrentVersions != \"\" {\n days, err := strconv.ParseInt(ecsBucket.ExpirationCurrentVersions, 10, 64)\n if err == nil {\n expirationCurrentVersions = days\n }\n }\n var expirationNonCurrentVersions int64\n expirationNonCurrentVersions = 0\n if ecsBucket.ExpirationNonCurrentVersions != \"\" {\n days, err := strconv.ParseInt(ecsBucket.ExpirationNonCurrentVersions, 10, 64)\n if err == nil && ecsBucket.EnableVersioning {\n expirationNonCurrentVersions = days\n }\n }\n var bucketCreateResponse Response\n if ecsBucket.Api == \"s3\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = s3Request(s3, ecsBucket.Name, \"PUT\", \"/\", headers, \"\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: err.Error()}\n }\n versioningStatusOK := true\n lifecyclePolicyStatusOK := true\n // If the bucket has been created\n if bucketCreateResponse.Code == 200 {\n if !retentionEnabled && ecsBucket.EnableVersioning {\n // Enable versioning\n enableVersioningHeaders := map[string][]string{}\n enableVersioningHeaders[\"Content-Type\"] = []string{\"application/xml\"}\n versioningConfiguration := `\n <VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\n <Status>Enabled</Status>\n <MfaDelete>Disabled</MfaDelete>\n </VersioningConfiguration>\n `\n enableVersioningResponse, _ := s3Request(s3, ecsBucket.Name, \"PUT\", \"/?versioning\", enableVersioningHeaders, versioningConfiguration)\n if enableVersioningResponse.Code != 200 {\n versioningStatusOK = false\n }\n }\n if expirationCurrentVersions > 0 || expirationNonCurrentVersions > 0 {\n lifecyclePolicyHeaders := map[string][]string{}\n lifecyclePolicyHeaders[\"Content-Type\"] = 
[]string{\"application/xml\"}\n lifecyclePolicyConfiguration := `\n <LifecycleConfiguration>\n <Rule>\n <ID>expiration</ID>\n <Prefix></Prefix>\n <Status>Enabled</Status>\n `\n if expirationCurrentVersions > 0 && expirationNonCurrentVersions > 0 {\n // Enable expiration for both current and non current versions\n lifecyclePolicyConfiguration += \"<Expiration><Days>\" + ecsBucket.ExpirationCurrentVersions + \"</Days></Expiration>\"\n lifecyclePolicyConfiguration += \"<NoncurrentVersionExpiration><NoncurrentDays>\" + ecsBucket.ExpirationNonCurrentVersions + \"</NoncurrentDays></NoncurrentVersionExpiration>\"\n } else {\n if expirationCurrentVersions > 0 {\n // Enable expiration for current versions only\n lifecyclePolicyConfiguration += \"<Expiration><Days>\" + ecsBucket.ExpirationCurrentVersions + \"</Days></Expiration>\"\n }\n if expirationNonCurrentVersions > 0 {\n // Enable expiration for non current versions only\n // To fix a bug in ECS 3.0 where an expiration for non current version can't be set if there's no expiration set for current versions\n lifecyclePolicyConfiguration += \"<Expiration><Days>1000000</Days></Expiration>\"\n lifecyclePolicyConfiguration += \"<NoncurrentVersionExpiration><NoncurrentDays>\" + ecsBucket.ExpirationNonCurrentVersions + \"</NoncurrentDays></NoncurrentVersionExpiration>\"\n }\n }\n lifecyclePolicyConfiguration += `\n </Rule>\n </LifecycleConfiguration>\n `\n lifecyclePolicyResponse, _ := s3Request(s3, ecsBucket.Name, \"PUT\", \"/?lifecycle\", lifecyclePolicyHeaders, lifecyclePolicyConfiguration)\n if lifecyclePolicyResponse.Code != 200 {\n lifecyclePolicyStatusOK = false\n }\n }\n if versioningStatusOK && lifecyclePolicyStatusOK {\n rendering.JSON(w, http.StatusOK, \"\")\n } else {\n message := \"\"\n if !versioningStatusOK {\n message += \" Versioning can't be enabled.\"\n }\n if !lifecyclePolicyStatusOK {\n message += \" Expiration can't be set.\"\n }\n rendering.JSON(w, http.StatusOK, message)\n }\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"swift\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = swiftRequest(ecsBucket.Endpoint, s3.AccessKey, ecsBucket.Password, ecsBucket.Name, \"PUT\", \"/\", headers, \"\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: err.Error()}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 {\n rendering.JSON(w, http.StatusOK, ecsBucket.Name)\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"atmos\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = atmosRequest(ecsBucket.Endpoint, s3.AccessKey, s3.SecretKey, \"\", \"PUT\", \"/rest/subtenant\", headers, \"\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: err.Error()}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 {\n rendering.JSON(w, http.StatusOK, bucketCreateResponse.ResponseHeaders[\"Subtenantid\"][0])\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n }\n\n return 
nil\n}", "func (o *CreateStorageV1CSINodeCreated) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *CreateStorageV1CSINodeCreated {\n\to.Payload = payload\n\treturn o\n}", "func NewCreateStorageV1CSINodeCreated() *CreateStorageV1CSINodeCreated {\n\n\treturn &CreateStorageV1CSINodeCreated{}\n}", "func NewCreateStorageV1CSINodeUnauthorized() *CreateStorageV1CSINodeUnauthorized {\n\n\treturn &CreateStorageV1CSINodeUnauthorized{}\n}", "func NewWatchStorageV1CSINodeListOK() *WatchStorageV1CSINodeListOK {\n\treturn &WatchStorageV1CSINodeListOK{}\n}", "func CreateBucket(w http.ResponseWriter, r *http.Request) *appError {\n session, err := store.Get(r, \"session-name\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n s3 := S3{\n EndPointString: session.Values[\"Endpoint\"].(string),\n AccessKey: session.Values[\"AccessKey\"].(string),\n SecretKey: session.Values[\"SecretKey\"].(string),\n Namespace: session.Values[\"Namespace\"].(string),\n }\n\n decoder := json.NewDecoder(r.Body)\n var bucket NewBucket\n err = decoder.Decode(&bucket)\n if err != nil {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Can't decode JSON data\"}\n }\n\n // Add the necessary headers for Metadata Search and Access During Outage\n createBucketHeaders := map[string][]string{}\n createBucketHeaders[\"Content-Type\"] = []string{\"application/xml\"}\n createBucketHeaders[\"x-emc-is-stale-allowed\"] = []string{\"true\"}\n createBucketHeaders[\"x-emc-metadata-search\"] = []string{\"ObjectName,x-amz-meta-image-width;Integer,x-amz-meta-image-height;Integer,x-amz-meta-gps-latitude;Decimal,x-amz-meta-gps-longitude;Decimal\"}\n\n createBucketResponse, _ := s3Request(s3, bucket.Name, \"PUT\", \"/\", createBucketHeaders, \"\")\n\n // Enable CORS after the bucket creation to allow the web browser to send requests directly to ECS\n if createBucketResponse.Code == 200 {\n enableBucketCorsHeaders := map[string][]string{}\n enableBucketCorsHeaders[\"Content-Type\"] = []string{\"application/xml\"}\n corsConfiguration := `\n <CORSConfiguration>\n <CORSRule>\n <AllowedOrigin>*</AllowedOrigin>\n <AllowedHeader>*</AllowedHeader>\n <ExposeHeader>x-amz-meta-image-width</ExposeHeader>\n <ExposeHeader>x-amz-meta-image-height</ExposeHeader>\n <ExposeHeader>x-amz-meta-gps-latitude</ExposeHeader>\n <ExposeHeader>x-amz-meta-gps-longitude</ExposeHeader>\n <AllowedMethod>HEAD</AllowedMethod>\n <AllowedMethod>GET</AllowedMethod>\n <AllowedMethod>PUT</AllowedMethod>\n <AllowedMethod>POST</AllowedMethod>\n <AllowedMethod>DELETE</AllowedMethod>\n </CORSRule>\n </CORSConfiguration>\n `\n enableBucketCorsResponse, _ := s3Request(s3, bucket.Name, \"PUT\", \"/?cors\", enableBucketCorsHeaders, corsConfiguration)\n if enableBucketCorsResponse.Code == 200 {\n rendering.JSON(w, http.StatusOK, struct {\n CorsConfiguration string `json:\"cors_configuration\"`\n Bucket string `json:\"bucket\"`\n } {\n CorsConfiguration: corsConfiguration,\n Bucket: bucket.Name,\n })\n } else {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Bucket created, but CORS can't be enabled\"}\n }\n } else {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Bucket can't be created\"}\n }\n return nil\n}", "func NewReplaceStorageV1CSINodeOK() *ReplaceStorageV1CSINodeOK {\n\n\treturn &ReplaceStorageV1CSINodeOK{}\n}", "func NewCreateStorageV1CSINodeAccepted() *CreateStorageV1CSINodeAccepted {\n\n\treturn 
&CreateStorageV1CSINodeAccepted{}\n}", "func CreateDescribeLogstoreStorageRequest() (request *DescribeLogstoreStorageRequest) {\n\trequest = &DescribeLogstoreStorageRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Sas\", \"2018-12-03\", \"DescribeLogstoreStorage\", \"sas\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func NewReplaceStorageV1CSINodeCreated() *ReplaceStorageV1CSINodeCreated {\n\n\treturn &ReplaceStorageV1CSINodeCreated{}\n}", "func (client IdentityClient) createTagDefault(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/tagDefaults\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateTagDefaultResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func NewReplaceStorageV1CSINodeOK() *ReplaceStorageV1CSINodeOK {\n\treturn &ReplaceStorageV1CSINodeOK{}\n}", "func NewCreateIOCDefault(code int) *CreateIOCDefault {\n\treturn &CreateIOCDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewReplaceStorageV1CSINodeCreated() *ReplaceStorageV1CSINodeCreated {\n\treturn &ReplaceStorageV1CSINodeCreated{}\n}", "func newNs(ctx context.Context, cl client.Client, name string) error {\n\tns := &corev1.Namespace{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t},\n\t}\n\tif err := cl.Create(ctx, ns); err != nil {\n\t\tif !errors.IsAlreadyExists(err) {\n\t\t\treturn fmt.Errorf(\"failed to create namespace %s: %v\", ns.Name, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (client StorageGatewayClient) createFileSystem(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/storageGateways/{storageGatewayId}/fileSystems\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateFileSystemResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func New() gocsi.StoragePluginProvider {\n\tsvc := service.New()\n\treturn &gocsi.StoragePlugin{\n\t\tController: svc,\n\t\tIdentity: svc,\n\t\tNode: svc,\n\t\tBeforeServe: svc.BeforeServe,\n\t\tRegisterAdditionalServers: svc.RegisterAdditionalServers,\n\n\t\tEnvVars: []string{\n\t\t\t// Enable request validation\n\t\t\tgocsi.EnvVarSpecReqValidation + \"=true\",\n\n\t\t\t// Enable serial volume access\n\t\t\tgocsi.EnvVarSerialVolAccess + \"=true\",\n\t\t},\n\t}\n}", "func (client StorageGatewayClient) createStorageGateway(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/storageGateways\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateStorageGatewayResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil 
{\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (sdk *SDK) NewNode(prefer *cloudsvr.PreferAttrs) (*cloudsvr.CloudNode, *cloudsvr.PreferAttrs, error) {\n\n\tvar (\n\t\tpassword, _ = utils.GenPassword(24)\n\t\treq = &CreateInstanceRequest{\n\t\t\tImageID: OsImage,\n\t\t\tPassword: password,\n\t\t\tInstanceName: NodeName,\n\t\t\tInstanceChargeType: \"PostPaid\", // require RMB 100+\n\t\t\tSecurityGroupID: \"whatever\", // will be automatic rewrite\n\t\t\tInternetChargeType: \"PayByTraffic\", // traffic payment\n\t\t\tInternetMaxBandwidthOut: \"100\", // 100M\n\t\t\tLabels: NodeLabels,\n\t\t}\n\t)\n\n\t// if prefered attributes set, use prefer region & instance-type\n\tif prefer != nil && prefer.Valid() == nil {\n\t\tvar (\n\t\t\treg = prefer.RegionOrZone\n\t\t\ttyp = prefer.InstanceType\n\t\t)\n\t\tlog.Printf(\"create aliyun ecs by using prefered region %s, instance type %s ...\", reg, typ)\n\n\t\treq.RegionID = reg // cn-beijing\n\t\treq.InstanceType = typ // ecs.n4.large\n\n\t\tcreated, err := sdk.createNode(req)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\tlog.Printf(\"created prefered aliyun ecs succeed: %s\", created.ID)\n\t\treturn created, prefer, nil\n\t}\n\n\tlog.Infoln(\"creating aliyun ecs by trying all regions & types ...\")\n\n\t// if prefered created failed, or without prefer region & instance-type\n\t// try best on all region & instance-types to create the new aliyun ecs\n\tvar (\n\t\tregions []RegionType // all of aliyun regions\n\t\ttypes []InstanceTypeItemType // all of instance types within given range of mems & cpus\n\t\terr error\n\t\tcreated *cloudsvr.CloudNode\n\t)\n\n\t// list all regions\n\tregions, err = sdk.ListRegions()\n\tif err != nil {\n\t\tlog.Errorf(\"sdk.NewNode.ListRegions() error: %v\", err)\n\t\treturn nil, nil, err\n\t}\n\n\t// list specified range of instance types\n\ttypes, err = sdk.ListInstanceTypes(2, 4, 2, 8) // TODO range of given cpus/mems ranges\n\tif err != nil {\n\t\tlog.Errorf(\"sdk.NewNode.ListInstanceTypes() error: %v\", err)\n\t\treturn nil, nil, err\n\t}\n\n\tvar (\n\t\tuseRegionID, useInsType string\n\t)\n\t// range all regions & types to try to create ecs instance\n\tfor _, reg := range regions {\n\t\tfor _, typ := range types {\n\t\t\treq.RegionID = reg.RegionID // cn-beijing\n\t\t\treq.InstanceType = typ.InstanceTypeID // ecs.n4.large\n\n\t\t\t// if created succeed, directly return\n\t\t\tcreated, err = sdk.createNode(req)\n\t\t\tif err == nil {\n\t\t\t\tuseRegionID, useInsType = reg.RegionID, typ.InstanceTypeID\n\t\t\t\tgoto END\n\t\t\t}\n\n\t\t\tif sdk.isFatalError(err) {\n\t\t\t\tlog.Errorf(\"create aliyun ecs got fatal error, stop retry: %v\", err)\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\n\t\t\tlog.Warnf(\"create aliyun ecs failed: %v, will retry another region or type\", err)\n\t\t}\n\t}\n\nEND:\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tlog.Printf(\"created aliyun ecs %s at %s and type is %s\", created.ID, useRegionID, useInsType)\n\treturn created, &cloudsvr.PreferAttrs{RegionOrZone: useRegionID, InstanceType: useInsType}, nil\n}", "func NewCreateClusterRequestWithoutParam() *CreateClusterRequest {\n\n return &CreateClusterRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/regions/{regionId}/clusters\",\n Method: \"POST\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}", "func (o *ReplaceStorageV1CSINodeCreated) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *ReplaceStorageV1CSINodeCreated {\n\to.Payload = 
payload\n\treturn o\n}", "func NewCreateIOCOK() *CreateIOCOK {\n\treturn &CreateIOCOK{}\n}", "func (d *driver) CreateStorage(cr *opapi.ImageRegistry, modified *bool) error {\n\tsvc, err := d.getSVC()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tic, err := util.GetInstallConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcv, err := util.GetClusterVersionConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i := 0; i < 5000; i++ {\n\t\tif len(d.Config.Bucket) == 0 {\n\t\t\td.Config.Bucket = fmt.Sprintf(\"%s-%s-%s-%s\", clusterconfig.StoragePrefix, d.Config.Region, strings.Replace(string(cv.Spec.ClusterID), \"-\", \"\", -1), strings.Replace(string(uuid.NewUUID()), \"-\", \"\", -1))[0:62]\n\t\t}\n\n\t\t_, err := svc.CreateBucket(&s3.CreateBucketInput{\n\t\t\tBucket: aws.String(d.Config.Bucket),\n\t\t})\n\t\tif err != nil {\n\t\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\t\tswitch aerr.Code() {\n\t\t\t\tcase s3.ErrCodeBucketAlreadyExists:\n\t\t\t\t\tif cr.Spec.Storage.S3.Bucket != \"\" {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\td.Config.Bucket = \"\"\n\t\t\t\t\tcontinue\n\t\t\t\tdefault:\n\t\t\t\t\tutil.UpdateCondition(cr, opapi.StorageExists, operatorapi.ConditionFalse, aerr.Code(), aerr.Error(), modified)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tbreak\n\t}\n\n\tif len(cr.Spec.Storage.S3.Bucket) == 0 && len(d.Config.Bucket) == 0 {\n\t\tutil.UpdateCondition(cr, opapi.StorageExists, operatorapi.ConditionFalse, \"Unable to Generate Unique Bucket Name\", \"\", modified)\n\t\treturn fmt.Errorf(\"unable to generate a unique s3 bucket name\")\n\t}\n\n\t// Wait until the bucket exists\n\tif err := svc.WaitUntilBucketExists(&s3.HeadBucketInput{\n\t\tBucket: aws.String(d.Config.Bucket),\n\t}); err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tutil.UpdateCondition(cr, opapi.StorageExists, operatorapi.ConditionFalse, aerr.Code(), aerr.Error(), modified)\n\t\t}\n\n\t\treturn err\n\t}\n\n\t// Tag the bucket with the openshiftClusterID\n\t// along with any user defined tags from the cluster configuration\n\tif ic.Platform.AWS != nil {\n\t\tvar tagSet []*s3.Tag\n\t\ttagSet = append(tagSet, &s3.Tag{Key: aws.String(\"openshiftClusterID\"), Value: aws.String(string(cv.Spec.ClusterID))})\n\t\tfor k, v := range ic.Platform.AWS.UserTags {\n\t\t\ttagSet = append(tagSet, &s3.Tag{Key: aws.String(k), Value: aws.String(v)})\n\t\t}\n\n\t\t_, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{\n\t\t\tBucket: aws.String(d.Config.Bucket),\n\t\t\tTagging: &s3.Tagging{\n\t\t\t\tTagSet: tagSet,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\t\tutil.UpdateCondition(cr, opapi.StorageTagged, operatorapi.ConditionFalse, aerr.Code(), aerr.Error(), modified)\n\t\t\t} else {\n\t\t\t\tutil.UpdateCondition(cr, opapi.StorageTagged, operatorapi.ConditionFalse, \"Unknown Error Occurred\", err.Error(), modified)\n\t\t\t}\n\t\t} else {\n\t\t\tutil.UpdateCondition(cr, opapi.StorageTagged, operatorapi.ConditionTrue, \"Tagging Successful\", \"UserTags were successfully applied to the S3 bucket\", modified)\n\t\t}\n\t}\n\n\t// Enable default encryption on the bucket\n\t_, err = svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{\n\t\tBucket: aws.String(d.Config.Bucket),\n\t\tServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{\n\t\t\tRules: []*s3.ServerSideEncryptionRule{\n\t\t\t\t{\n\t\t\t\t\tApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{\n\t\t\t\t\t\tSSEAlgorithm: 
aws.String(s3.ServerSideEncryptionAes256),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tutil.UpdateCondition(cr, opapi.StorageEncrypted, operatorapi.ConditionFalse, aerr.Code(), aerr.Error(), modified)\n\t\t} else {\n\t\t\tutil.UpdateCondition(cr, opapi.StorageEncrypted, operatorapi.ConditionFalse, \"Unknown Error Occurred\", err.Error(), modified)\n\t\t}\n\t} else {\n\t\tutil.UpdateCondition(cr, opapi.StorageEncrypted, operatorapi.ConditionTrue, \"Encryption Successful\", \"Default encryption was successfully enabled on the S3 bucket\", modified)\n\t}\n\n\t// Enable default incomplete multipart upload cleanup after one (1) day\n\t_, err = svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{\n\t\tBucket: aws.String(d.Config.Bucket),\n\t\tLifecycleConfiguration: &s3.BucketLifecycleConfiguration{\n\t\t\tRules: []*s3.LifecycleRule{\n\t\t\t\t{\n\t\t\t\t\tID: aws.String(\"cleanup-incomplete-multipart-registry-uploads\"),\n\t\t\t\t\tStatus: aws.String(\"Enabled\"),\n\t\t\t\t\tFilter: &s3.LifecycleRuleFilter{\n\t\t\t\t\t\tPrefix: aws.String(\"\"),\n\t\t\t\t\t},\n\t\t\t\t\tAbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{\n\t\t\t\t\t\tDaysAfterInitiation: aws.Int64(1),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tutil.UpdateCondition(cr, opapi.StorageIncompleteUploadCleanupEnabled, operatorapi.ConditionFalse, aerr.Code(), aerr.Error(), modified)\n\t\t} else {\n\t\t\tutil.UpdateCondition(cr, opapi.StorageIncompleteUploadCleanupEnabled, operatorapi.ConditionFalse, \"Unknown Error Occurred\", err.Error(), modified)\n\t\t}\n\t} else {\n\t\tutil.UpdateCondition(cr, opapi.StorageIncompleteUploadCleanupEnabled, operatorapi.ConditionTrue, \"Enable Cleanup Successful\", \"Default cleanup of incomplete multipart uploads after one (1) day was successfully enabled\", modified)\n\t}\n\n\tcr.Status.Storage.State.S3 = d.Config\n\tcr.Status.Storage.Managed = true\n\n\tutil.UpdateCondition(cr, opapi.StorageExists, operatorapi.ConditionTrue, \"Creation Successful\", \"S3 bucket was successfully created\", modified)\n\n\treturn nil\n}", "func NewHeaders()(*Headers) {\n m := &Headers{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}", "func (c *Creator) New() (filestorage.FileStorage, error) {\n\tfs := New(c.apiKey, c.secret)\n\treturn fs, fs.SetBucket(c.defaultBucket)\n}", "func NewStorage(opts generic.RESTOptions, connection client.ConnectionInfoGetter, proxyTransport http.RoundTripper) NodeStorage {\n\tprefix := \"/minions\"\n\n\tnewListFunc := func() runtime.Object { return &api.NodeList{} }\n\tstorageInterface := opts.Decorator(\n\t\topts.Storage, cachesize.GetWatchCacheSizeByResource(cachesize.Nodes), &api.Node{}, prefix, node.Strategy, newListFunc)\n\n\tstore := &etcdgeneric.Etcd{\n\t\tNewFunc: func() runtime.Object { return &api.Node{} },\n\t\tNewListFunc: newListFunc,\n\t\tKeyRootFunc: func(ctx api.Context) string {\n\t\t\treturn prefix\n\t\t},\n\t\tKeyFunc: func(ctx api.Context, name string) (string, error) {\n\t\t\treturn etcdgeneric.NoNamespaceKeyFunc(ctx, prefix, name)\n\t\t},\n\t\tObjectNameFunc: func(obj runtime.Object) (string, error) {\n\t\t\treturn obj.(*api.Node).Name, nil\n\t\t},\n\t\tPredicateFunc: node.MatchNode,\n\t\tQualifiedResource: 
api.Resource(\"nodes\"),\n\t\tDeleteCollectionWorkers: opts.DeleteCollectionWorkers,\n\n\t\tCreateStrategy: node.Strategy,\n\t\tUpdateStrategy: node.Strategy,\n\t\tExportStrategy: node.Strategy,\n\n\t\tStorage: storageInterface,\n\t}\n\n\tstatusStore := *store\n\tstatusStore.UpdateStrategy = node.StatusStrategy\n\n\tnodeREST := &REST{store, connection, proxyTransport}\n\n\treturn NodeStorage{\n\t\tNode: nodeREST,\n\t\tStatus: &StatusREST{store: &statusStore},\n\t\tProxy: &noderest.ProxyREST{Store: store, Connection: client.ConnectionInfoGetter(nodeREST), ProxyTransport: proxyTransport},\n\t}\n}", "func (c *OutputService14ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (s *StorageClusterAPI) Create(w http.ResponseWriter, r *http.Request) {\n\tstorage := &config.StorageCluster{}\n\terr := api.GetJSONBodyFromRequest(r, storage)\n\tif err != nil {\n\t\tapi.Error(w, err)\n\t\treturn\n\t}\n\terr = s.storageClusterService.Save(storage)\n\tif err != nil {\n\t\tapi.Error(w, err)\n\t\treturn\n\t}\n\tapi.NoContent(w)\n}", "func (s *SmartContract) CreateOi(ctx contractapi.TransactionContextInterface, oiNumber string, saudacao string, despedida string, oidenovo string, pessoa string) error {\n\tOi := Oi{\n\t\tSaudacao: saudacao,\n\t\tDespedida: despedida,\n\t\tOidenovo: oidenovo,\n\t\tPessoa: pessoa,\n\t}\n\n\toiAsBytes, _ := json.Marshal(Oi)\n\n\treturn ctx.GetStub().PutState(oiNumber, oiAsBytes)\n}", "func (service *S3Service) newS3Request() *S3Request {\n return NewS3Request(service.accessKey, service.secretKey)\n}", "func NewStorage(namespace, name string) (*Storage, error) {\n\tif err := k8sutil.CreateCRD(name); err != nil {\n\t\treturn nil, err\n\t}\n\tcli := k8sutil.NewRESTClient()\n\treturn &Storage{\n\t\tNamespace: namespace,\n\t\tName: strings.ToLower(name),\n\t\trestcli: cli,\n\t}, nil\n}", "func createOIDCIssuer(client *azureclients.AzureClientWrapper, name, region, oidcResourceGroupName, storageAccountName, blobContainerName, subscriptionID, tenantID, publicKeyPath, outputDir string, resourceTags map[string]string, dryRun bool) (string, error) {\n\t// Add CCO's \"owned\" tag to resource tags map\n\tresourceTags[fmt.Sprintf(\"%s_%s\", ownedAzureResourceTagKeyPrefix, name)] = ownedAzureResourceTagValue\n\n\tstorageAccountKey := \"\"\n\tif !dryRun {\n\t\t// Ensure that the public key file can be read at the publicKeyPath before continuing\n\t\t_, err := os.ReadFile(publicKeyPath)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"unable to read public key file\")\n\t\t}\n\n\t\t// Ensure the resource group exists\n\t\terr = ensureResourceGroup(client, oidcResourceGroupName, region, resourceTags)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"failed to ensure resource group\")\n\t\t}\n\n\t\t// Ensure storage account exists\n\t\terr = ensureStorageAccount(client, storageAccountName, oidcResourceGroupName, region, resourceTags)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"failed to ensure storage account\")\n\t\t}\n\n\t\tstorageAccountKey, err = getStorageAccountKey(client, storageAccountName, oidcResourceGroupName)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"failed to get storage account key\")\n\t\t}\n\n\t\t// Ensure blob container exists\n\t\terr = ensureBlobContainer(client, oidcResourceGroupName, storageAccountName, blobContainerName)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"failed to create blob 
container\")\n\t\t}\n\t}\n\n\t// Upload OIDC documents (openid-configuration, jwks.json) to the blob container\n\toutputDirAbsPath, err := filepath.Abs(outputDir)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tissuerURL, err := uploadOIDCDocuments(client, storageAccountName, storageAccountKey, publicKeyPath, blobContainerName, outputDirAbsPath, dryRun, resourceTags)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to upload OIDC documents\")\n\t}\n\n\t// Write cluster authentication object installer manifest cluster-authentication-02-config.yaml\n\t// for our issuerURL within outputDir/manifests\n\tif err = provisioning.CreateClusterAuthentication(issuerURL, outputDir); err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to create cluster authentication manifest\")\n\t}\n\n\t// Write Azure AD pod identity webhook config secret azure-ad-pod-identity-webhook-config.yaml\n\t// within outputDir/manifests\n\tif err = createPodIdentityWebhookConfigSecret(tenantID, outputDir); err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to create Azure AD pod identity webhook manifest\")\n\t}\n\n\treturn issuerURL, nil\n}", "func (c *OutputService3ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService3ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (d *Driver) Create() error {\n\n\tvolume := NodesNodeStorageStorageContentPostParameter{\n\t\tFilename: d.StorageFilename,\n\t\tSize: d.DiskSize + \"G\",\n\t\tVMID: d.VMID,\n\t}\n\n\td.debugf(\"Creating disk volume '%s' with size '%s'\", volume.Filename, volume.Size)\n\tdiskname, err := d.driver.NodesNodeStorageStorageContentPost(d.Node, d.Storage, &volume)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !strings.HasSuffix(diskname, d.StorageFilename) {\n\t\treturn fmt.Errorf(\"returned diskname is not correct: should be '%s' but was '%s'\", d.StorageFilename, diskname)\n\t}\n\n\tnpp := NodesNodeQemuPostParameter{\n\t\tVMID: d.VMID,\n\t\tAgent: \"1\",\n\t\tAutostart: \"1\",\n\t\tMemory: d.Memory,\n\t\tCores: d.Cores,\n\t\tNet0: fmt.Sprintf(\"virtio,bridge=%s\", d.NetBridge),\n\t\tSCSI0: d.StorageFilename,\n\t\tOstype: \"l26\",\n\t\tName: d.BaseDriver.MachineName,\n\t\tKVM: \"1\", // if you test in a nested environment, you may have to change this to 0 if you do not have nested virtualization\n\t\tCdrom: d.ImageFile,\n\t\tPool: d.Pool,\n\t}\n\n\tif d.NetVlanTag != 0 {\n\t\tnpp.Net0 = fmt.Sprintf(\"virtio,bridge=%s,tag=%d\", d.NetBridge, d.NetVlanTag)\n\t}\n\n\tif d.StorageType == \"qcow2\" {\n\t\tnpp.SCSI0 = d.Storage + \":\" + d.VMID + \"/\" + volume.Filename\n\t} else if d.StorageType == \"raw\" {\n\t\tif strings.HasSuffix(volume.Filename, \".raw\") {\n\t\t\t// raw files (having .raw) should have the VMID in the path\n\t\t\tnpp.SCSI0 = d.Storage + \":\" + d.VMID + \"/\" + volume.Filename\n\t\t} else {\n\t\t\tnpp.SCSI0 = d.Storage + \":\" + volume.Filename\n\t\t}\n\t}\n\td.debugf(\"Creating VM '%s' with '%d' of memory\", npp.VMID, npp.Memory)\n\ttaskid, err := d.driver.NodesNodeQemuPost(d.Node, &npp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = d.driver.WaitForTaskToComplete(d.Node, taskid)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = d.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn d.waitAndPrepareSSH()\n}", "func (c *Client) newRequest(method, path string, v interface{}, ctype string) (req 
*http.Request, err error) {\n\t// Build request JSON.\n\tbody, err := writeJson(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq, err = http.NewRequest(method, c.pathToEndPoint(path), body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"X-Kii-AppID\", c.AppId)\n\treq.Header.Add(\"X-Kii-AppKey\", c.AppKey)\n\tif ctype != \"\" {\n\t\treq.Header.Add(\"Content-Type\", ctype)\n\t}\n\tif c.Authorization != \"\" {\n\t\treq.Header.Add(\"Authorization\", c.Authorization)\n\t}\n\treturn\n}", "func createStorageProfile(masterIp string, sshClientConfig *ssh.ClientConfig,\n\tstoragePolicyName string, clientIndex int) error {\n\tcreateStoragePolicy := govcLoginCmdForMultiVC(clientIndex) +\n\t\t\"govc storage.policy.create -category=shared-cat-todelete1 -tag=shared-tag-todelete1 \" + storagePolicyName\n\tframework.Logf(\"Create storage policy: %s \", createStoragePolicy)\n\tcreateStoragePolicytRes, err := sshExec(sshClientConfig, masterIp, createStoragePolicy)\n\tif err != nil && createStoragePolicytRes.Code != 0 {\n\t\tfssh.LogResult(createStoragePolicytRes)\n\t\treturn fmt.Errorf(\"couldn't execute command: %s on host: %v , error: %s\",\n\t\t\tcreateStoragePolicy, masterIp, err)\n\t}\n\treturn nil\n}", "func (c *OutputService15ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (client *ContainerClient) createCreateRequest(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (*policy.Request, error) {\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"restype\", \"container\")\n\tif options != nil && options.Timeout != nil {\n\t\treqQP.Set(\"timeout\", strconv.FormatInt(int64(*options.Timeout), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.Metadata != nil {\n\t\tfor k, v := range options.Metadata {\n\t\t\tif v != nil {\n\t\t\t\treq.Raw().Header[\"x-ms-meta-\"+k] = []string{*v}\n\t\t\t}\n\t\t}\n\t}\n\tif options != nil && options.Access != nil {\n\t\treq.Raw().Header[\"x-ms-blob-public-access\"] = []string{string(*options.Access)}\n\t}\n\treq.Raw().Header[\"x-ms-version\"] = []string{\"2020-10-02\"}\n\tif options != nil && options.RequestID != nil {\n\t\treq.Raw().Header[\"x-ms-client-request-id\"] = []string{*options.RequestID}\n\t}\n\tif containerCPKScopeInfo != nil && containerCPKScopeInfo.DefaultEncryptionScope != nil {\n\t\treq.Raw().Header[\"x-ms-default-encryption-scope\"] = []string{*containerCPKScopeInfo.DefaultEncryptionScope}\n\t}\n\tif containerCPKScopeInfo != nil && containerCPKScopeInfo.PreventEncryptionScopeOverride != nil {\n\t\treq.Raw().Header[\"x-ms-deny-encryption-scope-override\"] = []string{strconv.FormatBool(*containerCPKScopeInfo.PreventEncryptionScopeOverride)}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/xml\"}\n\treturn req, nil\n}", "func newS3Storage(backend *backup.S3) (*S3Storage, error) {\n\tqs := *backend\n\tawsConfig := aws.NewConfig().\n\t\tWithMaxRetries(maxRetries).\n\t\tWithS3ForcePathStyle(qs.ForcePathStyle).\n\t\tWithRegion(qs.Region)\n\tif qs.Endpoint != \"\" {\n\t\tawsConfig.WithEndpoint(qs.Endpoint)\n\t}\n\tvar cred *credentials.Credentials\n\tif qs.AccessKey != \"\" && qs.SecretAccessKey != \"\" {\n\t\tcred = credentials.NewStaticCredentials(qs.AccessKey, qs.SecretAccessKey, \"\")\n\t}\n\tif cred != nil 
{\n\t\tawsConfig.WithCredentials(cred)\n\t}\n\t// awsConfig.WithLogLevel(aws.LogDebugWithSigning)\n\tawsSessionOpts := session.Options{\n\t\tConfig: *awsConfig,\n\t}\n\tses, err := session.NewSessionWithOptions(awsSessionOpts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !sendCredential {\n\t\t// Clear the credentials if exists so that they will not be sent to TiKV\n\t\tbackend.AccessKey = \"\"\n\t\tbackend.SecretAccessKey = \"\"\n\t} else if ses.Config.Credentials != nil {\n\t\tif qs.AccessKey == \"\" || qs.SecretAccessKey == \"\" {\n\t\t\tv, cerr := ses.Config.Credentials.Get()\n\t\t\tif cerr != nil {\n\t\t\t\treturn nil, cerr\n\t\t\t}\n\t\t\tbackend.AccessKey = v.AccessKeyID\n\t\t\tbackend.SecretAccessKey = v.SecretAccessKey\n\t\t}\n\t}\n\n\tc := s3.New(ses)\n\terr = checkS3Bucket(c, qs.Bucket)\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"Bucket %s is not accessible: %v\", qs.Bucket, err)\n\t}\n\n\tqs.Prefix += \"/\"\n\treturn &S3Storage{\n\t\tsession: ses,\n\t\tsvc: c,\n\t\toptions: &qs,\n\t}, nil\n}", "func (service *ContrailService) CreateContrailStorageNode(\n\tctx context.Context,\n\trequest *models.CreateContrailStorageNodeRequest) (*models.CreateContrailStorageNodeResponse, error) {\n\tmodel := request.ContrailStorageNode\n\tif model.UUID == \"\" {\n\t\tmodel.UUID = uuid.NewV4().String()\n\t}\n\tauth := common.GetAuthCTX(ctx)\n\tif auth == nil {\n\t\treturn nil, common.ErrorUnauthenticated\n\t}\n\n\tif model.FQName == nil {\n\t\tif model.DisplayName != \"\" {\n\t\t\tmodel.FQName = []string{auth.DomainID(), auth.ProjectID(), model.DisplayName}\n\t\t} else {\n\t\t\tmodel.FQName = []string{auth.DomainID(), auth.ProjectID(), model.UUID}\n\t\t}\n\t}\n\tmodel.Perms2 = &models.PermType2{}\n\tmodel.Perms2.Owner = auth.ProjectID()\n\tif err := common.DoInTransaction(\n\t\tservice.DB,\n\t\tfunc(tx *sql.Tx) error {\n\t\t\treturn db.CreateContrailStorageNode(ctx, tx, request)\n\t\t}); err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"err\": err,\n\t\t\t\"resource\": \"contrail_storage_node\",\n\t\t}).Debug(\"db create failed on create\")\n\t\treturn nil, common.ErrorInternal\n\t}\n\treturn &models.CreateContrailStorageNodeResponse{\n\t\tContrailStorageNode: request.ContrailStorageNode,\n\t}, nil\n}", "func CreateIstioObjectsInK8S(configStore ConfigStore, bindingID string, name string, endpoint model.Endpoint, systemDomain string) (string, error) {\n\tservice := &v1.Service{Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: servicePort, TargetPort: intstr.FromInt(servicePort)}}}}\n\tservice.Name = name\n\tlog.Infoa(\"Creating istio objects for \", name)\n\tservice, err := configStore.CreateService(bindingID, service)\n\tif err != nil {\n\t\tlog.Errora(\"error creating service:\", err.Error())\n\t\treturn \"\", err\n\t}\n\tconfigurations := config.CreateEntriesForExternalServiceClient(service.Name, endpoint.Host, service.Spec.ClusterIP, 9000, systemDomain)\n\terr = configStore.CreateIstioConfig(bindingID, configurations)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn service.Spec.ClusterIP, nil\n}", "func (c *OutputService8ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (client *KeyVaultClient) setStorageAccountCreateRequest(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountCreateParameters, options *KeyVaultClientSetStorageAccountOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = 
strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/storage/{storage-account-name}\"\n\tif storageAccountName == \"\" {\n\t\treturn nil, errors.New(\"parameter storageAccountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{storage-account-name}\", url.PathEscape(storageAccountName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func CreatePolicyWithDefaults(nbmaster string, httpClient *http.Client, jwt string) {\r\n fmt.Printf(\"\\nSending a POST request to create %s with defaults...\\n\", testPolicyName)\r\n\r\n policy := map[string]interface{}{\r\n \"data\": map[string]interface{}{\r\n \"type\": \"policy\",\r\n \"id\": testPolicyName,\r\n \"attributes\": map[string]interface{}{\r\n \"policy\": map[string]interface{}{\r\n \"policyName\": testPolicyName,\r\n \"policyType\": \"VMware\",\r\n \"policyAttributes\": map[string]interface{}{},\r\n \"clients\":[]interface{}{},\r\n \"schedules\":[]interface{}{},\r\n \"backupSelections\": map[string]interface{}{\r\n \"selections\": []interface{}{}}}}}}\r\n\r\n policyRequest, _ := json.Marshal(policy)\r\n\r\n uri := \"https://\" + nbmaster + \":\" + port + \"/netbackup/\" + policiesUri\r\n\r\n request, _ := http.NewRequest(http.MethodPost, uri, bytes.NewBuffer(policyRequest))\r\n request.Header.Add(\"Content-Type\", contentTypeV2);\r\n request.Header.Add(\"Authorization\", jwt);\r\n\r\n response, err := httpClient.Do(request)\r\n\r\n if err != nil {\r\n fmt.Printf(\"The HTTP request failed with error: %s\\n\", err)\r\n panic(\"Unable to create policy.\\n\")\r\n } else {\r\n if response.StatusCode != 204 {\r\n printErrorResponse(response)\r\n } else {\r\n fmt.Printf(\"%s created successfully.\\n\", testPolicyName);\r\n responseDetails, _ := httputil.DumpResponse(response, true);\r\n fmt.Printf(string(responseDetails))\r\n }\r\n }\r\n}", "func CreateUploadIoTDataToBlockchainRequest() (request *UploadIoTDataToBlockchainRequest) {\n\trequest = &UploadIoTDataToBlockchainRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"lto\", \"2021-07-07\", \"UploadIoTDataToBlockchain\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func newStorageLayer(disk string) (storage StorageAPI, err error) {\n\tif !strings.ContainsRune(disk, ':') || filepath.VolumeName(disk) != \"\" {\n\t\t// Initialize filesystem storage API.\n\t\treturn newPosix(disk)\n\t}\n\t// Initialize rpc client storage API.\n\treturn newRPCClient(disk)\n}", "func EncodeStorageImagesCreateRequest(encoder func(*http.Request) goahttp.Encoder) func(*http.Request, interface{}) error {\n\treturn func(req *http.Request, v interface{}) error {\n\t\tp, ok := v.(*vm.Storage)\n\t\tif !ok {\n\t\t\treturn goahttp.ErrInvalidType(\"spin-registry\", \"storage_images_create\", \"*vm.Storage\", v)\n\t\t}\n\t\tbody := p\n\t\tif err := encoder(req).Encode(&body); err != nil {\n\t\t\treturn goahttp.ErrEncodingError(\"spin-registry\", \"storage_images_create\", err)\n\t\t}\n\t\treturn nil\n\t}\n}", "func CreateStorage(projectID, description, plan, facility, frequency string, size, count int) error {\n\tclient, err := NewExtPacketClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Create a snapshot policy. 
If `count` is 0, we shall pass an empty object\n\tsnapshotPolicies := make([]*extpackngo.SnapshotPolicy, 1)\n\tif count != 0 {\n\t\tsnapshotPolicies = append(snapshotPolicies, &extpackngo.SnapshotPolicy{\n\t\t\tSnapshotFrequency: frequency,\n\t\t\tSnapshotCount: count})\n\t}\n\trequest := &extpackngo.StorageCreateRequest{\n\t\tDescription: description,\n\t\tPlan: plan,\n\t\tSize: size,\n\t\tFacility: facility,\n\t\tSnapshotPolicies: snapshotPolicies,\n\t}\n\n\tstorage, _, err := client.Storages.Create(projectID, request)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te := MarshallAndPrint(storage)\n\treturn e\n}", "func NewRequest(c *RESTClient) *Request {\n\tvar pathPrefix string\n\tif c.base != nil {\n\t\tpathPrefix = path.Join(\"/\", c.base.Path, c.versionedAPIPath)\n\t} else {\n\t\tpathPrefix = path.Join(\"/\", c.versionedAPIPath)\n\t}\n\n\tr := &Request{\n\t\tc: c,\n\t\tpathPrefix: pathPrefix,\n\t}\n\n\tauthMethod := 0\n\n\tfor _, fn := range []func() bool{c.content.HasBasicAuth, c.content.HasTokenAuth, c.content.HasKeyAuth} {\n\t\tif fn() {\n\t\t\tauthMethod++\n\t\t}\n\t}\n\n\tif authMethod > 1 {\n\t\tr.err = fmt.Errorf(\n\t\t\t\"username/password or bearer token or secretID/secretKey may be set, but should use only one of them\",\n\t\t)\n\n\t\treturn r\n\t}\n\n\tswitch {\n\tcase c.content.HasTokenAuth():\n\t\tr.SetHeader(\"Authorization\", fmt.Sprintf(\"Bearer %s\", c.content.BearerToken))\n\tcase c.content.HasKeyAuth():\n\t\ttokenString := auth.Sign(c.content.SecretID, c.content.SecretKey, \"marmotedu-sdk-go\", c.group+\".marmotedu.com\")\n\t\tr.SetHeader(\"Authorization\", fmt.Sprintf(\"Bearer %s\", tokenString))\n\tcase c.content.HasBasicAuth():\n\t\t// TODO: get token and set header\n\t\tr.SetHeader(\"Authorization\", \"Basic \"+basicAuth(c.content.Username, c.content.Password))\n\t}\n\n\t// set accept content\n\tswitch {\n\tcase len(c.content.AcceptContentTypes) > 0:\n\t\tr.SetHeader(\"Accept\", c.content.AcceptContentTypes)\n\tcase len(c.content.ContentType) > 0:\n\t\tr.SetHeader(\"Accept\", c.content.ContentType+\", */*\")\n\t}\n\n\treturn r\n}", "func (c *InputService3ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (s *GenericStorage) New(gvk schema.GroupVersionKind) (runtime.Object, error) {\n\tobj, err := s.serializer.Scheme().New(gvk)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Default either through the scheme, or the high-level serializer Object\n\tif gvk.Version == kruntime.APIVersionInternal {\n\t\tif err := s.serializer.DefaultInternal(obj); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\ts.serializer.Scheme().Default(obj)\n\t}\n\n\t// Cast to runtime.Object, and make sure it works\n\tmetaObj, ok := obj.(runtime.Object)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"can't convert to libgitops.runtime.Object\")\n\t}\n\t// Set the desired gvk from the caller of this Object\n\t// In practice, this means, although we created an internal type,\n\t// from defaulting external TypeMeta information was set. 
Set the\n\t// desired gvk here so it's correctly handled in all code that gets\n\t// the gvk from the Object\n\tmetaObj.SetGroupVersionKind(gvk)\n\treturn metaObj, nil\n}", "func (s *StorageBase) New(ctx context.Context, ttl time.Duration) (id string, err error) {\n\treturn \"\", ErrorDisabled\n}", "func (r Virtual_Guest_Block_Device_Template_Group) CreateFromIcos(configuration *datatypes.Container_Virtual_Guest_Block_Device_Template_Configuration) (resp datatypes.Virtual_Guest_Block_Device_Template_Group, err error) {\n\tparams := []interface{}{\n\t\tconfiguration,\n\t}\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_Guest_Block_Device_Template_Group\", \"createFromIcos\", params, &r.Options, &resp)\n\treturn\n}", "func (c *OutputService9ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (s *OidcService) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := s.NewRequest(op, params, data)\n\n\treturn req\n}", "func newStorageObject(URL string, source interface{}, fileInfo os.FileInfo) storage.Object {\n\tabstract := storage.NewAbstractStorageObject(URL, source, fileInfo)\n\tresult := &object{\n\t\tAbstractObject: abstract,\n\t}\n\tresult.AbstractObject.Object = result\n\treturn result\n}", "func NewCreateFileDefault(code int) *CreateFileDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &CreateFileDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (r ApiCreateHyperflexExtIscsiStoragePolicyRequest) IfNoneMatch(ifNoneMatch string) ApiCreateHyperflexExtIscsiStoragePolicyRequest {\n\tr.ifNoneMatch = &ifNoneMatch\n\treturn r\n}", "func NewDefault(m map[string]interface{}) (share.Manager, error) {\n\tc := &config{}\n\tif err := mapstructure.Decode(m, c); err != nil {\n\t\terr = errors.Wrap(err, \"error creating a new manager\")\n\t\treturn nil, err\n\t}\n\n\ts, err := metadata.NewCS3Storage(c.GatewayAddr, c.ProviderAddr, c.ServiceUserID, c.ServiceUserIdp, c.MachineAuthAPIKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tindexer := indexer.CreateIndexer(s)\n\n\tclient, err := pool.GetGatewayServiceClient(c.GatewayAddr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn New(client, s, indexer)\n}", "func (service *ContrailService) RESTCreateContrailStorageNode(c echo.Context) error {\n\trequestData := &models.CreateContrailStorageNodeRequest{}\n\tif err := c.Bind(requestData); err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"err\": err,\n\t\t\t\"resource\": \"contrail_storage_node\",\n\t\t}).Debug(\"bind failed on create\")\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, \"Invalid JSON format\")\n\t}\n\tctx := c.Request().Context()\n\tresponse, err := service.CreateContrailStorageNode(ctx, requestData)\n\tif err != nil {\n\t\treturn common.ToHTTPError(err)\n\t}\n\treturn c.JSON(http.StatusCreated, response)\n}", "func (c *InputService8ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func Create(req clusterapi.Request) (clusterapi.ClusterAPI, error) {\n\t// Validates parameters\n\tif req.Name == \"\" 
{\n\t\treturn nil, fmt.Errorf(\"Invalid parameter req.Name: can't be empty\")\n\t}\n\tif req.CIDR == \"\" {\n\t\treturn nil, fmt.Errorf(\"Invalid parameter req.CIDR: can't be empty\")\n\t}\n\n\t// We need at first the Metadata container to be present\n\terr := utils.CreateMetadataContainer()\n\tif err != nil {\n\t\tfmt.Printf(\"failed to create Object Container: %s\\n\", err.Error())\n\t}\n\n\tvar network *pb.Network\n\tvar instance clusterapi.ClusterAPI\n\n\tlog.Printf(\"Creating infrastructure for cluster '%s'\", req.Name)\n\n\ttenant, err := utils.GetCurrentTenant()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Creates network\n\tlog.Printf(\"Creating Network 'net-%s'\", req.Name)\n\treq.Name = strings.ToLower(req.Name)\n\tnetwork, err = utils.CreateNetwork(\"net-\"+req.Name, req.CIDR)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Failed to create Network '%s': %s\", req.Name, err.Error())\n\t\treturn nil, err\n\t}\n\n\tswitch req.Flavor {\n\tcase Flavor.DCOS:\n\t\treq.NetworkID = network.ID\n\t\treq.Tenant = tenant\n\t\tinstance, err = dcos.NewCluster(req)\n\t\tif err != nil {\n\t\t\t//utils.DeleteNetwork(network.ID)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tlog.Printf(\"Cluster '%s' created and initialized successfully\", req.Name)\n\treturn instance, nil\n}", "func (a *OsZonesApiService) CreateObjectStorageZone(ctx context.Context, body ObjectStorageZoneCreateReq) (ObjectStorageZoneResp, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Post\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t \tsuccessPayload ObjectStorageZoneResp\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/os-zones/\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"Xms-Auth-Token\"] = key\n\t\t}\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarQueryParams.Add(\"token\", key)\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn successPayload, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil 
|| localVarHttpResponse == nil {\n\t\treturn successPayload, localVarHttpResponse, err\n\t}\n\tdefer localVarHttpResponse.Body.Close()\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tbodyBytes, _ := ioutil.ReadAll(localVarHttpResponse.Body)\n\t\treturn successPayload, localVarHttpResponse, reportError(\"Status: %v, Body: %s\", localVarHttpResponse.Status, bodyBytes)\n\t}\n\n\tif err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil {\n\t\treturn successPayload, localVarHttpResponse, err\n\t}\n\n\n\treturn successPayload, localVarHttpResponse, err\n}", "func newStorage() *storage {\n\tr := make(map[string][]byte)\n\treturn &storage{\n\t\trepository: r,\n\t}\n}", "func (c *OutputService4ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService4ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func newIoTConfigs(c *IotV1alpha1Client, namespace string) *ioTConfigs {\n\treturn &ioTConfigs{\n\t\tclient: c.RESTClient(),\n\t\tns: namespace,\n\t}\n}", "func createNS(ctx context.Context, prefix string) string {\n\tnm := createNSName(prefix)\n\n\t// Create the namespace\n\tns := &corev1.Namespace{}\n\tns.Name = nm\n\tExpect(k8sClient.Create(ctx, ns)).Should(Succeed())\n\n\t// Wait for the Hierarchy singleton to be created\n\tsnm := types.NamespacedName{Namespace: nm, Name: tenancy.Singleton}\n\thier := &tenancy.Hierarchy{}\n\tEventually(func() error {\n\t\treturn k8sClient.Get(ctx, snm, hier)\n\t}).Should(Succeed())\n\n\treturn nm\n}", "func (c *InputService14ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService10ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (client *NetworkToNetworkInterconnectsClient) createCreateRequest(ctx context.Context, resourceGroupName string, networkFabricName string, networkToNetworkInterconnectName string, body NetworkToNetworkInterconnect, options *NetworkToNetworkInterconnectsClientBeginCreateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetworkFabric/networkFabrics/{networkFabricName}/networkToNetworkInterconnects/{networkToNetworkInterconnectName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif networkFabricName == \"\" {\n\t\treturn nil, errors.New(\"parameter networkFabricName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{networkFabricName}\", url.PathEscape(networkFabricName))\n\tif networkToNetworkInterconnectName == \"\" {\n\t\treturn nil, errors.New(\"parameter networkToNetworkInterconnectName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{networkToNetworkInterconnectName}\", url.PathEscape(networkToNetworkInterconnectName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-06-15\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, body)\n}", "func (c *OutputService5ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService5ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (client *ReplicationvCentersClient) createCreateRequest(ctx context.Context, fabricName string, vcenterName string, addVCenterRequest AddVCenterRequest, options *ReplicationvCentersClientBeginCreateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationvCenters/{vcenterName}\"\n\tif client.resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(client.resourceName))\n\tif client.resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(client.resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif fabricName == \"\" {\n\t\treturn nil, errors.New(\"parameter fabricName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{fabricName}\", url.PathEscape(fabricName))\n\tif vcenterName == \"\" {\n\t\treturn nil, errors.New(\"parameter vcenterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vcenterName}\", url.PathEscape(vcenterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, addVCenterRequest)\n}", "func (c *InputService9ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService2ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService2ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func Create (w http.ResponseWriter, r *http.Request) {\n\t/* This is an SBC */\n\tif CREATED == false {\n\t\t/* Move the checking of ID up first to confirm this is allowed */\n\t\t/* Do most of start. 
Just don't download because that would be downloading from self */\n\t\t/* Get address and ID */\n\t\t/* Get port number and set that to ID */\n\t\t/* Save localhost as Addr */\n\t\tsplitHostPort := strings.Split(r.Host, \":\")\n\t\ti, err := strconv.ParseInt(splitHostPort[1], 10, 32)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(500)\n\t\t\tpanic(err)\n\t\t}\n\t\t/* ID is now port number. Address is now correct Address */\n\t\tID = int32(i)\n\t\tSELF_ADDR = r.Host\n\t\t/* Check if ID is allowed in ALLOWED_IDs */\n\t\tif _, ok := ALLOWED_IDS[ID]; ok {\n\t\t\tnewBlockChain := data.NewBlockChain()\n\n\t\t\tmpt1 := p1.MerklePatriciaTrie{}\n\t\t\tmpt1.Initial()\n\t\t\tmpt1.Insert(\"1\", \"Origin\")\n\n\t\t\tmpt2 := p1.MerklePatriciaTrie{}\n\t\t\tmpt2.Initial()\n\t\t\tmpt2.Insert(\"1\", \"Decoy1\")\n\n\t\t\tmpt3 := p1.MerklePatriciaTrie{}\n\t\t\tmpt3.Initial()\n\t\t\tmpt3.Insert(\"1\", \"Decoy2\")\n\n\t\t\tmpt4 := p1.MerklePatriciaTrie{}\n\t\t\tmpt4.Initial()\n\t\t\tmpt4.Insert(\"1\", \"Decoy3\")\n\n\t\t\thexPubKey := hexutil.Encode(signature_p.PUBLIC_KEY)\n\t\t\tnewBlockChain.GenBlock(mpt1, hexPubKey)\n\t\t\tnewBlockChain.GenBlock(mpt2, hexPubKey)\n\t\t\tnewBlockChain.GenBlock(mpt3, hexPubKey)\n\t\t\tnewBlockChain.GenBlock(mpt4, hexPubKey)\n\t\t\t/* Set Global variable SBC to be this new blockchain */\n\t\t\tSBC = newBlockChain\n\t\t\t/* Generate Multiple Blocks Initially */\n\t\t\t\t\n\t\t\tblockChainJson, _ := SBC.BlockChainToJson()\n\t\t\t/* Write this to the server */\n\t\t\tw.Write([]byte(blockChainJson))\n\n\t\t\t/* Need to instantiate the peer list */\n\t\t\tPeers = data.NewPeerList(ID, 32)\n\t\t\tBALLOT = ReadDataFromBallot()\n\t\t\tCREATED = true\n\t\t}\n\t}\n}", "func Create(c *gophercloud.ServiceClient, containerName, objectName string, content io.ReadSeeker, opts CreateOptsBuilder) CreateResult {\n\tvar res CreateResult\n\n\turl := createURL(c, containerName, objectName)\n\th := make(map[string]string)\n\n\tif opts != nil {\n\t\theaders, query, err := opts.ToObjectCreateParams()\n\t\tif err != nil {\n\t\t\tres.Err = err\n\t\t\treturn res\n\t\t}\n\n\t\tfor k, v := range headers {\n\t\t\th[k] = v\n\t\t}\n\n\t\turl += query\n\t}\n\n\thash := md5.New()\n\tbufioReader := bufio.NewReader(io.TeeReader(content, hash))\n\tio.Copy(ioutil.Discard, bufioReader)\n\tlocalChecksum := hash.Sum(nil)\n\n\th[\"ETag\"] = fmt.Sprintf(\"%x\", localChecksum)\n\n\t_, err := content.Seek(0, 0)\n\tif err != nil {\n\t\tres.Err = err\n\t\treturn res\n\t}\n\n\tropts := gophercloud.RequestOpts{\n\t\tRawBody: content,\n\t\tMoreHeaders: h,\n\t}\n\n\tresp, err := c.Request(\"PUT\", url, ropts)\n\tif err != nil {\n\t\tres.Err = err\n\t\treturn res\n\t}\n\tif resp != nil {\n\t\tres.Header = resp.Header\n\t\tif resp.Header.Get(\"ETag\") == fmt.Sprintf(\"%x\", localChecksum) {\n\t\t\tres.Err = err\n\t\t\treturn res\n\t\t}\n\t\tres.Err = fmt.Errorf(\"Local checksum does not match API ETag header\")\n\t}\n\n\treturn res\n}", "func createNewEmptyNode() Node {\n\tnextNewId--\n\treturn Node{\n\t\tId: nextNewId,\n\t\tVisible: true,\n\t\tTimestamp: time.Now().Format(\"2006-01-02T15:04:05Z\"),\n\t\tVersion: \"1\",\n\t}\n}", "func NewCreateImageFromSnapshotsRequestWithoutParam() *CreateImageFromSnapshotsRequest {\n\n return &CreateImageFromSnapshotsRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/regions/{regionId}/images:createImageFromSnapshots\",\n Method: \"POST\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}", "func (c *InputService4ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request 
{\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func New(keyName, sigType string, hash crypto.Hash) *Info {\n\tnow := time.Now().UTC()\n\ta := make(map[string]interface{})\n\ta[\"sig.type\"] = sigType\n\ta[\"sig.keyname\"] = keyName\n\ta[\"sig.hash\"] = x509tools.HashNames[hash]\n\ta[\"sig.timestamp\"] = now\n\tif hostname, _ := os.Hostname(); hostname != \"\" {\n\t\ta[\"sig.hostname\"] = hostname\n\t}\n\treturn &Info{Attributes: a, StartTime: now}\n}", "func InitStorage(service string, bucket string) {\n\ttransferType = service\n\tbenchName = bucket\n\tawsAccessKey, ok := os.LookupEnv(\"AWS_ACCESS_KEY\")\n\tif ok {\n\t\tAKID = awsAccessKey\n\t}\n\tawsSecretKey, ok := os.LookupEnv(\"AWS_SECRET_KEY\")\n\tif ok {\n\t\tSECRET_KEY = awsSecretKey\n\t}\n\tAWS_S3_REGION = \"us-west-1\"\n\tawsRegion, ok := os.LookupEnv(\"AWS_REGION\")\n\tif ok {\n\t\tAWS_S3_REGION = awsRegion\n\t}\n\tif transferType == S3 {\n\t\tvar err error\n\t\ts3session, err = session.NewSession(&aws.Config{\n\t\t\tRegion: aws.String(AWS_S3_REGION),\n\t\t\tCredentials: credentials.NewStaticCredentials(AKID, SECRET_KEY, TOKEN),\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed establish s3 session: %s\", err)\n\t\t}\n\t} else if transferType == ELASTICACHE {\n\t\tredisClient = redis.NewClient(&redis.Options{\n\t\t\tAddr: benchName,\n\t\t\tPassword: \"\", // no password set\n\t\t\tDB: 0, // use default DB\n\t\t})\n\t}\n}", "func Create(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) {\n\t// Add your code here:\n\t// * Make API calls (use req.Session)\n\t// * Mutate the model\n\t// * Check/set any callback context (req.CallbackContext / response.CallbackContext)\n\n\tiotSvc := iot.New(req.Session)\n\ts3Svc := s3.New(req.Session)\n\n\ttestKey := req.LogicalResourceID\n\t_, err := s3Svc.PutObject(&s3.PutObjectInput{\n\t\tBucket: currentModel.Bucket,\n\t\tKey: &testKey,\n\t\tBody: strings.NewReader(\"test\"),\n\t})\n\tif err != nil {\n\t\taerr, ok := err.(awserr.Error)\n\t\tif ok {\n\t\t\tfmt.Printf(\"%v\", aerr)\n\t\t}\n\t\tresponse := handler.ProgressEvent{\n\t\t\tOperationStatus: handler.Failed,\n\t\t\tMessage: \"Bucket is not accessible\",\n\t\t}\n\t\treturn response, nil\n\t}\n\t_, err = s3Svc.DeleteObject(&s3.DeleteObjectInput{\n\t\tBucket: currentModel.Bucket,\n\t\tKey: &testKey,\n\t})\n\tactive := false\n\tif currentModel.Status != nil && strings.Compare(*currentModel.Status, \"ACTIVE\") == 0 {\n\t\tactive = true\n\t}\n\tres, err := iotSvc.CreateKeysAndCertificate(&iot.CreateKeysAndCertificateInput{\n\t\tSetAsActive: &active,\n\t})\n\n\tif err != nil {\n\t\taerr, ok := err.(awserr.Error)\n\t\tif ok {\n\t\t\tfmt.Printf(\"%v\", aerr)\n\t\t}\n\t\tresponse := handler.ProgressEvent{\n\t\t\tOperationStatus: handler.Failed,\n\t\t\tMessage: fmt.Sprintf(\"Failed: %s\", aerr.Error()),\n\t\t}\n\t\treturn response, nil\n\t}\n\n\tvar key string\n\tkey = fmt.Sprintf(\"%s.pem\", *res.CertificateId)\n\t_, err = s3Svc.PutObject(&s3.PutObjectInput{\n\t\tBucket: currentModel.Bucket,\n\t\tKey: &key,\n\t\tBody: strings.NewReader(*res.CertificatePem),\n\t})\n\tkey = fmt.Sprintf(\"%s.key\", *res.CertificateId)\n\t_, err = s3Svc.PutObject(&s3.PutObjectInput{\n\t\tBucket: currentModel.Bucket,\n\t\tKey: &key,\n\t\tBody: strings.NewReader(*res.KeyPair.PrivateKey),\n\t})\n\tcurrentModel.Arn = res.CertificateArn\n\tcurrentModel.Id = res.CertificateId\n\n\tresponse := handler.ProgressEvent{\n\t\tOperationStatus: handler.Success,\n\t\tMessage: \"Created certificate\",\n\t\tResourceModel: 
currentModel,\n\t}\n\treturn response, nil\n}", "func (c *InputService10ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func NewCreateanewNcosLevelRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) {\n\tvar err error\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/ncoslevels\")\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"POST\", queryUrl.String(), body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(\"Content-Type\", contentType)\n\treturn req, nil\n}", "func (client IdentityClient) createTagNamespace(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/tagNamespaces\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateTagNamespaceResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (c *InputService15ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (client *StorageTargetsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, cacheName string, storageTargetName string, storagetarget StorageTarget, options *StorageTargetsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif cacheName == \"\" {\n\t\treturn nil, errors.New(\"parameter cacheName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{cacheName}\", url.PathEscape(cacheName))\n\tif storageTargetName == \"\" {\n\t\treturn nil, errors.New(\"parameter storageTargetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{storageTargetName}\", url.PathEscape(storageTargetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, storagetarget)\n}", "func (r ApiCreateHyperflexClusterStoragePolicyRequest) IfNoneMatch(ifNoneMatch string) ApiCreateHyperflexClusterStoragePolicyRequest 
{\n\tr.ifNoneMatch = &ifNoneMatch\n\treturn r\n}", "func (c *InputService5ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func FakeNewStorage() *fakeStorage {\n\treturn &fakeStorage{}\n}", "func testMakeBucketRegionsV2() {\n\t// initialize logging params\n\tstartTime := time.Now()\n\ttestName := getFuncName()\n\tfunction := \"MakeBucket(bucketName, region)\"\n\targs := map[string]interface{}{\n\t\t\"bucketName\": \"\",\n\t\t\"region\": \"eu-west-1\",\n\t}\n\n\t// Seed random based on current time.\n\trand.Seed(time.Now().Unix())\n\n\t// Instantiate new minio client object.\n\tc, err := minio.New(os.Getenv(serverEndpoint),\n\t\t&minio.Options{\n\t\t\tCreds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), \"\"),\n\t\t\tSecure: mustParseBool(os.Getenv(enableHTTPS)),\n\t\t})\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"MinIO v2 client object creation failed\", err)\n\t\treturn\n\t}\n\n\t// Enable tracing, write to stderr.\n\t// c.TraceOn(os.Stderr)\n\n\t// Set user agent.\n\tc.SetAppInfo(\"MinIO-go-FunctionalTest\", \"0.1.0\")\n\n\t// Generate a new random bucket name.\n\tbucketName := randString(60, rand.NewSource(time.Now().UnixNano()), \"minio-go-test-\")\n\targs[\"bucketName\"] = bucketName\n\n\t// Make a new bucket in 'eu-central-1'.\n\tif err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: \"eu-west-1\"}); err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"MakeBucket failed\", err)\n\t\treturn\n\t}\n\n\tif err = cleanupBucket(bucketName, c); err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"CleanupBucket failed while removing bucket recursively\", err)\n\t\treturn\n\t}\n\n\t// Make a new bucket with '.' in its name, in 'us-west-2'. This\n\t// request is internally staged into a path style instead of\n\t// virtual host style.\n\tif err = c.MakeBucket(context.Background(), bucketName+\".withperiod\", minio.MakeBucketOptions{Region: \"us-west-2\"}); err != nil {\n\t\targs[\"bucketName\"] = bucketName + \".withperiod\"\n\t\targs[\"region\"] = \"us-west-2\"\n\t\tlogError(testName, function, args, startTime, \"\", \"MakeBucket test with a bucket name with period, '.', failed\", err)\n\t\treturn\n\t}\n\n\t// Delete all objects and buckets\n\tif err = cleanupBucket(bucketName+\".withperiod\", c); err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"CleanupBucket failed while removing bucket recursively\", err)\n\t\treturn\n\t}\n\n\tsuccessLogger(testName, function, args, startTime).Info()\n}" ]
[ "0.55036443", "0.5441542", "0.54345405", "0.53376144", "0.5327419", "0.5230142", "0.5220554", "0.51865524", "0.5161279", "0.51439565", "0.51330143", "0.5099054", "0.5083778", "0.50006616", "0.49205598", "0.49018854", "0.4880663", "0.48625007", "0.4858393", "0.48457134", "0.48373488", "0.47943708", "0.47841513", "0.47731715", "0.477108", "0.47597358", "0.47539708", "0.47482866", "0.4712211", "0.46858007", "0.4677622", "0.46735474", "0.46704027", "0.46653154", "0.46584615", "0.46584615", "0.46572953", "0.46550792", "0.4654365", "0.46437842", "0.464102", "0.4639023", "0.46214062", "0.46158198", "0.4601686", "0.45853564", "0.45780158", "0.45725083", "0.45660588", "0.45630854", "0.4561464", "0.4548092", "0.4547668", "0.4547126", "0.4538774", "0.45334", "0.45221058", "0.45190564", "0.45172307", "0.4511225", "0.45054457", "0.4505135", "0.4503608", "0.45030028", "0.4498054", "0.4498054", "0.44975084", "0.44935063", "0.44903445", "0.4489721", "0.4489721", "0.44858292", "0.44809568", "0.4478601", "0.44776982", "0.44670296", "0.44652006", "0.44652006", "0.44613594", "0.44606188", "0.44492623", "0.44455728", "0.44455728", "0.4444457", "0.44385377", "0.44361433", "0.44360313", "0.44353145", "0.44346583", "0.4433771", "0.44262692", "0.44253466", "0.44228992", "0.44189712", "0.44182664", "0.44182205", "0.4417879", "0.4417844", "0.44161612", "0.44134778" ]
0.6698065
0
WithPayload adds the payload to the create storage v1 c s i node o k response
func (o *CreateStorageV1CSINodeOK) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *CreateStorageV1CSINodeOK { o.Payload = payload return o }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *CreateStorageV1CSINodeOK) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (o *CreateStorageV1CSINodeCreated) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (o *ReplaceStorageV1CSINodeCreated) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedServiceAccountTokenCreated) SetPayload(payload *models.IoK8sAPIAuthenticationV1TokenRequest) {\n\to.Payload = payload\n}", "func (o *CreateClusterCreated) SetPayload(payload *models.Kluster) {\n\to.Payload = payload\n}", "func (o *CreateStorageV1CSINodeCreated) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *CreateStorageV1CSINodeCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateCoreV1NamespacedServiceAccountTokenOK) SetPayload(payload *models.IoK8sAPIAuthenticationV1TokenRequest) {\n\to.Payload = payload\n}", "func (o *DeleteStorageByIDOK) SetPayload(payload *models.Storage) {\n\to.Payload = payload\n}", "func (o *ReplaceStorageV1CSINodeOK) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (o *ReplaceStorageV1CSINodeCreated) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *ReplaceStorageV1CSINodeCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateStorageV1CSINodeAccepted) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (o *CreateHPCResourceCreated) SetPayload(payload *models.CreatedResponse) {\n\to.Payload = payload\n}", "func (o *ClientPermissionCreateInternalServerError) SetPayload(payload *ClientPermissionCreateInternalServerErrorBody) {\n\to.Payload = payload\n}", "func (o *CreateClusterDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateExtensionsV1beta1NamespacedIngressCreated) SetPayload(payload *models.IoK8sAPIExtensionsV1beta1Ingress) {\n\to.Payload = payload\n}", "func (o *DeleteStorageByIDNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateStorageSSLCertificateDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *ReplaceExtensionsV1beta1NamespacedIngressCreated) SetPayload(payload *models.IoK8sAPIExtensionsV1beta1Ingress) {\n\to.Payload = payload\n}", "func (o *CreateSpoeCreated) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (rm *resourceManager) newCreateRequestPayload(\n\tr *resource,\n) (*svcsdk.CreateReplicationGroupInput, error) {\n\tres := &svcsdk.CreateReplicationGroupInput{}\n\n\tif r.ko.Spec.AtRestEncryptionEnabled != nil {\n\t\tres.SetAtRestEncryptionEnabled(*r.ko.Spec.AtRestEncryptionEnabled)\n\t}\n\tif r.ko.Spec.AuthToken != nil {\n\t\tres.SetAuthToken(*r.ko.Spec.AuthToken)\n\t}\n\tif r.ko.Spec.AutoMinorVersionUpgrade != nil {\n\t\tres.SetAutoMinorVersionUpgrade(*r.ko.Spec.AutoMinorVersionUpgrade)\n\t}\n\tif r.ko.Spec.AutomaticFailoverEnabled != nil {\n\t\tres.SetAutomaticFailoverEnabled(*r.ko.Spec.AutomaticFailoverEnabled)\n\t}\n\tif r.ko.Spec.CacheNodeType != nil {\n\t\tres.SetCacheNodeType(*r.ko.Spec.CacheNodeType)\n\t}\n\tif r.ko.Spec.CacheParameterGroupName != nil {\n\t\tres.SetCacheParameterGroupName(*r.ko.Spec.CacheParameterGroupName)\n\t}\n\tif r.ko.Spec.CacheSecurityGroupNames != nil {\n\t\tf6 := []*string{}\n\t\tfor _, f6iter := range r.ko.Spec.CacheSecurityGroupNames {\n\t\t\tvar f6elem string\n\t\t\tf6elem = *f6iter\n\t\t\tf6 = append(f6, &f6elem)\n\t\t}\n\t\tres.SetCacheSecurityGroupNames(f6)\n\t}\n\tif 
r.ko.Spec.CacheSubnetGroupName != nil {\n\t\tres.SetCacheSubnetGroupName(*r.ko.Spec.CacheSubnetGroupName)\n\t}\n\tif r.ko.Spec.Engine != nil {\n\t\tres.SetEngine(*r.ko.Spec.Engine)\n\t}\n\tif r.ko.Spec.EngineVersion != nil {\n\t\tres.SetEngineVersion(*r.ko.Spec.EngineVersion)\n\t}\n\tif r.ko.Spec.GlobalReplicationGroupID != nil {\n\t\tres.SetGlobalReplicationGroupId(*r.ko.Spec.GlobalReplicationGroupID)\n\t}\n\tif r.ko.Spec.KMSKeyID != nil {\n\t\tres.SetKmsKeyId(*r.ko.Spec.KMSKeyID)\n\t}\n\tif r.ko.Spec.MultiAZEnabled != nil {\n\t\tres.SetMultiAZEnabled(*r.ko.Spec.MultiAZEnabled)\n\t}\n\tif r.ko.Spec.NodeGroupConfiguration != nil {\n\t\tf13 := []*svcsdk.NodeGroupConfiguration{}\n\t\tfor _, f13iter := range r.ko.Spec.NodeGroupConfiguration {\n\t\t\tf13elem := &svcsdk.NodeGroupConfiguration{}\n\t\t\tif f13iter.NodeGroupID != nil {\n\t\t\t\tf13elem.SetNodeGroupId(*f13iter.NodeGroupID)\n\t\t\t}\n\t\t\tif f13iter.PrimaryAvailabilityZone != nil {\n\t\t\t\tf13elem.SetPrimaryAvailabilityZone(*f13iter.PrimaryAvailabilityZone)\n\t\t\t}\n\t\t\tif f13iter.PrimaryOutpostARN != nil {\n\t\t\t\tf13elem.SetPrimaryOutpostArn(*f13iter.PrimaryOutpostARN)\n\t\t\t}\n\t\t\tif f13iter.ReplicaAvailabilityZones != nil {\n\t\t\t\tf13elemf3 := []*string{}\n\t\t\t\tfor _, f13elemf3iter := range f13iter.ReplicaAvailabilityZones {\n\t\t\t\t\tvar f13elemf3elem string\n\t\t\t\t\tf13elemf3elem = *f13elemf3iter\n\t\t\t\t\tf13elemf3 = append(f13elemf3, &f13elemf3elem)\n\t\t\t\t}\n\t\t\t\tf13elem.SetReplicaAvailabilityZones(f13elemf3)\n\t\t\t}\n\t\t\tif f13iter.ReplicaCount != nil {\n\t\t\t\tf13elem.SetReplicaCount(*f13iter.ReplicaCount)\n\t\t\t}\n\t\t\tif f13iter.ReplicaOutpostARNs != nil {\n\t\t\t\tf13elemf5 := []*string{}\n\t\t\t\tfor _, f13elemf5iter := range f13iter.ReplicaOutpostARNs {\n\t\t\t\t\tvar f13elemf5elem string\n\t\t\t\t\tf13elemf5elem = *f13elemf5iter\n\t\t\t\t\tf13elemf5 = append(f13elemf5, &f13elemf5elem)\n\t\t\t\t}\n\t\t\t\tf13elem.SetReplicaOutpostArns(f13elemf5)\n\t\t\t}\n\t\t\tif f13iter.Slots != nil {\n\t\t\t\tf13elem.SetSlots(*f13iter.Slots)\n\t\t\t}\n\t\t\tf13 = append(f13, f13elem)\n\t\t}\n\t\tres.SetNodeGroupConfiguration(f13)\n\t}\n\tif r.ko.Spec.NotificationTopicARN != nil {\n\t\tres.SetNotificationTopicArn(*r.ko.Spec.NotificationTopicARN)\n\t}\n\tif r.ko.Spec.NumCacheClusters != nil {\n\t\tres.SetNumCacheClusters(*r.ko.Spec.NumCacheClusters)\n\t}\n\tif r.ko.Spec.NumNodeGroups != nil {\n\t\tres.SetNumNodeGroups(*r.ko.Spec.NumNodeGroups)\n\t}\n\tif r.ko.Spec.Port != nil {\n\t\tres.SetPort(*r.ko.Spec.Port)\n\t}\n\tif r.ko.Spec.PreferredCacheClusterAZs != nil {\n\t\tf18 := []*string{}\n\t\tfor _, f18iter := range r.ko.Spec.PreferredCacheClusterAZs {\n\t\t\tvar f18elem string\n\t\t\tf18elem = *f18iter\n\t\t\tf18 = append(f18, &f18elem)\n\t\t}\n\t\tres.SetPreferredCacheClusterAZs(f18)\n\t}\n\tif r.ko.Spec.PreferredMaintenanceWindow != nil {\n\t\tres.SetPreferredMaintenanceWindow(*r.ko.Spec.PreferredMaintenanceWindow)\n\t}\n\tif r.ko.Spec.PrimaryClusterID != nil {\n\t\tres.SetPrimaryClusterId(*r.ko.Spec.PrimaryClusterID)\n\t}\n\tif r.ko.Spec.ReplicasPerNodeGroup != nil {\n\t\tres.SetReplicasPerNodeGroup(*r.ko.Spec.ReplicasPerNodeGroup)\n\t}\n\tif r.ko.Spec.ReplicationGroupDescription != nil {\n\t\tres.SetReplicationGroupDescription(*r.ko.Spec.ReplicationGroupDescription)\n\t}\n\tif r.ko.Spec.ReplicationGroupID != nil {\n\t\tres.SetReplicationGroupId(*r.ko.Spec.ReplicationGroupID)\n\t}\n\tif r.ko.Spec.SecurityGroupIDs != nil {\n\t\tf24 := []*string{}\n\t\tfor _, f24iter := range 
r.ko.Spec.SecurityGroupIDs {\n\t\t\tvar f24elem string\n\t\t\tf24elem = *f24iter\n\t\t\tf24 = append(f24, &f24elem)\n\t\t}\n\t\tres.SetSecurityGroupIds(f24)\n\t}\n\tif r.ko.Spec.SnapshotARNs != nil {\n\t\tf25 := []*string{}\n\t\tfor _, f25iter := range r.ko.Spec.SnapshotARNs {\n\t\t\tvar f25elem string\n\t\t\tf25elem = *f25iter\n\t\t\tf25 = append(f25, &f25elem)\n\t\t}\n\t\tres.SetSnapshotArns(f25)\n\t}\n\tif r.ko.Spec.SnapshotName != nil {\n\t\tres.SetSnapshotName(*r.ko.Spec.SnapshotName)\n\t}\n\tif r.ko.Spec.SnapshotRetentionLimit != nil {\n\t\tres.SetSnapshotRetentionLimit(*r.ko.Spec.SnapshotRetentionLimit)\n\t}\n\tif r.ko.Spec.SnapshotWindow != nil {\n\t\tres.SetSnapshotWindow(*r.ko.Spec.SnapshotWindow)\n\t}\n\tif r.ko.Spec.Tags != nil {\n\t\tf29 := []*svcsdk.Tag{}\n\t\tfor _, f29iter := range r.ko.Spec.Tags {\n\t\t\tf29elem := &svcsdk.Tag{}\n\t\t\tif f29iter.Key != nil {\n\t\t\t\tf29elem.SetKey(*f29iter.Key)\n\t\t\t}\n\t\t\tif f29iter.Value != nil {\n\t\t\t\tf29elem.SetValue(*f29iter.Value)\n\t\t\t}\n\t\t\tf29 = append(f29, f29elem)\n\t\t}\n\t\tres.SetTags(f29)\n\t}\n\tif r.ko.Spec.TransitEncryptionEnabled != nil {\n\t\tres.SetTransitEncryptionEnabled(*r.ko.Spec.TransitEncryptionEnabled)\n\t}\n\tif r.ko.Spec.UserGroupIDs != nil {\n\t\tf31 := []*string{}\n\t\tfor _, f31iter := range r.ko.Spec.UserGroupIDs {\n\t\t\tvar f31elem string\n\t\t\tf31elem = *f31iter\n\t\t\tf31 = append(f31, &f31elem)\n\t\t}\n\t\tres.SetUserGroupIds(f31)\n\t}\n\n\treturn res, nil\n}", "func (o *ClientPermissionCreateOK) SetPayload(payload *ClientPermissionCreateOKBody) {\n\to.Payload = payload\n}", "func (o *ReplaceExtensionsV1beta1NamespacedIngressCreated) WithPayload(payload *models.IoK8sAPIExtensionsV1beta1Ingress) *ReplaceExtensionsV1beta1NamespacedIngressCreated {\n\to.Payload = payload\n\treturn o\n}", "func (rm *resourceManager) newCreateRequestPayload(\n\tctx context.Context,\n\tr *resource,\n) (*svcsdk.CreateModelPackageInput, error) {\n\tres := &svcsdk.CreateModelPackageInput{}\n\n\tif r.ko.Spec.CertifyForMarketplace != nil {\n\t\tres.SetCertifyForMarketplace(*r.ko.Spec.CertifyForMarketplace)\n\t}\n\tif r.ko.Spec.ClientToken != nil {\n\t\tres.SetClientToken(*r.ko.Spec.ClientToken)\n\t}\n\tif r.ko.Spec.InferenceSpecification != nil {\n\t\tf2 := &svcsdk.InferenceSpecification{}\n\t\tif r.ko.Spec.InferenceSpecification.Containers != nil {\n\t\t\tf2f0 := []*svcsdk.ModelPackageContainerDefinition{}\n\t\t\tfor _, f2f0iter := range r.ko.Spec.InferenceSpecification.Containers {\n\t\t\t\tf2f0elem := &svcsdk.ModelPackageContainerDefinition{}\n\t\t\t\tif f2f0iter.ContainerHostname != nil {\n\t\t\t\t\tf2f0elem.SetContainerHostname(*f2f0iter.ContainerHostname)\n\t\t\t\t}\n\t\t\t\tif f2f0iter.Image != nil {\n\t\t\t\t\tf2f0elem.SetImage(*f2f0iter.Image)\n\t\t\t\t}\n\t\t\t\tif f2f0iter.ImageDigest != nil {\n\t\t\t\t\tf2f0elem.SetImageDigest(*f2f0iter.ImageDigest)\n\t\t\t\t}\n\t\t\t\tif f2f0iter.ModelDataURL != nil {\n\t\t\t\t\tf2f0elem.SetModelDataUrl(*f2f0iter.ModelDataURL)\n\t\t\t\t}\n\t\t\t\tif f2f0iter.ProductID != nil {\n\t\t\t\t\tf2f0elem.SetProductId(*f2f0iter.ProductID)\n\t\t\t\t}\n\t\t\t\tf2f0 = append(f2f0, f2f0elem)\n\t\t\t}\n\t\t\tf2.SetContainers(f2f0)\n\t\t}\n\t\tif r.ko.Spec.InferenceSpecification.SupportedContentTypes != nil {\n\t\t\tf2f1 := []*string{}\n\t\t\tfor _, f2f1iter := range r.ko.Spec.InferenceSpecification.SupportedContentTypes {\n\t\t\t\tvar f2f1elem string\n\t\t\t\tf2f1elem = *f2f1iter\n\t\t\t\tf2f1 = append(f2f1, &f2f1elem)\n\t\t\t}\n\t\t\tf2.SetSupportedContentTypes(f2f1)\n\t\t}\n\t\tif 
r.ko.Spec.InferenceSpecification.SupportedRealtimeInferenceInstanceTypes != nil {\n\t\t\tf2f2 := []*string{}\n\t\t\tfor _, f2f2iter := range r.ko.Spec.InferenceSpecification.SupportedRealtimeInferenceInstanceTypes {\n\t\t\t\tvar f2f2elem string\n\t\t\t\tf2f2elem = *f2f2iter\n\t\t\t\tf2f2 = append(f2f2, &f2f2elem)\n\t\t\t}\n\t\t\tf2.SetSupportedRealtimeInferenceInstanceTypes(f2f2)\n\t\t}\n\t\tif r.ko.Spec.InferenceSpecification.SupportedResponseMIMETypes != nil {\n\t\t\tf2f3 := []*string{}\n\t\t\tfor _, f2f3iter := range r.ko.Spec.InferenceSpecification.SupportedResponseMIMETypes {\n\t\t\t\tvar f2f3elem string\n\t\t\t\tf2f3elem = *f2f3iter\n\t\t\t\tf2f3 = append(f2f3, &f2f3elem)\n\t\t\t}\n\t\t\tf2.SetSupportedResponseMIMETypes(f2f3)\n\t\t}\n\t\tif r.ko.Spec.InferenceSpecification.SupportedTransformInstanceTypes != nil {\n\t\t\tf2f4 := []*string{}\n\t\t\tfor _, f2f4iter := range r.ko.Spec.InferenceSpecification.SupportedTransformInstanceTypes {\n\t\t\t\tvar f2f4elem string\n\t\t\t\tf2f4elem = *f2f4iter\n\t\t\t\tf2f4 = append(f2f4, &f2f4elem)\n\t\t\t}\n\t\t\tf2.SetSupportedTransformInstanceTypes(f2f4)\n\t\t}\n\t\tres.SetInferenceSpecification(f2)\n\t}\n\tif r.ko.Spec.MetadataProperties != nil {\n\t\tf3 := &svcsdk.MetadataProperties{}\n\t\tif r.ko.Spec.MetadataProperties.CommitID != nil {\n\t\t\tf3.SetCommitId(*r.ko.Spec.MetadataProperties.CommitID)\n\t\t}\n\t\tif r.ko.Spec.MetadataProperties.GeneratedBy != nil {\n\t\t\tf3.SetGeneratedBy(*r.ko.Spec.MetadataProperties.GeneratedBy)\n\t\t}\n\t\tif r.ko.Spec.MetadataProperties.ProjectID != nil {\n\t\t\tf3.SetProjectId(*r.ko.Spec.MetadataProperties.ProjectID)\n\t\t}\n\t\tif r.ko.Spec.MetadataProperties.Repository != nil {\n\t\t\tf3.SetRepository(*r.ko.Spec.MetadataProperties.Repository)\n\t\t}\n\t\tres.SetMetadataProperties(f3)\n\t}\n\tif r.ko.Spec.ModelApprovalStatus != nil {\n\t\tres.SetModelApprovalStatus(*r.ko.Spec.ModelApprovalStatus)\n\t}\n\tif r.ko.Spec.ModelMetrics != nil {\n\t\tf5 := &svcsdk.ModelMetrics{}\n\t\tif r.ko.Spec.ModelMetrics.Bias != nil {\n\t\t\tf5f0 := &svcsdk.Bias{}\n\t\t\tif r.ko.Spec.ModelMetrics.Bias.Report != nil {\n\t\t\t\tf5f0f0 := &svcsdk.MetricsSource{}\n\t\t\t\tif r.ko.Spec.ModelMetrics.Bias.Report.ContentDigest != nil {\n\t\t\t\t\tf5f0f0.SetContentDigest(*r.ko.Spec.ModelMetrics.Bias.Report.ContentDigest)\n\t\t\t\t}\n\t\t\t\tif r.ko.Spec.ModelMetrics.Bias.Report.ContentType != nil {\n\t\t\t\t\tf5f0f0.SetContentType(*r.ko.Spec.ModelMetrics.Bias.Report.ContentType)\n\t\t\t\t}\n\t\t\t\tif r.ko.Spec.ModelMetrics.Bias.Report.S3URI != nil {\n\t\t\t\t\tf5f0f0.SetS3Uri(*r.ko.Spec.ModelMetrics.Bias.Report.S3URI)\n\t\t\t\t}\n\t\t\t\tf5f0.SetReport(f5f0f0)\n\t\t\t}\n\t\t\tf5.SetBias(f5f0)\n\t\t}\n\t\tif r.ko.Spec.ModelMetrics.Explainability != nil {\n\t\t\tf5f1 := &svcsdk.Explainability{}\n\t\t\tif r.ko.Spec.ModelMetrics.Explainability.Report != nil {\n\t\t\t\tf5f1f0 := &svcsdk.MetricsSource{}\n\t\t\t\tif r.ko.Spec.ModelMetrics.Explainability.Report.ContentDigest != nil {\n\t\t\t\t\tf5f1f0.SetContentDigest(*r.ko.Spec.ModelMetrics.Explainability.Report.ContentDigest)\n\t\t\t\t}\n\t\t\t\tif r.ko.Spec.ModelMetrics.Explainability.Report.ContentType != nil {\n\t\t\t\t\tf5f1f0.SetContentType(*r.ko.Spec.ModelMetrics.Explainability.Report.ContentType)\n\t\t\t\t}\n\t\t\t\tif r.ko.Spec.ModelMetrics.Explainability.Report.S3URI != nil {\n\t\t\t\t\tf5f1f0.SetS3Uri(*r.ko.Spec.ModelMetrics.Explainability.Report.S3URI)\n\t\t\t\t}\n\t\t\t\tf5f1.SetReport(f5f1f0)\n\t\t\t}\n\t\t\tf5.SetExplainability(f5f1)\n\t\t}\n\t\tif 
r.ko.Spec.ModelMetrics.ModelDataQuality != nil {\n\t\t\tf5f2 := &svcsdk.ModelDataQuality{}\n\t\t\tif r.ko.Spec.ModelMetrics.ModelDataQuality.Constraints != nil {\n\t\t\t\tf5f2f0 := &svcsdk.MetricsSource{}\n\t\t\t\tif r.ko.Spec.ModelMetrics.ModelDataQuality.Constraints.ContentDigest != nil {\n\t\t\t\t\tf5f2f0.SetContentDigest(*r.ko.Spec.ModelMetrics.ModelDataQuality.Constraints.ContentDigest)\n\t\t\t\t}\n\t\t\t\tif r.ko.Spec.ModelMetrics.ModelDataQuality.Constraints.ContentType != nil {\n\t\t\t\t\tf5f2f0.SetContentType(*r.ko.Spec.ModelMetrics.ModelDataQuality.Constraints.ContentType)\n\t\t\t\t}\n\t\t\t\tif r.ko.Spec.ModelMetrics.ModelDataQuality.Constraints.S3URI != nil {\n\t\t\t\t\tf5f2f0.SetS3Uri(*r.ko.Spec.ModelMetrics.ModelDataQuality.Constraints.S3URI)\n\t\t\t\t}\n\t\t\t\tf5f2.SetConstraints(f5f2f0)\n\t\t\t}\n\t\t\tif r.ko.Spec.ModelMetrics.ModelDataQuality.Statistics != nil {\n\t\t\t\tf5f2f1 := &svcsdk.MetricsSource{}\n\t\t\t\tif r.ko.Spec.ModelMetrics.ModelDataQuality.Statistics.ContentDigest != nil {\n\t\t\t\t\tf5f2f1.SetContentDigest(*r.ko.Spec.ModelMetrics.ModelDataQuality.Statistics.ContentDigest)\n\t\t\t\t}\n\t\t\t\tif r.ko.Spec.ModelMetrics.ModelDataQuality.Statistics.ContentType != nil {\n\t\t\t\t\tf5f2f1.SetContentType(*r.ko.Spec.ModelMetrics.ModelDataQuality.Statistics.ContentType)\n\t\t\t\t}\n\t\t\t\tif r.ko.Spec.ModelMetrics.ModelDataQuality.Statistics.S3URI != nil {\n\t\t\t\t\tf5f2f1.SetS3Uri(*r.ko.Spec.ModelMetrics.ModelDataQuality.Statistics.S3URI)\n\t\t\t\t}\n\t\t\t\tf5f2.SetStatistics(f5f2f1)\n\t\t\t}\n\t\t\tf5.SetModelDataQuality(f5f2)\n\t\t}\n\t\tif r.ko.Spec.ModelMetrics.ModelQuality != nil {\n\t\t\tf5f3 := &svcsdk.ModelQuality{}\n\t\t\tif r.ko.Spec.ModelMetrics.ModelQuality.Constraints != nil {\n\t\t\t\tf5f3f0 := &svcsdk.MetricsSource{}\n\t\t\t\tif r.ko.Spec.ModelMetrics.ModelQuality.Constraints.ContentDigest != nil {\n\t\t\t\t\tf5f3f0.SetContentDigest(*r.ko.Spec.ModelMetrics.ModelQuality.Constraints.ContentDigest)\n\t\t\t\t}\n\t\t\t\tif r.ko.Spec.ModelMetrics.ModelQuality.Constraints.ContentType != nil {\n\t\t\t\t\tf5f3f0.SetContentType(*r.ko.Spec.ModelMetrics.ModelQuality.Constraints.ContentType)\n\t\t\t\t}\n\t\t\t\tif r.ko.Spec.ModelMetrics.ModelQuality.Constraints.S3URI != nil {\n\t\t\t\t\tf5f3f0.SetS3Uri(*r.ko.Spec.ModelMetrics.ModelQuality.Constraints.S3URI)\n\t\t\t\t}\n\t\t\t\tf5f3.SetConstraints(f5f3f0)\n\t\t\t}\n\t\t\tif r.ko.Spec.ModelMetrics.ModelQuality.Statistics != nil {\n\t\t\t\tf5f3f1 := &svcsdk.MetricsSource{}\n\t\t\t\tif r.ko.Spec.ModelMetrics.ModelQuality.Statistics.ContentDigest != nil {\n\t\t\t\t\tf5f3f1.SetContentDigest(*r.ko.Spec.ModelMetrics.ModelQuality.Statistics.ContentDigest)\n\t\t\t\t}\n\t\t\t\tif r.ko.Spec.ModelMetrics.ModelQuality.Statistics.ContentType != nil {\n\t\t\t\t\tf5f3f1.SetContentType(*r.ko.Spec.ModelMetrics.ModelQuality.Statistics.ContentType)\n\t\t\t\t}\n\t\t\t\tif r.ko.Spec.ModelMetrics.ModelQuality.Statistics.S3URI != nil {\n\t\t\t\t\tf5f3f1.SetS3Uri(*r.ko.Spec.ModelMetrics.ModelQuality.Statistics.S3URI)\n\t\t\t\t}\n\t\t\t\tf5f3.SetStatistics(f5f3f1)\n\t\t\t}\n\t\t\tf5.SetModelQuality(f5f3)\n\t\t}\n\t\tres.SetModelMetrics(f5)\n\t}\n\tif r.ko.Spec.ModelPackageDescription != nil {\n\t\tres.SetModelPackageDescription(*r.ko.Spec.ModelPackageDescription)\n\t}\n\tif r.ko.Spec.ModelPackageGroupName != nil {\n\t\tres.SetModelPackageGroupName(*r.ko.Spec.ModelPackageGroupName)\n\t}\n\tif r.ko.Spec.ModelPackageName != nil {\n\t\tres.SetModelPackageName(*r.ko.Spec.ModelPackageName)\n\t}\n\tif r.ko.Spec.SourceAlgorithmSpecification 
!= nil {\n\t\tf9 := &svcsdk.SourceAlgorithmSpecification{}\n\t\tif r.ko.Spec.SourceAlgorithmSpecification.SourceAlgorithms != nil {\n\t\t\tf9f0 := []*svcsdk.SourceAlgorithm{}\n\t\t\tfor _, f9f0iter := range r.ko.Spec.SourceAlgorithmSpecification.SourceAlgorithms {\n\t\t\t\tf9f0elem := &svcsdk.SourceAlgorithm{}\n\t\t\t\tif f9f0iter.AlgorithmName != nil {\n\t\t\t\t\tf9f0elem.SetAlgorithmName(*f9f0iter.AlgorithmName)\n\t\t\t\t}\n\t\t\t\tif f9f0iter.ModelDataURL != nil {\n\t\t\t\t\tf9f0elem.SetModelDataUrl(*f9f0iter.ModelDataURL)\n\t\t\t\t}\n\t\t\t\tf9f0 = append(f9f0, f9f0elem)\n\t\t\t}\n\t\t\tf9.SetSourceAlgorithms(f9f0)\n\t\t}\n\t\tres.SetSourceAlgorithmSpecification(f9)\n\t}\n\tif r.ko.Spec.Tags != nil {\n\t\tf10 := []*svcsdk.Tag{}\n\t\tfor _, f10iter := range r.ko.Spec.Tags {\n\t\t\tf10elem := &svcsdk.Tag{}\n\t\t\tif f10iter.Key != nil {\n\t\t\t\tf10elem.SetKey(*f10iter.Key)\n\t\t\t}\n\t\t\tif f10iter.Value != nil {\n\t\t\t\tf10elem.SetValue(*f10iter.Value)\n\t\t\t}\n\t\t\tf10 = append(f10, f10elem)\n\t\t}\n\t\tres.SetTags(f10)\n\t}\n\tif r.ko.Spec.ValidationSpecification != nil {\n\t\tf11 := &svcsdk.ModelPackageValidationSpecification{}\n\t\tif r.ko.Spec.ValidationSpecification.ValidationProfiles != nil {\n\t\t\tf11f0 := []*svcsdk.ModelPackageValidationProfile{}\n\t\t\tfor _, f11f0iter := range r.ko.Spec.ValidationSpecification.ValidationProfiles {\n\t\t\t\tf11f0elem := &svcsdk.ModelPackageValidationProfile{}\n\t\t\t\tif f11f0iter.ProfileName != nil {\n\t\t\t\t\tf11f0elem.SetProfileName(*f11f0iter.ProfileName)\n\t\t\t\t}\n\t\t\t\tif f11f0iter.TransformJobDefinition != nil {\n\t\t\t\t\tf11f0elemf1 := &svcsdk.TransformJobDefinition{}\n\t\t\t\t\tif f11f0iter.TransformJobDefinition.BatchStrategy != nil {\n\t\t\t\t\t\tf11f0elemf1.SetBatchStrategy(*f11f0iter.TransformJobDefinition.BatchStrategy)\n\t\t\t\t\t}\n\t\t\t\t\tif f11f0iter.TransformJobDefinition.Environment != nil {\n\t\t\t\t\t\tf11f0elemf1f1 := map[string]*string{}\n\t\t\t\t\t\tfor f11f0elemf1f1key, f11f0elemf1f1valiter := range f11f0iter.TransformJobDefinition.Environment {\n\t\t\t\t\t\t\tvar f11f0elemf1f1val string\n\t\t\t\t\t\t\tf11f0elemf1f1val = *f11f0elemf1f1valiter\n\t\t\t\t\t\t\tf11f0elemf1f1[f11f0elemf1f1key] = &f11f0elemf1f1val\n\t\t\t\t\t\t}\n\t\t\t\t\t\tf11f0elemf1.SetEnvironment(f11f0elemf1f1)\n\t\t\t\t\t}\n\t\t\t\t\tif f11f0iter.TransformJobDefinition.MaxConcurrentTransforms != nil {\n\t\t\t\t\t\tf11f0elemf1.SetMaxConcurrentTransforms(*f11f0iter.TransformJobDefinition.MaxConcurrentTransforms)\n\t\t\t\t\t}\n\t\t\t\t\tif f11f0iter.TransformJobDefinition.MaxPayloadInMB != nil {\n\t\t\t\t\t\tf11f0elemf1.SetMaxPayloadInMB(*f11f0iter.TransformJobDefinition.MaxPayloadInMB)\n\t\t\t\t\t}\n\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformInput != nil {\n\t\t\t\t\t\tf11f0elemf1f4 := &svcsdk.TransformInput{}\n\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformInput.CompressionType != nil {\n\t\t\t\t\t\t\tf11f0elemf1f4.SetCompressionType(*f11f0iter.TransformJobDefinition.TransformInput.CompressionType)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformInput.ContentType != nil {\n\t\t\t\t\t\t\tf11f0elemf1f4.SetContentType(*f11f0iter.TransformJobDefinition.TransformInput.ContentType)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformInput.DataSource != nil {\n\t\t\t\t\t\t\tf11f0elemf1f4f2 := &svcsdk.TransformDataSource{}\n\t\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformInput.DataSource.S3DataSource != nil {\n\t\t\t\t\t\t\t\tf11f0elemf1f4f2f0 := 
&svcsdk.TransformS3DataSource{}\n\t\t\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformInput.DataSource.S3DataSource.S3DataType != nil {\n\t\t\t\t\t\t\t\t\tf11f0elemf1f4f2f0.SetS3DataType(*f11f0iter.TransformJobDefinition.TransformInput.DataSource.S3DataSource.S3DataType)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformInput.DataSource.S3DataSource.S3URI != nil {\n\t\t\t\t\t\t\t\t\tf11f0elemf1f4f2f0.SetS3Uri(*f11f0iter.TransformJobDefinition.TransformInput.DataSource.S3DataSource.S3URI)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tf11f0elemf1f4f2.SetS3DataSource(f11f0elemf1f4f2f0)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tf11f0elemf1f4.SetDataSource(f11f0elemf1f4f2)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformInput.SplitType != nil {\n\t\t\t\t\t\t\tf11f0elemf1f4.SetSplitType(*f11f0iter.TransformJobDefinition.TransformInput.SplitType)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tf11f0elemf1.SetTransformInput(f11f0elemf1f4)\n\t\t\t\t\t}\n\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformOutput != nil {\n\t\t\t\t\t\tf11f0elemf1f5 := &svcsdk.TransformOutput{}\n\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformOutput.Accept != nil {\n\t\t\t\t\t\t\tf11f0elemf1f5.SetAccept(*f11f0iter.TransformJobDefinition.TransformOutput.Accept)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformOutput.AssembleWith != nil {\n\t\t\t\t\t\t\tf11f0elemf1f5.SetAssembleWith(*f11f0iter.TransformJobDefinition.TransformOutput.AssembleWith)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformOutput.KMSKeyID != nil {\n\t\t\t\t\t\t\tf11f0elemf1f5.SetKmsKeyId(*f11f0iter.TransformJobDefinition.TransformOutput.KMSKeyID)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformOutput.S3OutputPath != nil {\n\t\t\t\t\t\t\tf11f0elemf1f5.SetS3OutputPath(*f11f0iter.TransformJobDefinition.TransformOutput.S3OutputPath)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tf11f0elemf1.SetTransformOutput(f11f0elemf1f5)\n\t\t\t\t\t}\n\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformResources != nil {\n\t\t\t\t\t\tf11f0elemf1f6 := &svcsdk.TransformResources{}\n\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformResources.InstanceCount != nil {\n\t\t\t\t\t\t\tf11f0elemf1f6.SetInstanceCount(*f11f0iter.TransformJobDefinition.TransformResources.InstanceCount)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformResources.InstanceType != nil {\n\t\t\t\t\t\t\tf11f0elemf1f6.SetInstanceType(*f11f0iter.TransformJobDefinition.TransformResources.InstanceType)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformResources.VolumeKMSKeyID != nil {\n\t\t\t\t\t\t\tf11f0elemf1f6.SetVolumeKmsKeyId(*f11f0iter.TransformJobDefinition.TransformResources.VolumeKMSKeyID)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tf11f0elemf1.SetTransformResources(f11f0elemf1f6)\n\t\t\t\t\t}\n\t\t\t\t\tf11f0elem.SetTransformJobDefinition(f11f0elemf1)\n\t\t\t\t}\n\t\t\t\tf11f0 = append(f11f0, f11f0elem)\n\t\t\t}\n\t\t\tf11.SetValidationProfiles(f11f0)\n\t\t}\n\t\tif r.ko.Spec.ValidationSpecification.ValidationRole != nil {\n\t\t\tf11.SetValidationRole(*r.ko.Spec.ValidationSpecification.ValidationRole)\n\t\t}\n\t\tres.SetValidationSpecification(f11)\n\t}\n\n\treturn res, nil\n}", "func (o *CreateExtensionsV1beta1NamespacedIngressCreated) WithPayload(payload *models.IoK8sAPIExtensionsV1beta1Ingress) *CreateExtensionsV1beta1NamespacedIngressCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateStorageSSLCertificateCreated) SetPayload(payload 
*models.SslCertificate) {\n\to.Payload = payload\n}", "func (o *GetPresignedForClusterFilesInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *DeleteRuntimeContainerInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *CreateUploadSessionDefault) WithPayload(payload *models.Error) *CreateUploadSessionDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateAuthenticationV1beta1TokenReviewCreated) SetPayload(payload *models.IoK8sAPIAuthenticationV1beta1TokenReview) {\n\to.Payload = payload\n}", "func (o *V1CreateHelloOK) SetPayload(payload *models.CreateHelloResponse) {\n\to.Payload = payload\n}", "func (o *ReadStorageV1beta1CSIDriverOK) SetPayload(payload *models.IoK8sAPIStorageV1beta1CSIDriver) {\n\to.Payload = payload\n}", "func (o *PutSlideSuperlikeCreated) SetPayload(payload models.Success) {\n\to.Payload = payload\n}", "func (o *CreateStorageSSLCertificateDefault) WithPayload(payload *models.Error) *CreateStorageSSLCertificateDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateExtensionsV1beta1NamespacedIngressOK) WithPayload(payload *models.IoK8sAPIExtensionsV1beta1Ingress) *CreateExtensionsV1beta1NamespacedIngressOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetServicesHaproxyRuntimeAclsIDOK) SetPayload(payload *models.ACLFile) {\n\to.Payload = payload\n}", "func (o *CreateZoneCreated) SetPayload(payload *models.CreateZoneResponse) {\n\to.Payload = payload\n}", "func (r CreateRequest) Payload() *model.Payload {\n\tbuf, _ := json.Marshal(r)\n\treturn model.NewPostPayload(buf)\n}", "func (o *CreateCoreV1NamespacedServiceAccountTokenAccepted) SetPayload(payload *models.IoK8sAPIAuthenticationV1TokenRequest) {\n\to.Payload = payload\n}", "func (o *CreateNetworkingV1beta1NamespacedIngressCreated) SetPayload(payload *models.IoK8sAPINetworkingV1beta1Ingress) {\n\to.Payload = payload\n}", "func (o *CreateOK) SetPayload(payload *models.Event) {\n\to.Payload = payload\n}", "func (o *ServiceAddCreated) SetPayload(payload *models.Service) {\n\to.Payload = payload\n}", "func (o *SetResourceCreated) SetPayload(payload *models.Resource) {\n\to.Payload = payload\n}", "func (o *AddKeypairCreated) SetPayload(payload models.ULID) {\n\to.Payload = payload\n}", "func (o *ReplaceApiextensionsV1beta1CustomResourceDefinitionCreated) SetPayload(payload *models.IoK8sApiextensionsApiserverPkgApisApiextensionsV1beta1CustomResourceDefinition) {\n\to.Payload = payload\n}", "func (o *CreateFileDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateOK) WithPayload(payload *models.Event) *CreateOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *ReplaceCertificatesV1CertificateSigningRequestCreated) SetPayload(payload *models.IoK8sAPICertificatesV1CertificateSigningRequest) {\n\to.Payload = payload\n}", "func (o *DeleteStorageByIDUnauthorized) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *SemverGenerateCreated) SetPayload(payload *models.SemverTagSet) {\n\to.Payload = payload\n}", "func (o *CreateFileDefault) WithPayload(payload *models.Error) *CreateFileDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *PostOrderCreated) SetPayload(payload *models.OrderCreateResponse) {\n\to.Payload = payload\n}", "func (o *PutProjectProjectNameStageStageNameServiceServiceNameResourceCreated) SetPayload(payload *models.Version) {\n\to.Payload = payload\n}", "func (o *PutReposOwnerRepoContentsPathOK) SetPayload(payload *models.CreateFile) {\n\to.Payload 
= payload\n}", "func (o *CreateCoordinationV1NamespacedLeaseCreated) SetPayload(payload *models.IoK8sAPICoordinationV1Lease) {\n\to.Payload = payload\n}", "func (o *DeleteApiextensionsV1CollectionCustomResourceDefinitionOK) SetPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) {\n\to.Payload = payload\n}", "func (o *V1CreateHelloInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateUserGardenCreated) SetPayload(payload *models.Garden) {\n\to.Payload = payload\n}", "func (o *ReplicateCreated) WithPayload(payload *models.SteeringRequestID) *ReplicateCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetVSphereComputeResourcesOK) SetPayload(payload []*models.VSphereManagementObject) {\n\to.Payload = payload\n}", "func (o *GetVMVolumeDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateUserGardenDefault) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *CreateUploadSessionCreated) SetPayload(payload *models.UploadSession) {\n\to.Payload = payload\n}", "func (o *CreateACLAccepted) SetPayload(payload *models.ACL) {\n\to.Payload = payload\n}", "func (o *PutSlideLikeCreated) SetPayload(payload models.Success) {\n\to.Payload = payload\n}", "func (o *RegisterInfraEnvCreated) SetPayload(payload *models.InfraEnv) {\n\to.Payload = payload\n}", "func (o *CreateTaskInternalServerError) SetPayload(payload interface{}) {\n\to.Payload = payload\n}", "func (o *GetHealthzInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *WatchApiregistrationV1APIServiceOK) SetPayload(payload *models.IoK8sApimachineryPkgApisMetaV1WatchEvent) {\n\to.Payload = payload\n}", "func (o *DeleteCoreV1NamespacedPodOK) SetPayload(payload *models.IoK8sAPICoreV1Pod) {\n\to.Payload = payload\n}", "func (o *CreateCurrentAPISessionCertificateOK) SetPayload(payload *rest_model.CreateCurrentAPISessionCertificateEnvelope) {\n\to.Payload = payload\n}", "func (o *ReplaceNodeV1alpha1RuntimeClassCreated) SetPayload(payload *models.IoK8sAPINodeV1alpha1RuntimeClass) {\n\to.Payload = payload\n}", "func (o *ReplicateCreated) SetPayload(payload *models.SteeringRequestID) {\n\to.Payload = payload\n}", "func (o *CreateUploadSessionDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateBatchV1NamespacedJobCreated) SetPayload(payload *models.IoK8sAPIBatchV1Job) {\n\to.Payload = payload\n}", "func (o *GetProjectProjectNameServiceServiceNameResourceOK) SetPayload(payload *models.Resources) {\n\to.Payload = payload\n}", "func (rm *resourceManager) newCreateRequestPayload(\n\tr *resource,\n) (*svcsdk.CreateStageInput, error) {\n\tres := &svcsdk.CreateStageInput{}\n\n\tif r.ko.Spec.AccessLogSettings != nil {\n\t\tf0 := &svcsdk.AccessLogSettings{}\n\t\tif r.ko.Spec.AccessLogSettings.DestinationARN != nil {\n\t\t\tf0.SetDestinationArn(*r.ko.Spec.AccessLogSettings.DestinationARN)\n\t\t}\n\t\tif r.ko.Spec.AccessLogSettings.Format != nil {\n\t\t\tf0.SetFormat(*r.ko.Spec.AccessLogSettings.Format)\n\t\t}\n\t\tres.SetAccessLogSettings(f0)\n\t}\n\tif r.ko.Spec.APIID != nil {\n\t\tres.SetApiId(*r.ko.Spec.APIID)\n\t}\n\tif r.ko.Spec.AutoDeploy != nil {\n\t\tres.SetAutoDeploy(*r.ko.Spec.AutoDeploy)\n\t}\n\tif r.ko.Spec.ClientCertificateID != nil {\n\t\tres.SetClientCertificateId(*r.ko.Spec.ClientCertificateID)\n\t}\n\tif r.ko.Spec.DefaultRouteSettings != nil {\n\t\tf4 := &svcsdk.RouteSettings{}\n\t\tif r.ko.Spec.DefaultRouteSettings.DataTraceEnabled != nil 
{\n\t\t\tf4.SetDataTraceEnabled(*r.ko.Spec.DefaultRouteSettings.DataTraceEnabled)\n\t\t}\n\t\tif r.ko.Spec.DefaultRouteSettings.DetailedMetricsEnabled != nil {\n\t\t\tf4.SetDetailedMetricsEnabled(*r.ko.Spec.DefaultRouteSettings.DetailedMetricsEnabled)\n\t\t}\n\t\tif r.ko.Spec.DefaultRouteSettings.LoggingLevel != nil {\n\t\t\tf4.SetLoggingLevel(*r.ko.Spec.DefaultRouteSettings.LoggingLevel)\n\t\t}\n\t\tif r.ko.Spec.DefaultRouteSettings.ThrottlingBurstLimit != nil {\n\t\t\tf4.SetThrottlingBurstLimit(*r.ko.Spec.DefaultRouteSettings.ThrottlingBurstLimit)\n\t\t}\n\t\tif r.ko.Spec.DefaultRouteSettings.ThrottlingRateLimit != nil {\n\t\t\tf4.SetThrottlingRateLimit(*r.ko.Spec.DefaultRouteSettings.ThrottlingRateLimit)\n\t\t}\n\t\tres.SetDefaultRouteSettings(f4)\n\t}\n\tif r.ko.Spec.DeploymentID != nil {\n\t\tres.SetDeploymentId(*r.ko.Spec.DeploymentID)\n\t}\n\tif r.ko.Spec.Description != nil {\n\t\tres.SetDescription(*r.ko.Spec.Description)\n\t}\n\tif r.ko.Spec.RouteSettings != nil {\n\t\tf7 := map[string]*svcsdk.RouteSettings{}\n\t\tfor f7key, f7valiter := range r.ko.Spec.RouteSettings {\n\t\t\tf7val := &svcsdk.RouteSettings{}\n\t\t\tif f7valiter.DataTraceEnabled != nil {\n\t\t\t\tf7val.SetDataTraceEnabled(*f7valiter.DataTraceEnabled)\n\t\t\t}\n\t\t\tif f7valiter.DetailedMetricsEnabled != nil {\n\t\t\t\tf7val.SetDetailedMetricsEnabled(*f7valiter.DetailedMetricsEnabled)\n\t\t\t}\n\t\t\tif f7valiter.LoggingLevel != nil {\n\t\t\t\tf7val.SetLoggingLevel(*f7valiter.LoggingLevel)\n\t\t\t}\n\t\t\tif f7valiter.ThrottlingBurstLimit != nil {\n\t\t\t\tf7val.SetThrottlingBurstLimit(*f7valiter.ThrottlingBurstLimit)\n\t\t\t}\n\t\t\tif f7valiter.ThrottlingRateLimit != nil {\n\t\t\t\tf7val.SetThrottlingRateLimit(*f7valiter.ThrottlingRateLimit)\n\t\t\t}\n\t\t\tf7[f7key] = f7val\n\t\t}\n\t\tres.SetRouteSettings(f7)\n\t}\n\tif r.ko.Spec.StageName != nil {\n\t\tres.SetStageName(*r.ko.Spec.StageName)\n\t}\n\tif r.ko.Spec.StageVariables != nil {\n\t\tf9 := map[string]*string{}\n\t\tfor f9key, f9valiter := range r.ko.Spec.StageVariables {\n\t\t\tvar f9val string\n\t\t\tf9val = *f9valiter\n\t\t\tf9[f9key] = &f9val\n\t\t}\n\t\tres.SetStageVariables(f9)\n\t}\n\tif r.ko.Spec.Tags != nil {\n\t\tf10 := map[string]*string{}\n\t\tfor f10key, f10valiter := range r.ko.Spec.Tags {\n\t\t\tvar f10val string\n\t\t\tf10val = *f10valiter\n\t\t\tf10[f10key] = &f10val\n\t\t}\n\t\tres.SetTags(f10)\n\t}\n\n\treturn res, nil\n}", "func (o *GetAllStorageNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *PostReposOwnerRepoKeysCreated) SetPayload(payload *models.UserKeysKeyID) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedPodBindingCreated) SetPayload(payload *models.IoK8sAPICoreV1Binding) {\n\to.Payload = payload\n}", "func (o *CreateFoldersCreated) SetPayload(payload *models.CreateFolderResp) {\n\to.Payload = payload\n}", "func (o *GetAllStorageOK) SetPayload(payload *models.PageStorageList) {\n\to.Payload = payload\n}", "func (o *GetClusterOK) SetPayload(payload *models.Cluster) {\n\to.Payload = payload\n}", "func (o AccountSAS) signPayload(params *url.Values) {\n\t// Refer: https://docs.microsoft.com/en-us/rest/api/storageservices/create-account-sas#constructing-the-signature-string\n\t// To construct the signature string for an account SAS, first construct the\n\t// string-to-sign from the fields comprising the request, then encode the\n\t// string as UTF-8 and compute the signature using the HMAC-SHA256\n\t// algorithm.\n\t//\n\t// Note:\n\t// - Fields included in the string-to-sign must be 
UTF-8, URL-decoded.\n\t// - Go by default uses utf-8 encoded strings.\n\t// - The `String()` methods ensure no URL encoding is taking place.\n\tstringToSign := o.storageAccountName + \"\\n\" +\n\t\to.SignedPermission.String() + \"\\n\" +\n\t\to.SignedServices.String() + \"\\n\" +\n\t\to.SignedResourceTypes.String() + \"\\n\" +\n\t\taztime.ToString(o.SignedStart) + \"\\n\" +\n\t\taztime.ToString(o.SignedExpiry) + \"\\n\" +\n\t\to.SignedIP.String() + \"\\n\" +\n\t\to.SignedProtocol.String() + \"\\n\" +\n\t\to.SignedVersion.String() + \"\\n\"\n\n\t// Compute HMAC-S256 signature\n\tsignature := crypto.HMACSHA256(\n\t\to.storageAccountKey,\n\t\t[]byte(stringToSign),\n\t)\n\n\tparams.Add(\"sig\", signature)\n}", "func (o *ClientPermissionCreateNotFound) SetPayload(payload *ClientPermissionCreateNotFoundBody) {\n\to.Payload = payload\n}", "func (o *GetPresignedForClusterFilesOK) SetPayload(payload *models.Presigned) {\n\to.Payload = payload\n}", "func (o *ReplaceStorageV1CSINodeOK) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *ReplaceStorageV1CSINodeOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateFileOK) SetPayload(payload *models.FileInfo) {\n\to.Payload = payload\n}", "func (o *GetClusterInstallConfigOK) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *CreateACLCreated) SetPayload(payload *models.ACL) {\n\to.Payload = payload\n}", "func (w *Writer) encodePayload(e *xml.Encoder) error {\n\tif w.ObjectName == \"\" {\n\t\treturn e.Encode(w.Payload)\n\t}\n\treturn e.EncodeElement(w.Payload, xml.StartElement{Name: xml.Name{Local: w.ObjectName}})\n}", "func (o *CreateDocumentCreated) SetPayload(payload *internalmessages.Document) {\n\to.Payload = payload\n}", "func (o *DeleteImageDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *PutMeetupOK) SetPayload(payload *models.CreateObject) {\n\to.Payload = payload\n}", "func (l *Loader) newPayload(name string, ver int) int {\n\tpi := len(l.payloads)\n\tpp := l.allocPayload()\n\tpp.name = name\n\tpp.ver = ver\n\tl.payloads = append(l.payloads, pp)\n\tl.growExtAttrBitmaps()\n\treturn pi\n}", "func (o *GetServiceInstanceByNameDefault) SetPayload(payload *v1.Error) {\n\to.Payload = payload\n}", "func (o *GetS3BackupDefault) SetPayload(payload *models.Response) {\n\to.Payload = payload\n}", "func (o *GetVSphereComputeResourcesInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *PutWorkpaceByIDInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *GetPresignedForClusterFilesNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetVMVolumeOK) SetPayload(payload *models.VMVolume) {\n\to.Payload = payload\n}" ]
[ "0.62328905", "0.61603296", "0.5820772", "0.58046573", "0.5724808", "0.56804353", "0.549075", "0.54602885", "0.5446634", "0.54446435", "0.53883576", "0.5381992", "0.53013504", "0.5258901", "0.52294034", "0.5212655", "0.5201414", "0.51650834", "0.5115418", "0.511022", "0.5107597", "0.510538", "0.50979793", "0.50612956", "0.50599796", "0.50571674", "0.5055609", "0.50545347", "0.5054407", "0.5051237", "0.50283694", "0.5023062", "0.50214607", "0.5020122", "0.50155556", "0.50148124", "0.50124496", "0.50122756", "0.5009615", "0.50070435", "0.49954492", "0.49926415", "0.49914315", "0.49908692", "0.49908414", "0.49870437", "0.49778837", "0.4976418", "0.49756593", "0.4966552", "0.49658555", "0.49650788", "0.49616605", "0.49614707", "0.49543968", "0.49536973", "0.49535394", "0.4948762", "0.4943366", "0.4940812", "0.49402866", "0.49392375", "0.49364695", "0.49364495", "0.49362987", "0.49354568", "0.49323073", "0.49285102", "0.491959", "0.4916498", "0.49147442", "0.4913748", "0.49108088", "0.49047744", "0.49025622", "0.49024373", "0.4898566", "0.48965544", "0.48877156", "0.48850295", "0.4876522", "0.48764476", "0.4876205", "0.48748562", "0.4874031", "0.48704168", "0.4868422", "0.48666635", "0.48654446", "0.48620754", "0.48617935", "0.48604885", "0.48599273", "0.48586398", "0.4855507", "0.48533335", "0.4851218", "0.48508427", "0.48496488", "0.48438838" ]
0.5819
3
SetPayload sets the payload to the create storage v1 c s i node o k response
func (o *CreateStorageV1CSINodeOK) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) { o.Payload = payload }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *CreateStorageV1CSINodeCreated) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (o *ReplaceStorageV1CSINodeCreated) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (o *CreateClusterCreated) SetPayload(payload *models.Kluster) {\n\to.Payload = payload\n}", "func (o *DeleteStorageByIDOK) SetPayload(payload *models.Storage) {\n\to.Payload = payload\n}", "func (o *ReplaceStorageV1CSINodeOK) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedServiceAccountTokenCreated) SetPayload(payload *models.IoK8sAPIAuthenticationV1TokenRequest) {\n\to.Payload = payload\n}", "func (o *CreateHPCResourceCreated) SetPayload(payload *models.CreatedResponse) {\n\to.Payload = payload\n}", "func (o *CreateClusterDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateStorageSSLCertificateDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (nf *NetworkPayload) SetPayload(newpayload []byte) {\n}", "func (o *CreateTaskInternalServerError) SetPayload(payload interface{}) {\n\to.Payload = payload\n}", "func (o *PutSlideSuperlikeCreated) SetPayload(payload models.Success) {\n\to.Payload = payload\n}", "func (o *ClientPermissionCreateInternalServerError) SetPayload(payload *ClientPermissionCreateInternalServerErrorBody) {\n\to.Payload = payload\n}", "func (o *GetServicesHaproxyRuntimeAclsIDOK) SetPayload(payload *models.ACLFile) {\n\to.Payload = payload\n}", "func (o *DeleteRuntimeContainerInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *GetPresignedForClusterFilesInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *DeleteStorageByIDNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *PutSlideLikeCreated) SetPayload(payload models.Success) {\n\to.Payload = payload\n}", "func (o *GetDataContextTopologyUUIDNodeNodeUUIDOK) SetPayload(payload *models.TapiTopologyTopologyNode) {\n\to.Payload = payload\n}", "func (o *CreateSpoeCreated) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedServiceAccountTokenOK) SetPayload(payload *models.IoK8sAPIAuthenticationV1TokenRequest) {\n\to.Payload = payload\n}", "func (o *CreateOK) SetPayload(payload *models.Event) {\n\to.Payload = payload\n}", "func (o *GetVSphereComputeResourcesInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetVSphereComputeResourcesOK) SetPayload(payload []*models.VSphereManagementObject) {\n\to.Payload = payload\n}", "func (o *CreateStorageSSLCertificateCreated) SetPayload(payload *models.SslCertificate) {\n\to.Payload = payload\n}", "func (o *GetHealthzInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *RegisterInfraEnvCreated) SetPayload(payload *models.InfraEnv) {\n\to.Payload = payload\n}", "func (o *ReplaceNodeV1alpha1RuntimeClassCreated) SetPayload(payload *models.IoK8sAPINodeV1alpha1RuntimeClass) {\n\to.Payload = payload\n}", "func (o *PutWorkpaceByIDInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *SemverGenerateCreated) SetPayload(payload *models.SemverTagSet) {\n\to.Payload = payload\n}", "func (o *GetBackupRuntimeEnvironmentsInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *ReplicateCreated) SetPayload(payload *models.SteeringRequestID) 
{\n\to.Payload = payload\n}", "func (o *AddKeypairCreated) SetPayload(payload models.ULID) {\n\to.Payload = payload\n}", "func (o *GetVSphereDatastoresInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateZoneCreated) SetPayload(payload *models.CreateZoneResponse) {\n\to.Payload = payload\n}", "func (o *SetResourceCreated) SetPayload(payload *models.Resource) {\n\to.Payload = payload\n}", "func (o *DeleteStorageByIDUnauthorized) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateHPCResourceInternalServerError) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *UpdateClusterInternalServerError) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (tx *Transaction) SetPayload() {\n\tsize := make([]byte, 300)\n\ttx.data.Payload = size\n}", "func (o *ReadStorageV1beta1CSIDriverOK) SetPayload(payload *models.IoK8sAPIStorageV1beta1CSIDriver) {\n\to.Payload = payload\n}", "func (o *GetClusterInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *PostOrderCreated) SetPayload(payload *models.OrderCreateResponse) {\n\to.Payload = payload\n}", "func (o *GetPresignedForClusterFilesOK) SetPayload(payload *models.Presigned) {\n\to.Payload = payload\n}", "func (o *CreateUploadSessionDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateZoneInternalServerError) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *PutReposOwnerRepoContentsPathOK) SetPayload(payload *models.CreateFile) {\n\to.Payload = payload\n}", "func (o *CreateStorageV1CSINodeAccepted) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (o *V1CreateHelloInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateCoordinationV1NamespacedLeaseCreated) SetPayload(payload *models.IoK8sAPICoordinationV1Lease) {\n\to.Payload = payload\n}", "func (o *CreateFileDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *UpdateCatalogInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *GetNFTContractTokenOK) SetPayload(payload *models.NFTTokenRow) {\n\to.Payload = payload\n}", "func (o *AddNewMaterialsForPostInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *DeletePostbyIDInternalServerError) SetPayload(payload *models.Response) {\n\to.Payload = payload\n}", "func (o *CreateSpoeDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *AddKeypairInternalServerError) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *GetVMVolumeDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *AddRegionAZCreated) SetPayload(payload models.ULID) {\n\to.Payload = payload\n}", "func (o *GetServicesHaproxyRuntimeAclsIDDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateACLAccepted) SetPayload(payload *models.ACL) {\n\to.Payload = payload\n}", "func (o *GetV1RdssInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetClusterInstallConfigInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateAuthenticationV1beta1TokenReviewCreated) SetPayload(payload *models.IoK8sAPIAuthenticationV1beta1TokenReview) {\n\to.Payload = payload\n}", "func (o 
*ReplaceAppsV1NamespacedReplicaSetScaleCreated) SetPayload(payload *models.IoK8sAPIAutoscalingV1Scale) {\n\to.Payload = payload\n}", "func (o *CreateACLDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateUserGardenDefault) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *GetS3BackupDefault) SetPayload(payload *models.Response) {\n\to.Payload = payload\n}", "func (o *UpdateMoveTaskOrderPostCounselingInformationInternalServerError) SetPayload(payload interface{}) {\n\to.Payload = payload\n}", "func (o *CreateCoordinationV1NamespacedLeaseOK) SetPayload(payload *models.IoK8sAPICoordinationV1Lease) {\n\to.Payload = payload\n}", "func (o *CreateUploadSessionCreated) SetPayload(payload *models.UploadSession) {\n\to.Payload = payload\n}", "func (o *ReplaceExtensionsV1beta1NamespacedIngressCreated) SetPayload(payload *models.IoK8sAPIExtensionsV1beta1Ingress) {\n\to.Payload = payload\n}", "func (o *PutMeetupOK) SetPayload(payload *models.CreateObject) {\n\to.Payload = payload\n}", "func (o *PutWorkpaceByIDOK) SetPayload(payload *models.Workspace) {\n\to.Payload = payload\n}", "func (o *CreateExtensionsV1beta1NamespacedIngressCreated) SetPayload(payload *models.IoK8sAPIExtensionsV1beta1Ingress) {\n\to.Payload = payload\n}", "func (o *GetAllStorageOK) SetPayload(payload *models.PageStorageList) {\n\to.Payload = payload\n}", "func (o *CreateTCPCheckDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetPresignedForClusterFilesNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetTaskSyncInternalServerError) SetPayload(payload interface{}) {\n\to.Payload = payload\n}", "func (o *GetProviderRegionByIDInternalServerError) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *GetVSphereDatastoresOK) SetPayload(payload []*models.VSphereDatastore) {\n\to.Payload = payload\n}", "func (o *PostReposOwnerRepoKeysCreated) SetPayload(payload *models.UserKeysKeyID) {\n\to.Payload = payload\n}", "func (o *GetClusterInstallConfigOK) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedPodBindingCreated) SetPayload(payload *models.IoK8sAPICoreV1Binding) {\n\to.Payload = payload\n}", "func (o *DeleteOrganizationInternalServerError) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *CreateTaskDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateACLCreated) SetPayload(payload *models.ACL) {\n\to.Payload = payload\n}", "func (o *GetPresignedForClusterFilesUnauthorized) SetPayload(payload *models.InfraError) {\n\to.Payload = payload\n}", "func (o *GetTagInternalServerError) SetPayload(payload models.Error) {\n\to.Payload = payload\n}", "func (o *CreateStorageSSLCertificateConflict) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *PostInteractionCreated) SetPayload(payload *models.ConsoleInteraction) {\n\to.Payload = payload\n}", "func (o *GetS3BackupOK) SetPayload(payload *models.Response) {\n\to.Payload = payload\n}", "func (o *GetHealthzOK) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *DeleteRuntimeContainerNotFound) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *ReplaceCertificatesV1CertificateSigningRequestCreated) SetPayload(payload *models.IoK8sAPICertificatesV1CertificateSigningRequest) {\n\to.Payload = payload\n}", "func (o *PutProjectProjectNameStageStageNameResourceResourceURIDefault) 
SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *V1CreateHelloOK) SetPayload(payload *models.CreateHelloResponse) {\n\to.Payload = payload\n}", "func (o *AddRegionAZInternalServerError) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *PutSlideSuperlikeDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetClusterOK) SetPayload(payload *models.Cluster) {\n\to.Payload = payload\n}" ]
[ "0.70977515", "0.6899386", "0.68568575", "0.67074084", "0.6604878", "0.6598248", "0.65638936", "0.65335", "0.652902", "0.65217173", "0.65172774", "0.65045047", "0.64854646", "0.6482898", "0.64746904", "0.6470571", "0.64697737", "0.6460338", "0.6450474", "0.64481056", "0.6432618", "0.6428215", "0.6403498", "0.64004284", "0.63988733", "0.639036", "0.6387434", "0.63805", "0.63788724", "0.6372221", "0.63708645", "0.6363139", "0.63497627", "0.63442093", "0.6343354", "0.63420963", "0.6339794", "0.6320318", "0.6315523", "0.63132334", "0.6306194", "0.6304245", "0.6285652", "0.628215", "0.62797016", "0.62737864", "0.6273542", "0.62721527", "0.6266982", "0.62654173", "0.62638825", "0.62618744", "0.6261184", "0.62606645", "0.625601", "0.62531155", "0.624595", "0.62427825", "0.62374556", "0.6235498", "0.6233888", "0.62238127", "0.6220575", "0.62180847", "0.62161136", "0.62145174", "0.62134075", "0.62100524", "0.62093276", "0.6209312", "0.6209099", "0.62050503", "0.62006664", "0.6193378", "0.61915106", "0.6188927", "0.6187647", "0.6186553", "0.61854446", "0.6176998", "0.6175128", "0.61737204", "0.6171902", "0.6171271", "0.6171058", "0.6170094", "0.61679643", "0.6159881", "0.6158727", "0.61576766", "0.61573064", "0.6157162", "0.6156729", "0.6155572", "0.6155007", "0.61539704", "0.61524236", "0.6151563", "0.61512655", "0.6139785" ]
0.71910954
0
WriteResponse to the client
func (o *CreateStorageV1CSINodeOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { rw.WriteHeader(200) if o.Payload != nil { payload := o.Payload if err := producer.Produce(rw, payload); err != nil { panic(err) // let the recovery middleware deal with this } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *Response) Write(w io.Writer) error", "func (c *Operation) writeResponse(rw http.ResponseWriter, status int, data []byte) { // nolint: unparam\n\trw.WriteHeader(status)\n\n\tif _, err := rw.Write(data); err != nil {\n\t\tlogger.Errorf(\"Unable to send error message, %s\", err)\n\t}\n}", "func WriteResponse(w http.ResponseWriter, object interface{}, rerr *irma.RemoteError) {\n\tstatus, bts := JsonResponse(object, rerr)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\t_, err := w.Write(bts)\n\tif err != nil {\n\t\tLogWarning(errors.WrapPrefix(err, \"failed to write response\", 0))\n\t}\n}", "func WriteResponse(w http.ResponseWriter, mensaje string, code int) {\n\tmessage := myTypes.Respuesta{\n\t\tMessage: mensaje,\n\t}\n\tresponse, _ := json.Marshal(message)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(response)\n}", "func (o *PingOK) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(200)\n}", "func WriteResponse(w http.ResponseWriter, v interface{}, statusCode int) {\n\tresBody, err := json.Marshal(v)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(statusCode)\n\t_, _ = w.Write(resBody)\n}", "func WriteResponse(w http.ResponseWriter, code int, object interface{}) {\n\tdata, err := json.Marshal(object)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(data)\n}", "func (o *GetPingOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func writeResponse(body []byte, w *http.ResponseWriter) {\n\t(*w).Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\t_, err := (*w).Write(body)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\t(*w).WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func WriteResponse(w http.ResponseWriter, code int, resp interface{}) error {\n\tj, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn err\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\n\t_, err = w.Write(j)\n\treturn err\n}", "func writeResponse(w *http.ResponseWriter, res responseData, status int) {\n\tresJSON, err := json.Marshal(res)\n\tif err != nil {\n\t\thttp.Error(*w, \"Failed to parse struct `responseData` into JSON object\", http.StatusInternalServerError)\n\t}\n\n\t(*w).Header().Set(\"Content-Type\", \"application/json\")\n\t(*w).WriteHeader(status)\n\t(*w).Write(resJSON)\n}", "func WriteResponse(w http.ResponseWriter, d string) {\n\tw.WriteHeader(200)\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.Write([]byte(d))\n\treturn\n}", "func (o *CreateFacilityUsersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func writeResponse(w http.ResponseWriter, response Response) {\n\tjson, err := json.Marshal(&response)\n\n\tif err != nil {\n\t\tfmt.Fprint(w, \"There was an error processing the request.\")\n\t}\n\n\tcommon.Log(fmt.Sprintf(\"Returning response %s\", json))\n\tfmt.Fprintf(w, \"%s\", json)\n}", "func (o *CreateProgramOK) WriteResponse(rw http.ResponseWriter, producer 
runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *DepositNewFileOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateMedicineOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *CreateTaskCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Location\n\n\tlocation := o.Location.String()\n\tif location != \"\" {\n\t\trw.Header().Set(\"Location\", location)\n\t}\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func writeResponse(r *http.Request, w http.ResponseWriter, code int, resp interface{}) {\n\n\t// Deal with CORS\n\tif origin := r.Header.Get(\"Origin\"); origin != \"\" {\n\t\tw.Header().Set(\"Access-Control-Allow-Origin\", origin)\n\t\tw.Header().Set(\"Access-Control-Allow-Methods\", \"DELETE, GET, HEAD, OPTIONS, POST, PUT\")\n\t\tw.Header().Set(\"Access-Control-Allow-Credentials\", \"true\")\n\t\t// Allow any headers\n\t\tif wantedHeaders := r.Header.Get(\"Access-Control-Request-Headers\"); wantedHeaders != \"\" {\n\t\t\tw.Header().Set(\"Access-Control-Allow-Headers\", wantedHeaders)\n\t\t}\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\n\tb, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintln(w, `{\"error\":\"failed to marshal json\"}`)\n\t\treturn\n\t}\n\n\tw.WriteHeader(code)\n\tfmt.Fprintln(w, string(b))\n}", "func (o *VerifyAccountCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func writeResponse(w http.ResponseWriter, h int, p interface{}) {\n\t// I set the content type...\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t// ... I write the specified status code...\n\tw.WriteHeader(h)\n\t// ... 
and I write the response\n\tb, _ := json.Marshal(p)\n\tw.Write(b)\n}", "func (o *UpdateCatalogOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func (c *SwitchVersion) WriteResponse(rw http.ResponseWriter, rp runtime.Producer) {\n\tswitch c.Request.Method {\n\tcase http.MethodPost:\n\t\tc.postSwitchVersion(rw, rp)\n\tdefault:\n\t\tc.notSupported(rw, rp)\n\t}\n}", "func (o *PutRecordingsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *BofaChkUpdateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *VerifyHealthCredentialOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func WriteResponse(w http.ResponseWriter, code int, err error, data interface{}, t0 time.Time) {\n\tw.WriteHeader(code)\n\tresp := &Response{Data: data, Dur: fmt.Sprint(time.Since(t0)), OK: false}\n\tif code < 300 {\n\t\tresp.OK = true\n\t}\n\tif err != nil {\n\t\tresp.Err = err.Error()\n\t}\n\terr = json.NewEncoder(w).Encode(resp)\n\tif err != nil {\n\t\tlog.Infof(\"failed to json encode response: %v\", err)\n\t\tif _, err = w.Write([]byte(spew.Sdump(resp))); err != nil {\n\t\t\tlog.Infof(\"failed to write dump of response: %v\", err)\n\t\t}\n\t}\n}", "func (o *NewDiscoveryOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func writeResponse(data []byte, size int64, ctype string, w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", ctype)\n\tw.Header().Set(\"Content-Length\", fmt.Sprintf(\"%d\", size))\n\tw.Header().Set(\"Cache-Control\", \"no-transform,public,max-age=86400,s-maxage=2592000\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(data)\n}", "func writeResponse(w http.ResponseWriter, code int, object interface{}) {\n\tfmt.Println(\"writing response:\", code, object)\n\tdata, err := json.Marshal(object)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tw.Header().Set(\"content-type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(data)\n}", "func writeResponse(w http.ResponseWriter, authZRes *authorization.Response) {\n\n\tdata, err := json.Marshal(authZRes)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to marshal authz response %q\", err.Error())\n\t} else {\n\t\tw.Write(data)\n\t}\n\n\tif authZRes == nil || authZRes.Err != \"\" {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n}", "func (o *GetCharactersCharacterIDOpportunitiesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make(models.GetCharactersCharacterIDOpportunitiesOKBody, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) 
// let the recovery middleware deal with this\n\t}\n\n}", "func (o *WeaviateThingsGetNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n}", "func (c *UpdateSwitch) WriteResponse(rw http.ResponseWriter, rp runtime.Producer) {\n\tswitch c.Request.Method {\n\tcase http.MethodPost:\n\t\tc.postUpdateSwitch(rw, rp)\n\tdefault:\n\t\tc.notSupported(rw, rp)\n\t}\n}", "func (c *UpdateSwitch) WriteResponse(rw http.ResponseWriter, rp runtime.Producer) {\n\tswitch c.Request.Method {\n\tcase http.MethodPost:\n\t\tc.postUpdateSwitch(rw, rp)\n\tdefault:\n\t\tc.notSupported(rw, rp)\n\t}\n}", "func (o *UpdateLinkInPostOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetChatroomsIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetEchoNameOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetUIContentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *ListVsphereResourceOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func ResponseWrite(w http.ResponseWriter, responseCode int, responseData interface{}) {\n\t// Write Response\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(responseCode)\n\n\t// Write JSON to Response\n\tjson.NewEncoder(w).Encode(responseData)\n}", "func writeHTTPResponseInWriter(httpRes http.ResponseWriter, httpReq *http.Request, nobelPrizeWinnersResponse []byte, err error) {\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\thttp.Error(httpRes, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tlog.Printf(\"Request %s Succesfully Completed\", httpReq.RequestURI)\n\thttpRes.Header().Set(\"Content-Type\", \"application/json\")\n\thttpRes.Write(nobelPrizeWinnersResponse)\n}", "func (o *PostKeysKeyOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func (o *Operation) writeResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func writeResponse(data interface{}, w http.ResponseWriter) error {\n\tvar (\n\t\tenc []byte\n\t\terr error\n\t)\n\tenc, err = json.Marshal(data)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Failure to marshal, err = %s\", err)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tn, err := w.Write(enc)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Failure to write, err = %s\", err)\n\t}\n\tif n != len(enc) {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn 
fmt.Errorf(\"Short write sent = %d, wrote = %d\", len(enc), n)\n\t}\n\treturn nil\n}", "func (o *CreateUserOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateMoveTaskOrderPostCounselingInformationOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func WriteResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\tif err != nil {\n\t\tlogger.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func (o *PutQuestionOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (r *response) Write(b []byte) (n int, err error) {\n\tif !r.headersSend {\n\t\tif r.status == 0 {\n\t\t\tr.status = http.StatusOK\n\t\t}\n\t\tr.WriteHeader(r.status)\n\t}\n\tn, err = r.ResponseWriter.Write(b)\n\tr.size += int64(n)\n\treturn\n}", "func (o *PostOperationsDeleteP2PPathCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *HealthGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *VerifyEmailTokenOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *WeaviateThingsPatchNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n}", "func (o *WeaviateThingsGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *DeleteServiceIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *Operation) writeResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\t// as of now, just log errors for writing response\n\tif err != nil {\n\t\tlogger.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func (o *PostOperationsGetNodeEdgePointDetailsCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *UserEditOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *WeaviatePeersAnnounceOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func (o *CertifyOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) 
{\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func writeResponse(writer http.ResponseWriter, response *http.Response) (int64, error) {\n\tdefer response.Body.Close()\n\twriteResponseHeaders(writer, response, false)\n\treturn io.Copy(writer, response.Body)\n}", "func (o *PutMeetupDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *FingerPathsPostCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *PostPlaybookOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateHostIgnitionCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *GetCharactersCharacterIDLocationOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetPingDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *PostManagementKubernetesIoV1NodesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *PutPerformancesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *StopAppAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(202)\n}", "func (o *GetFleetsFleetIDMembersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Content-Language\n\n\tcontentLanguage := o.ContentLanguage\n\tif contentLanguage != \"\" {\n\t\trw.Header().Set(\"Content-Language\", contentLanguage)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = 
make(models.GetFleetsFleetIDMembersOKBody, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *GetMeetupsDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *PostEventCreated) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(201)\n}", "func (o *GetTaskTaskIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateTCPCheckAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Reload-ID\n\n\treloadID := o.ReloadID\n\tif reloadID != \"\" {\n\t\trw.Header().Set(\"Reload-ID\", reloadID)\n\t}\n\n\trw.WriteHeader(202)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *PostOperationsGetNetworkElementListCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *ServiceInstanceLastOperationGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header RetryAfter\n\n\tretryAfter := o.RetryAfter\n\tif retryAfter != \"\" {\n\t\trw.Header().Set(\"RetryAfter\", retryAfter)\n\t}\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetTaskDetailsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetPiecesIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *UpdateClusterOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetDetailOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\trw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetServicesHaproxyRuntimeAclsIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o 
*LogoutOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (r *responseInfoRecorder) Write(b []byte) (int, error) {\n\tr.ContentLength += int64(len(b))\n\tif r.statusCode == 0 {\n\t\tr.statusCode = http.StatusOK\n\t}\n\treturn r.ResponseWriter.Write(b)\n}", "func (o *UploadFileOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func WriteResponse(w http.ResponseWriter, data interface{}) error {\n\tenv := map[string]interface{}{\n\t\t\"meta\": map[string]interface{}{\n\t\t\t\"code\": http.StatusOK,\n\t\t},\n\t\t\"data\": data,\n\t}\n\treturn jsonResponse(w, env)\n}", "func (o *WeaviateThingTemplatesCreateNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n}", "func (r *Responder) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\tfor k, v := range r.headers {\n\t\tfor _, val := range v {\n\t\t\trw.Header().Add(k, val)\n\t\t}\n\t}\n\n\trw.WriteHeader(r.code)\n\n\tif r.response != nil {\n\t\tif err := producer.Produce(rw, r.response); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}", "func (o *GetGateSourceByGateNameAndMntOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateSpoeCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tpayload := o.Payload\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (o *Output) writeResponse(response string) error {\r\n\t// write the response\r\n\tif _, err := o.writer.WriteString(response + \"\\n\"); err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\treturn nil\r\n}", "func (o *GetTransportByIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *TransferOK) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(200)\n\tif err := producer.Produce(rw, o.Payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *CreateUserCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *ViewOneOrderOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetVisiblePruebasFromQuestionTestInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(500)\n}", "func (o *GetWhaleTranfersOK) WriteResponse(rw 
http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\t// return empty array\n\t\tpayload = make([]*models.OperationsRow, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (o *SearchTournamentsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make([]*models.Tournament, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *CreateTCPCheckCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (s *Server) writeInfoResponse(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tmessage []byte,\n\tstatus int,\n\theaders map[string]string,\n) {\n\tfor k, v := range headers {\n\t\tw.Header().Add(k, v)\n\t}\n\n\tw.WriteHeader(status)\n\tw.Write(message)\n}" ]
[ "0.81291586", "0.78819287", "0.77723724", "0.7772298", "0.77532965", "0.7740895", "0.7667328", "0.76388013", "0.76095575", "0.75802743", "0.75792146", "0.7567954", "0.75612247", "0.7558208", "0.7545076", "0.75431097", "0.7542526", "0.7535154", "0.75308895", "0.75206727", "0.75192624", "0.7513445", "0.75115013", "0.7506245", "0.75036865", "0.74994856", "0.7488267", "0.7484068", "0.7476975", "0.74681216", "0.7467429", "0.74663514", "0.7464419", "0.74637115", "0.74637115", "0.74621916", "0.74607694", "0.74600816", "0.74461263", "0.7444002", "0.74358237", "0.7427366", "0.7425954", "0.7418714", "0.7413481", "0.74079764", "0.7406604", "0.74053806", "0.7399197", "0.73880255", "0.73864275", "0.7381308", "0.7361386", "0.73605716", "0.73553914", "0.735516", "0.7353125", "0.7348355", "0.734634", "0.7328798", "0.7326309", "0.7318161", "0.73170096", "0.73166984", "0.7316146", "0.7313389", "0.73119754", "0.73103034", "0.73090947", "0.7301638", "0.729702", "0.7292011", "0.7291873", "0.7289617", "0.72853845", "0.7284048", "0.7282259", "0.7280808", "0.72753084", "0.7275278", "0.7273494", "0.72732604", "0.7269464", "0.72693926", "0.7268149", "0.72664154", "0.72615176", "0.72536385", "0.7251536", "0.7249643", "0.72487813", "0.72475266", "0.72414196", "0.723942", "0.7237652", "0.7234592", "0.72287256", "0.72233856", "0.72163224", "0.7215305", "0.72126275" ]
0.0
-1
NewCreateStorageV1CSINodeCreated creates CreateStorageV1CSINodeCreated with default headers values
func NewCreateStorageV1CSINodeCreated() *CreateStorageV1CSINodeCreated {

	return &CreateStorageV1CSINodeCreated{}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewCreateStorageV1CSINodeOK() *CreateStorageV1CSINodeOK {\n\n\treturn &CreateStorageV1CSINodeOK{}\n}", "func (o *CreateStorageV1CSINodeCreated) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *CreateStorageV1CSINodeCreated {\n\to.Payload = payload\n\treturn o\n}", "func NewReplaceStorageV1CSINodeCreated() *ReplaceStorageV1CSINodeCreated {\n\n\treturn &ReplaceStorageV1CSINodeCreated{}\n}", "func NewReplaceStorageV1CSINodeCreated() *ReplaceStorageV1CSINodeCreated {\n\treturn &ReplaceStorageV1CSINodeCreated{}\n}", "func NewCreateStorageV1CSINodeUnauthorized() *CreateStorageV1CSINodeUnauthorized {\n\n\treturn &CreateStorageV1CSINodeUnauthorized{}\n}", "func NewCreateStorageV1CSINodeAccepted() *CreateStorageV1CSINodeAccepted {\n\n\treturn &CreateStorageV1CSINodeAccepted{}\n}", "func (o *ReplaceStorageV1CSINodeCreated) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *ReplaceStorageV1CSINodeCreated {\n\to.Payload = payload\n\treturn o\n}", "func (service *ContrailService) CreateContrailStorageNode(\n\tctx context.Context,\n\trequest *models.CreateContrailStorageNodeRequest) (*models.CreateContrailStorageNodeResponse, error) {\n\tmodel := request.ContrailStorageNode\n\tif model.UUID == \"\" {\n\t\tmodel.UUID = uuid.NewV4().String()\n\t}\n\tauth := common.GetAuthCTX(ctx)\n\tif auth == nil {\n\t\treturn nil, common.ErrorUnauthenticated\n\t}\n\n\tif model.FQName == nil {\n\t\tif model.DisplayName != \"\" {\n\t\t\tmodel.FQName = []string{auth.DomainID(), auth.ProjectID(), model.DisplayName}\n\t\t} else {\n\t\t\tmodel.FQName = []string{auth.DomainID(), auth.ProjectID(), model.UUID}\n\t\t}\n\t}\n\tmodel.Perms2 = &models.PermType2{}\n\tmodel.Perms2.Owner = auth.ProjectID()\n\tif err := common.DoInTransaction(\n\t\tservice.DB,\n\t\tfunc(tx *sql.Tx) error {\n\t\t\treturn db.CreateContrailStorageNode(ctx, tx, request)\n\t\t}); err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"err\": err,\n\t\t\t\"resource\": \"contrail_storage_node\",\n\t\t}).Debug(\"db create failed on create\")\n\t\treturn nil, common.ErrorInternal\n\t}\n\treturn &models.CreateContrailStorageNodeResponse{\n\t\tContrailStorageNode: request.ContrailStorageNode,\n\t}, nil\n}", "func (o *CreateStorageV1CSINodeOK) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *CreateStorageV1CSINodeOK {\n\to.Payload = payload\n\treturn o\n}", "func CreateBucket(w http.ResponseWriter, r *http.Request) *appError {\n decoder := json.NewDecoder(r.Body)\n var ecsBucket ECSBucket\n err := decoder.Decode(&ecsBucket)\n if err != nil {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Can't decode JSON data\"}\n }\n headers := make(map[string][]string)\n if ecsBucket.ReplicationGroup != \"\" {\n headers[\"x-emc-vpool\"] = []string{ecsBucket.ReplicationGroup}\n }\n if ecsBucket.MetadataSearch != \"\" {\n headers[\"x-emc-metadata-search\"] = []string{ecsBucket.MetadataSearch}\n }\n if ecsBucket.EnableADO {\n headers[\"x-emc-is-stale-allowed\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-is-stale-allowed\"] = []string{\"false\"}\n }\n if ecsBucket.EnableFS {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableCompliance {\n headers[\"x-emc-compliance-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-compliance-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableEncryption {\n headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"true\"}\n } else {\n 
headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"false\"}\n }\n retentionEnabled := false\n headers[\"x-emc-retention-period\"] = []string{\"0\"}\n if ecsBucket.Retention != \"\" {\n days, err := strconv.ParseInt(ecsBucket.Retention, 10, 64)\n if err == nil {\n if days > 0 {\n seconds := days * 24 * 3600\n headers[\"x-emc-retention-period\"] = []string{int64toString(seconds)}\n retentionEnabled = true\n }\n }\n }\n var expirationCurrentVersions int64\n expirationCurrentVersions = 0\n if ecsBucket.ExpirationCurrentVersions != \"\" {\n days, err := strconv.ParseInt(ecsBucket.ExpirationCurrentVersions, 10, 64)\n if err == nil {\n expirationCurrentVersions = days\n }\n }\n var expirationNonCurrentVersions int64\n expirationNonCurrentVersions = 0\n if ecsBucket.ExpirationNonCurrentVersions != \"\" {\n days, err := strconv.ParseInt(ecsBucket.ExpirationNonCurrentVersions, 10, 64)\n if err == nil && ecsBucket.EnableVersioning {\n expirationNonCurrentVersions = days\n }\n }\n var bucketCreateResponse Response\n if ecsBucket.Api == \"s3\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = s3Request(s3, ecsBucket.Name, \"PUT\", \"/\", headers, \"\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: err.Error()}\n }\n versioningStatusOK := true\n lifecyclePolicyStatusOK := true\n // If the bucket has been created\n if bucketCreateResponse.Code == 200 {\n if !retentionEnabled && ecsBucket.EnableVersioning {\n // Enable versioning\n enableVersioningHeaders := map[string][]string{}\n enableVersioningHeaders[\"Content-Type\"] = []string{\"application/xml\"}\n versioningConfiguration := `\n <VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\n <Status>Enabled</Status>\n <MfaDelete>Disabled</MfaDelete>\n </VersioningConfiguration>\n `\n enableVersioningResponse, _ := s3Request(s3, ecsBucket.Name, \"PUT\", \"/?versioning\", enableVersioningHeaders, versioningConfiguration)\n if enableVersioningResponse.Code != 200 {\n versioningStatusOK = false\n }\n }\n if expirationCurrentVersions > 0 || expirationNonCurrentVersions > 0 {\n lifecyclePolicyHeaders := map[string][]string{}\n lifecyclePolicyHeaders[\"Content-Type\"] = []string{\"application/xml\"}\n lifecyclePolicyConfiguration := `\n <LifecycleConfiguration>\n <Rule>\n <ID>expiration</ID>\n <Prefix></Prefix>\n <Status>Enabled</Status>\n `\n if expirationCurrentVersions > 0 && expirationNonCurrentVersions > 0 {\n // Enable expiration for both current and non current versions\n lifecyclePolicyConfiguration += \"<Expiration><Days>\" + ecsBucket.ExpirationCurrentVersions + \"</Days></Expiration>\"\n lifecyclePolicyConfiguration += \"<NoncurrentVersionExpiration><NoncurrentDays>\" + ecsBucket.ExpirationNonCurrentVersions + \"</NoncurrentDays></NoncurrentVersionExpiration>\"\n } else {\n if expirationCurrentVersions > 0 {\n // Enable expiration for current versions only\n lifecyclePolicyConfiguration += \"<Expiration><Days>\" + ecsBucket.ExpirationCurrentVersions + \"</Days></Expiration>\"\n }\n if expirationNonCurrentVersions > 0 {\n // Enable expiration for non current versions only\n // To fix a bug in ECS 3.0 where an expiration for non current version can't be set if there's no expiration set for current versions\n lifecyclePolicyConfiguration += \"<Expiration><Days>1000000</Days></Expiration>\"\n lifecyclePolicyConfiguration += 
\"<NoncurrentVersionExpiration><NoncurrentDays>\" + ecsBucket.ExpirationNonCurrentVersions + \"</NoncurrentDays></NoncurrentVersionExpiration>\"\n }\n }\n lifecyclePolicyConfiguration += `\n </Rule>\n </LifecycleConfiguration>\n `\n lifecyclePolicyResponse, _ := s3Request(s3, ecsBucket.Name, \"PUT\", \"/?lifecycle\", lifecyclePolicyHeaders, lifecyclePolicyConfiguration)\n if lifecyclePolicyResponse.Code != 200 {\n lifecyclePolicyStatusOK = false\n }\n }\n if versioningStatusOK && lifecyclePolicyStatusOK {\n rendering.JSON(w, http.StatusOK, \"\")\n } else {\n message := \"\"\n if !versioningStatusOK {\n message += \" Versioning can't be enabled.\"\n }\n if !lifecyclePolicyStatusOK {\n message += \" Expiration can't be set.\"\n }\n rendering.JSON(w, http.StatusOK, message)\n }\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"swift\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = swiftRequest(ecsBucket.Endpoint, s3.AccessKey, ecsBucket.Password, ecsBucket.Name, \"PUT\", \"/\", headers, \"\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: err.Error()}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 {\n rendering.JSON(w, http.StatusOK, ecsBucket.Name)\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"atmos\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = atmosRequest(ecsBucket.Endpoint, s3.AccessKey, s3.SecretKey, \"\", \"PUT\", \"/rest/subtenant\", headers, \"\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: err.Error()}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 {\n rendering.JSON(w, http.StatusOK, bucketCreateResponse.ResponseHeaders[\"Subtenantid\"][0])\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n }\n\n return nil\n}", "func (cc *ContrailCommand) CreateNode(host vcenter.ESXIHost) error {\n\tlog.Debug(\"Create Node:\", cc.AuthToken)\n\tnodeResource := contrailCommandNodeSync{\n\t\tResources: []*nodeResources{\n\t\t\t{\n\t\t\t\tKind: \"node\",\n\t\t\t\tData: &nodeData{\n\t\t\t\t\tNodeType: \"esxi\",\n\t\t\t\t\tUUID: host.UUID,\n\t\t\t\t\tHostname: host.Hostname,\n\t\t\t\t\tFqName: []string{\"default-global-system-config\", host.Hostname},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tjsonData, err := json.Marshal(nodeResource)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Debug(\"Sending Request\")\n\tresp, _, err := cc.sendRequest(\"/sync\", string(jsonData), \"POST\") //nolint: bodyclose\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Debug(\"Got status : \", resp.StatusCode)\n\tswitch resp.StatusCode {\n\tdefault:\n\t\treturn fmt.Errorf(\"resource creation failed, %d\", resp.StatusCode)\n\tcase 200, 201:\n\t}\n\treturn nil\n}", "func CreateBucket(w http.ResponseWriter, r *http.Request) *appError {\n decoder := json.NewDecoder(r.Body)\n var ecsBucket ECSBucket\n err := decoder.Decode(&ecsBucket)\n if err != nil {\n return &appError{err: err, status: http.StatusBadRequest, json: 
\"Can't decode JSON data\"}\n }\n headers := make(map[string][]string)\n if ecsBucket.ReplicationGroup != \"\" {\n headers[\"x-emc-vpool\"] = []string{ecsBucket.ReplicationGroup}\n }\n if ecsBucket.MetadataSearch != \"\" {\n headers[\"x-emc-metadata-search\"] = []string{ecsBucket.MetadataSearch}\n }\n if ecsBucket.EnableADO {\n headers[\"x-emc-is-stale-allowed\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-is-stale-allowed\"] = []string{\"false\"}\n }\n if ecsBucket.EnableFS {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableCompliance {\n headers[\"x-emc-compliance-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-compliance-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableEncryption {\n headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"false\"}\n }\n var bucketCreateResponse Response\n if ecsBucket.Api == \"s3\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = s3Request(s3, ecsBucket.Name, \"PUT\", \"/\", headers, \"\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n if bucketCreateResponse.Code == 200 {\n rendering.JSON(w, http.StatusOK, ecsBucket.Name)\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"swift\" {\n bucketCreateResponse, err = swiftRequest(ecsBucket.Endpoint, ecsBucket.User, ecsBucket.Password, ecsBucket.Name, \"PUT\", \"/\", headers, \"\")\n log.Print(bucketCreateResponse)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 {\n rendering.JSON(w, http.StatusOK, ecsBucket.Name)\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"atmos\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = atmosRequest(ecsBucket.Endpoint, s3.AccessKey, s3.SecretKey, \"\", \"PUT\", \"/rest/subtenant\", headers, \"\")\n if err != nil {\n log.Print(err)\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 {\n rendering.JSON(w, http.StatusOK, bucketCreateResponse.ResponseHeaders[\"Subtenantid\"][0])\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n }\n\n return nil\n}", "func CreateDescribeLogstoreStorageRequest() (request *DescribeLogstoreStorageRequest) {\n\trequest = &DescribeLogstoreStorageRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Sas\", \"2018-12-03\", \"DescribeLogstoreStorage\", \"sas\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client StorageGatewayClient) createFileSystem(ctx context.Context, request common.OCIRequest) 
(common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/storageGateways/{storageGatewayId}/fileSystems\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateFileSystemResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func NewCreateIOCDefault(code int) *CreateIOCDefault {\n\treturn &CreateIOCDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (sdk *SDK) NewNode(prefer *cloudsvr.PreferAttrs) (*cloudsvr.CloudNode, *cloudsvr.PreferAttrs, error) {\n\n\tvar (\n\t\tpassword, _ = utils.GenPassword(24)\n\t\treq = &CreateInstanceRequest{\n\t\t\tImageID: OsImage,\n\t\t\tPassword: password,\n\t\t\tInstanceName: NodeName,\n\t\t\tInstanceChargeType: \"PostPaid\", // require RMB 100+\n\t\t\tSecurityGroupID: \"whatever\", // will be automatic rewrite\n\t\t\tInternetChargeType: \"PayByTraffic\", // traffic payment\n\t\t\tInternetMaxBandwidthOut: \"100\", // 100M\n\t\t\tLabels: NodeLabels,\n\t\t}\n\t)\n\n\t// if prefered attributes set, use prefer region & instance-type\n\tif prefer != nil && prefer.Valid() == nil {\n\t\tvar (\n\t\t\treg = prefer.RegionOrZone\n\t\t\ttyp = prefer.InstanceType\n\t\t)\n\t\tlog.Printf(\"create aliyun ecs by using prefered region %s, instance type %s ...\", reg, typ)\n\n\t\treq.RegionID = reg // cn-beijing\n\t\treq.InstanceType = typ // ecs.n4.large\n\n\t\tcreated, err := sdk.createNode(req)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\tlog.Printf(\"created prefered aliyun ecs succeed: %s\", created.ID)\n\t\treturn created, prefer, nil\n\t}\n\n\tlog.Infoln(\"creating aliyun ecs by trying all regions & types ...\")\n\n\t// if prefered created failed, or without prefer region & instance-type\n\t// try best on all region & instance-types to create the new aliyun ecs\n\tvar (\n\t\tregions []RegionType // all of aliyun regions\n\t\ttypes []InstanceTypeItemType // all of instance types within given range of mems & cpus\n\t\terr error\n\t\tcreated *cloudsvr.CloudNode\n\t)\n\n\t// list all regions\n\tregions, err = sdk.ListRegions()\n\tif err != nil {\n\t\tlog.Errorf(\"sdk.NewNode.ListRegions() error: %v\", err)\n\t\treturn nil, nil, err\n\t}\n\n\t// list specified range of instance types\n\ttypes, err = sdk.ListInstanceTypes(2, 4, 2, 8) // TODO range of given cpus/mems ranges\n\tif err != nil {\n\t\tlog.Errorf(\"sdk.NewNode.ListInstanceTypes() error: %v\", err)\n\t\treturn nil, nil, err\n\t}\n\n\tvar (\n\t\tuseRegionID, useInsType string\n\t)\n\t// range all regions & types to try to create ecs instance\n\tfor _, reg := range regions {\n\t\tfor _, typ := range types {\n\t\t\treq.RegionID = reg.RegionID // cn-beijing\n\t\t\treq.InstanceType = typ.InstanceTypeID // ecs.n4.large\n\n\t\t\t// if created succeed, directly return\n\t\t\tcreated, err = sdk.createNode(req)\n\t\t\tif err == nil {\n\t\t\t\tuseRegionID, useInsType = reg.RegionID, typ.InstanceTypeID\n\t\t\t\tgoto END\n\t\t\t}\n\n\t\t\tif sdk.isFatalError(err) {\n\t\t\t\tlog.Errorf(\"create aliyun ecs got fatal error, stop retry: %v\", err)\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\n\t\t\tlog.Warnf(\"create aliyun ecs failed: %v, will retry another region or type\", err)\n\t\t}\n\t}\n\nEND:\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tlog.Printf(\"created aliyun ecs %s at %s and type is 
%s\", created.ID, useRegionID, useInsType)\n\treturn created, &cloudsvr.PreferAttrs{RegionOrZone: useRegionID, InstanceType: useInsType}, nil\n}", "func (s *StorageClusterAPI) Create(w http.ResponseWriter, r *http.Request) {\n\tstorage := &config.StorageCluster{}\n\terr := api.GetJSONBodyFromRequest(r, storage)\n\tif err != nil {\n\t\tapi.Error(w, err)\n\t\treturn\n\t}\n\terr = s.storageClusterService.Save(storage)\n\tif err != nil {\n\t\tapi.Error(w, err)\n\t\treturn\n\t}\n\tapi.NoContent(w)\n}", "func (service *ContrailService) RESTCreateContrailStorageNode(c echo.Context) error {\n\trequestData := &models.CreateContrailStorageNodeRequest{}\n\tif err := c.Bind(requestData); err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"err\": err,\n\t\t\t\"resource\": \"contrail_storage_node\",\n\t\t}).Debug(\"bind failed on create\")\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, \"Invalid JSON format\")\n\t}\n\tctx := c.Request().Context()\n\tresponse, err := service.CreateContrailStorageNode(ctx, requestData)\n\tif err != nil {\n\t\treturn common.ToHTTPError(err)\n\t}\n\treturn c.JSON(http.StatusCreated, response)\n}", "func CreateBucket(w http.ResponseWriter, r *http.Request) *appError {\n session, err := store.Get(r, \"session-name\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n s3 := S3{\n EndPointString: session.Values[\"Endpoint\"].(string),\n AccessKey: session.Values[\"AccessKey\"].(string),\n SecretKey: session.Values[\"SecretKey\"].(string),\n Namespace: session.Values[\"Namespace\"].(string),\n }\n\n decoder := json.NewDecoder(r.Body)\n var bucket NewBucket\n err = decoder.Decode(&bucket)\n if err != nil {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Can't decode JSON data\"}\n }\n\n // Add the necessary headers for Metadata Search and Access During Outage\n createBucketHeaders := map[string][]string{}\n createBucketHeaders[\"Content-Type\"] = []string{\"application/xml\"}\n createBucketHeaders[\"x-emc-is-stale-allowed\"] = []string{\"true\"}\n createBucketHeaders[\"x-emc-metadata-search\"] = []string{\"ObjectName,x-amz-meta-image-width;Integer,x-amz-meta-image-height;Integer,x-amz-meta-gps-latitude;Decimal,x-amz-meta-gps-longitude;Decimal\"}\n\n createBucketResponse, _ := s3Request(s3, bucket.Name, \"PUT\", \"/\", createBucketHeaders, \"\")\n\n // Enable CORS after the bucket creation to allow the web browser to send requests directly to ECS\n if createBucketResponse.Code == 200 {\n enableBucketCorsHeaders := map[string][]string{}\n enableBucketCorsHeaders[\"Content-Type\"] = []string{\"application/xml\"}\n corsConfiguration := `\n <CORSConfiguration>\n <CORSRule>\n <AllowedOrigin>*</AllowedOrigin>\n <AllowedHeader>*</AllowedHeader>\n <ExposeHeader>x-amz-meta-image-width</ExposeHeader>\n <ExposeHeader>x-amz-meta-image-height</ExposeHeader>\n <ExposeHeader>x-amz-meta-gps-latitude</ExposeHeader>\n <ExposeHeader>x-amz-meta-gps-longitude</ExposeHeader>\n <AllowedMethod>HEAD</AllowedMethod>\n <AllowedMethod>GET</AllowedMethod>\n <AllowedMethod>PUT</AllowedMethod>\n <AllowedMethod>POST</AllowedMethod>\n <AllowedMethod>DELETE</AllowedMethod>\n </CORSRule>\n </CORSConfiguration>\n `\n enableBucketCorsResponse, _ := s3Request(s3, bucket.Name, \"PUT\", \"/?cors\", enableBucketCorsHeaders, corsConfiguration)\n if enableBucketCorsResponse.Code == 200 {\n rendering.JSON(w, http.StatusOK, struct {\n CorsConfiguration string `json:\"cors_configuration\"`\n Bucket 
string `json:\"bucket\"`\n } {\n CorsConfiguration: corsConfiguration,\n Bucket: bucket.Name,\n })\n } else {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Bucket created, but CORS can't be enabled\"}\n }\n } else {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Bucket can't be created\"}\n }\n return nil\n}", "func (client IdentityClient) createTagDefault(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/tagDefaults\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateTagDefaultResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (fs *Ipfs) createNode(ctx context.Context, repoPath string) (icore.CoreAPI, error) {\n\t// Open the repo\n\trepo, err := fsrepo.Open(repoPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Construct the node\n\tnodeOptions := &core.BuildCfg{\n\t\tOnline: true,\n\n\t\t// This option sets the node to be a full DHT node\n\t\t// (both fetching and storing DHT Records)\n\t\tRouting: libp2p.DHTOption,\n\n\t\t// Routing: libp2p.DHTClientOption,\n\t\t// This option sets the node to be a client DHT node (only fetching records)\n\n\t\tRepo: repo,\n\t}\n\n\tnode, err := core.NewNode(ctx, nodeOptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfs.ipfsNode = node\n\n\t// Attach the Core API to the constructed node\n\treturn coreapi.NewCoreAPI(node)\n}", "func newNs(ctx context.Context, cl client.Client, name string) error {\n\tns := &corev1.Namespace{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t},\n\t}\n\tif err := cl.Create(ctx, ns); err != nil {\n\t\tif !errors.IsAlreadyExists(err) {\n\t\t\treturn fmt.Errorf(\"failed to create namespace %s: %v\", ns.Name, err)\n\t\t}\n\t}\n\treturn nil\n}", "func NewCreateClusterRequestWithoutParam() *CreateClusterRequest {\n\n return &CreateClusterRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/regions/{regionId}/clusters\",\n Method: \"POST\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}", "func createNewEmptyNode() Node {\n\tnextNewId--\n\treturn Node{\n\t\tId: nextNewId,\n\t\tVisible: true,\n\t\tTimestamp: time.Now().Format(\"2006-01-02T15:04:05Z\"),\n\t\tVersion: \"1\",\n\t}\n}", "func (d *Driver) Create() error {\n\n\tvolume := NodesNodeStorageStorageContentPostParameter{\n\t\tFilename: d.StorageFilename,\n\t\tSize: d.DiskSize + \"G\",\n\t\tVMID: d.VMID,\n\t}\n\n\td.debugf(\"Creating disk volume '%s' with size '%s'\", volume.Filename, volume.Size)\n\tdiskname, err := d.driver.NodesNodeStorageStorageContentPost(d.Node, d.Storage, &volume)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !strings.HasSuffix(diskname, d.StorageFilename) {\n\t\treturn fmt.Errorf(\"returned diskname is not correct: should be '%s' but was '%s'\", d.StorageFilename, diskname)\n\t}\n\n\tnpp := NodesNodeQemuPostParameter{\n\t\tVMID: d.VMID,\n\t\tAgent: \"1\",\n\t\tAutostart: \"1\",\n\t\tMemory: d.Memory,\n\t\tCores: d.Cores,\n\t\tNet0: fmt.Sprintf(\"virtio,bridge=%s\", d.NetBridge),\n\t\tSCSI0: d.StorageFilename,\n\t\tOstype: \"l26\",\n\t\tName: d.BaseDriver.MachineName,\n\t\tKVM: \"1\", // if you 
test in a nested environment, you may have to change this to 0 if you do not have nested virtualization\n\t\tCdrom: d.ImageFile,\n\t\tPool: d.Pool,\n\t}\n\n\tif d.NetVlanTag != 0 {\n\t\tnpp.Net0 = fmt.Sprintf(\"virtio,bridge=%s,tag=%d\", d.NetBridge, d.NetVlanTag)\n\t}\n\n\tif d.StorageType == \"qcow2\" {\n\t\tnpp.SCSI0 = d.Storage + \":\" + d.VMID + \"/\" + volume.Filename\n\t} else if d.StorageType == \"raw\" {\n\t\tif strings.HasSuffix(volume.Filename, \".raw\") {\n\t\t\t// raw files (having .raw) should have the VMID in the path\n\t\t\tnpp.SCSI0 = d.Storage + \":\" + d.VMID + \"/\" + volume.Filename\n\t\t} else {\n\t\t\tnpp.SCSI0 = d.Storage + \":\" + volume.Filename\n\t\t}\n\t}\n\td.debugf(\"Creating VM '%s' with '%d' of memory\", npp.VMID, npp.Memory)\n\ttaskid, err := d.driver.NodesNodeQemuPost(d.Node, &npp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = d.driver.WaitForTaskToComplete(d.Node, taskid)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = d.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn d.waitAndPrepareSSH()\n}", "func ContainerCreateFile(\n\tctx context.Context,\n\tclient api.Client,\n\tcontainerPath, fileName string,\n\tfileSize int,\n\tfileMode FileMode,\n\tfileHndl io.ReadCloser,\n\toverwrite bool) error {\n\n\tvar params api.OrderedValues\n\tif !overwrite {\n\t\tparams = overwriteFalseQueryString\n\t}\n\n\treturn client.Put(\n\t\tctx,\n\t\trealNamespacePath(client),\n\t\tpath.Join(containerPath, fileName),\n\t\tparams,\n\t\tmap[string]string{\n\t\t\t\"x-isi-ifs-target-type\": \"object\",\n\t\t\t\"x-isi-ifs-access-control\": fileMode.String(),\n\t\t\t\"Content-Length\": fmt.Sprintf(\"%d\", fileSize),\n\t\t},\n\t\tfileHndl,\n\t\tnil)\n}", "func NewWatchStorageV1CSINodeListOK() *WatchStorageV1CSINodeListOK {\n\treturn &WatchStorageV1CSINodeListOK{}\n}", "func (s *SmartContract) CreateOi(ctx contractapi.TransactionContextInterface, oiNumber string, saudacao string, despedida string, oidenovo string, pessoa string) error {\n\tOi := Oi{\n\t\tSaudacao: saudacao,\n\t\tDespedida: despedida,\n\t\tOidenovo: oidenovo,\n\t\tPessoa: pessoa,\n\t}\n\n\toiAsBytes, _ := json.Marshal(Oi)\n\n\treturn ctx.GetStub().PutState(oiNumber, oiAsBytes)\n}", "func (client StorageGatewayClient) createStorageGateway(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/storageGateways\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateStorageGatewayResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (d *driver) CreateStorage(cr *opapi.ImageRegistry, modified *bool) error {\n\tsvc, err := d.getSVC()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tic, err := util.GetInstallConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcv, err := util.GetClusterVersionConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i := 0; i < 5000; i++ {\n\t\tif len(d.Config.Bucket) == 0 {\n\t\t\td.Config.Bucket = fmt.Sprintf(\"%s-%s-%s-%s\", clusterconfig.StoragePrefix, d.Config.Region, strings.Replace(string(cv.Spec.ClusterID), \"-\", \"\", -1), strings.Replace(string(uuid.NewUUID()), \"-\", \"\", -1))[0:62]\n\t\t}\n\n\t\t_, err := svc.CreateBucket(&s3.CreateBucketInput{\n\t\t\tBucket: 
aws.String(d.Config.Bucket),\n\t\t})\n\t\tif err != nil {\n\t\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\t\tswitch aerr.Code() {\n\t\t\t\tcase s3.ErrCodeBucketAlreadyExists:\n\t\t\t\t\tif cr.Spec.Storage.S3.Bucket != \"\" {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\td.Config.Bucket = \"\"\n\t\t\t\t\tcontinue\n\t\t\t\tdefault:\n\t\t\t\t\tutil.UpdateCondition(cr, opapi.StorageExists, operatorapi.ConditionFalse, aerr.Code(), aerr.Error(), modified)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tbreak\n\t}\n\n\tif len(cr.Spec.Storage.S3.Bucket) == 0 && len(d.Config.Bucket) == 0 {\n\t\tutil.UpdateCondition(cr, opapi.StorageExists, operatorapi.ConditionFalse, \"Unable to Generate Unique Bucket Name\", \"\", modified)\n\t\treturn fmt.Errorf(\"unable to generate a unique s3 bucket name\")\n\t}\n\n\t// Wait until the bucket exists\n\tif err := svc.WaitUntilBucketExists(&s3.HeadBucketInput{\n\t\tBucket: aws.String(d.Config.Bucket),\n\t}); err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tutil.UpdateCondition(cr, opapi.StorageExists, operatorapi.ConditionFalse, aerr.Code(), aerr.Error(), modified)\n\t\t}\n\n\t\treturn err\n\t}\n\n\t// Tag the bucket with the openshiftClusterID\n\t// along with any user defined tags from the cluster configuration\n\tif ic.Platform.AWS != nil {\n\t\tvar tagSet []*s3.Tag\n\t\ttagSet = append(tagSet, &s3.Tag{Key: aws.String(\"openshiftClusterID\"), Value: aws.String(string(cv.Spec.ClusterID))})\n\t\tfor k, v := range ic.Platform.AWS.UserTags {\n\t\t\ttagSet = append(tagSet, &s3.Tag{Key: aws.String(k), Value: aws.String(v)})\n\t\t}\n\n\t\t_, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{\n\t\t\tBucket: aws.String(d.Config.Bucket),\n\t\t\tTagging: &s3.Tagging{\n\t\t\t\tTagSet: tagSet,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\t\tutil.UpdateCondition(cr, opapi.StorageTagged, operatorapi.ConditionFalse, aerr.Code(), aerr.Error(), modified)\n\t\t\t} else {\n\t\t\t\tutil.UpdateCondition(cr, opapi.StorageTagged, operatorapi.ConditionFalse, \"Unknown Error Occurred\", err.Error(), modified)\n\t\t\t}\n\t\t} else {\n\t\t\tutil.UpdateCondition(cr, opapi.StorageTagged, operatorapi.ConditionTrue, \"Tagging Successful\", \"UserTags were successfully applied to the S3 bucket\", modified)\n\t\t}\n\t}\n\n\t// Enable default encryption on the bucket\n\t_, err = svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{\n\t\tBucket: aws.String(d.Config.Bucket),\n\t\tServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{\n\t\t\tRules: []*s3.ServerSideEncryptionRule{\n\t\t\t\t{\n\t\t\t\t\tApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{\n\t\t\t\t\t\tSSEAlgorithm: aws.String(s3.ServerSideEncryptionAes256),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tutil.UpdateCondition(cr, opapi.StorageEncrypted, operatorapi.ConditionFalse, aerr.Code(), aerr.Error(), modified)\n\t\t} else {\n\t\t\tutil.UpdateCondition(cr, opapi.StorageEncrypted, operatorapi.ConditionFalse, \"Unknown Error Occurred\", err.Error(), modified)\n\t\t}\n\t} else {\n\t\tutil.UpdateCondition(cr, opapi.StorageEncrypted, operatorapi.ConditionTrue, \"Encryption Successful\", \"Default encryption was successfully enabled on the S3 bucket\", modified)\n\t}\n\n\t// Enable default incomplete multipart upload cleanup after one (1) day\n\t_, err = svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{\n\t\tBucket: 
aws.String(d.Config.Bucket),\n\t\tLifecycleConfiguration: &s3.BucketLifecycleConfiguration{\n\t\t\tRules: []*s3.LifecycleRule{\n\t\t\t\t{\n\t\t\t\t\tID: aws.String(\"cleanup-incomplete-multipart-registry-uploads\"),\n\t\t\t\t\tStatus: aws.String(\"Enabled\"),\n\t\t\t\t\tFilter: &s3.LifecycleRuleFilter{\n\t\t\t\t\t\tPrefix: aws.String(\"\"),\n\t\t\t\t\t},\n\t\t\t\t\tAbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{\n\t\t\t\t\t\tDaysAfterInitiation: aws.Int64(1),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tutil.UpdateCondition(cr, opapi.StorageIncompleteUploadCleanupEnabled, operatorapi.ConditionFalse, aerr.Code(), aerr.Error(), modified)\n\t\t} else {\n\t\t\tutil.UpdateCondition(cr, opapi.StorageIncompleteUploadCleanupEnabled, operatorapi.ConditionFalse, \"Unknown Error Occurred\", err.Error(), modified)\n\t\t}\n\t} else {\n\t\tutil.UpdateCondition(cr, opapi.StorageIncompleteUploadCleanupEnabled, operatorapi.ConditionTrue, \"Enable Cleanup Successful\", \"Default cleanup of incomplete multipart uploads after one (1) day was successfully enabled\", modified)\n\t}\n\n\tcr.Status.Storage.State.S3 = d.Config\n\tcr.Status.Storage.Managed = true\n\n\tutil.UpdateCondition(cr, opapi.StorageExists, operatorapi.ConditionTrue, \"Creation Successful\", \"S3 bucket was successfully created\", modified)\n\n\treturn nil\n}", "func New(spaceID, id, parentID, name string, blobsize int64, blobID string, t provider.ResourceType, owner *userpb.UserId, lu PathLookup) *Node {\n\tif blobID == \"\" {\n\t\tblobID = uuid.New().String()\n\t}\n\treturn &Node{\n\t\tSpaceID: spaceID,\n\t\tID: id,\n\t\tParentID: parentID,\n\t\tName: name,\n\t\tBlobsize: blobsize,\n\t\towner: owner,\n\t\tlu: lu,\n\t\tBlobID: blobID,\n\t\tnodeType: &t,\n\t}\n}", "func newCbsNode(region string, volumeAttachLimit int64) (*cbsNode, error) {\n\tsecretID, secretKey, token, _ := util.GetSercet()\n\tcred := &common.Credential{\n\t\tSecretId: secretID,\n\t\tSecretKey: secretKey,\n\t\tToken: token,\n\t}\n\n\tclient, err := cbs.NewClient(cred, region, profile.NewClientProfile())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnode := cbsNode{\n\t\tmetadataClient: metadata.NewMetaData(http.DefaultClient),\n\t\tcbsClient: client,\n\t\tmounter: mount.SafeFormatAndMount{\n\t\t\tInterface: mount.New(\"\"),\n\t\t\tExec: exec.New(),\n\t\t},\n\t\tidempotent: util.NewIdempotent(),\n\t\tvolumeAttachLimit: volumeAttachLimit,\n\t}\n\treturn &node, nil\n}", "func createStorageProfile(masterIp string, sshClientConfig *ssh.ClientConfig,\n\tstoragePolicyName string, clientIndex int) error {\n\tcreateStoragePolicy := govcLoginCmdForMultiVC(clientIndex) +\n\t\t\"govc storage.policy.create -category=shared-cat-todelete1 -tag=shared-tag-todelete1 \" + storagePolicyName\n\tframework.Logf(\"Create storage policy: %s \", createStoragePolicy)\n\tcreateStoragePolicytRes, err := sshExec(sshClientConfig, masterIp, createStoragePolicy)\n\tif err != nil && createStoragePolicytRes.Code != 0 {\n\t\tfssh.LogResult(createStoragePolicytRes)\n\t\treturn fmt.Errorf(\"couldn't execute command: %s on host: %v , error: %s\",\n\t\t\tcreateStoragePolicy, masterIp, err)\n\t}\n\treturn nil\n}", "func (client *NetworkToNetworkInterconnectsClient) createCreateRequest(ctx context.Context, resourceGroupName string, networkFabricName string, networkToNetworkInterconnectName string, body NetworkToNetworkInterconnect, options *NetworkToNetworkInterconnectsClientBeginCreateOptions) (*policy.Request, 
error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetworkFabric/networkFabrics/{networkFabricName}/networkToNetworkInterconnects/{networkToNetworkInterconnectName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif networkFabricName == \"\" {\n\t\treturn nil, errors.New(\"parameter networkFabricName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{networkFabricName}\", url.PathEscape(networkFabricName))\n\tif networkToNetworkInterconnectName == \"\" {\n\t\treturn nil, errors.New(\"parameter networkToNetworkInterconnectName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{networkToNetworkInterconnectName}\", url.PathEscape(networkToNetworkInterconnectName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-06-15\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, body)\n}", "func New(id, parentID, name string, blobsize int64, blobID string, owner *userpb.UserId, lu PathLookup) *Node {\n\tif blobID == \"\" {\n\t\tblobID = uuid.New().String()\n\t}\n\treturn &Node{\n\t\tID: id,\n\t\tParentID: parentID,\n\t\tName: name,\n\t\tBlobsize: blobsize,\n\t\towner: owner,\n\t\tlu: lu,\n\t\tBlobID: blobID,\n\t}\n}", "func CreateNewNode(nodeID int, fileName string, timeStart string, graphID int, fileType string, userID int) (Node, error) {\n\tif nodeID == 0 || fileName == \"\" || timeStart == \"\" || graphID == 0 || fileType == \"\" || userID == 0 {\n\t\treturn Node{}, errors.New(\"Not enough argument supplied\")\n\t}\n\treturn Node{\n\t\tNodeID: nodeID,\n\t\tNodeBits: 0,\n\t\tNodeDesc: fileName,\n\t\tNodeDT: timeStart,\n\t\tNodeGID: graphID,\n\t\tNodeHash: \"\",\n\t\tNodeLevel: -32768,\n\t\tNodeType: fileType,\n\t\tNodeUID: userID,\n\t}, nil\n}", "func (s *StorageBase) New(ctx context.Context, ttl time.Duration) (id string, err error) {\n\treturn \"\", ErrorDisabled\n}", "func (o *CreateStorageV1CSINodeCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (r Virtual_Guest_Block_Device_Template_Group) CreateFromIcos(configuration *datatypes.Container_Virtual_Guest_Block_Device_Template_Configuration) (resp datatypes.Virtual_Guest_Block_Device_Template_Group, err error) {\n\tparams := []interface{}{\n\t\tconfiguration,\n\t}\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_Guest_Block_Device_Template_Group\", \"createFromIcos\", params, &r.Options, &resp)\n\treturn\n}", "func EncodeStorageImagesCreateRequest(encoder func(*http.Request) goahttp.Encoder) func(*http.Request, interface{}) error {\n\treturn func(req *http.Request, v interface{}) error {\n\t\tp, ok := v.(*vm.Storage)\n\t\tif !ok {\n\t\t\treturn goahttp.ErrInvalidType(\"spin-registry\", \"storage_images_create\", \"*vm.Storage\", v)\n\t\t}\n\t\tbody := 
p\n\t\tif err := encoder(req).Encode(&body); err != nil {\n\t\t\treturn goahttp.ErrEncodingError(\"spin-registry\", \"storage_images_create\", err)\n\t\t}\n\t\treturn nil\n\t}\n}", "func CreateCreateFileSystemRequest() (request *CreateFileSystemRequest) {\n\trequest = &CreateFileSystemRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"DFS\", \"2018-06-20\", \"CreateFileSystem\", \"alidfs\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (api *nodeAPI) Create(obj *cluster.Node) error {\n\tif api.ct.resolver != nil {\n\t\tapicl, err := api.ct.apiClient()\n\t\tif err != nil {\n\t\t\tapi.ct.logger.Errorf(\"Error creating API server clent. Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = apicl.ClusterV1().Node().Create(context.Background(), obj)\n\t\tif err != nil && strings.Contains(err.Error(), \"AlreadyExists\") {\n\t\t\t_, err = apicl.ClusterV1().Node().Update(context.Background(), obj)\n\n\t\t}\n\t\treturn err\n\t}\n\n\tapi.ct.handleNodeEvent(&kvstore.WatchEvent{Object: obj, Type: kvstore.Created})\n\treturn nil\n}", "func (r *ProjectsLocationsNfsSharesService) Create(parent string, nfsshare *NfsShare) *ProjectsLocationsNfsSharesCreateCall {\n\tc := &ProjectsLocationsNfsSharesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.parent = parent\n\tc.nfsshare = nfsshare\n\treturn c\n}", "func (sdk *SDK) createNode(req *CreateInstanceRequest) (*cloudsvr.CloudNode, error) {\n\tvar err error\n\n\t// create ecs firstly\n\tecsID, err := sdk.NewEcs(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"aliyun ecs create failed: %v\", err)\n\t}\n\tlog.Printf(\"aliyun ecs %s created at %s\", ecsID, req.RegionID)\n\n\t// if create succeed, but other operations failed, clean up the newly created ecs instance to prevent garbage left\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"aliyun cloud node creation failed, clean up the newly created ecs instance %s. 
[%v]\", ecsID, err)\n\t\t\tsdk.RemoveNode(&cloudsvr.CloudNode{ID: ecsID, RegionOrZoneID: req.RegionID})\n\t\t}\n\t}()\n\n\t// now ecs is stopped, we assign an public ip to it\n\tip, err := sdk.AssignEcsPublicIP(ecsID)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"aliyun ecs %s assign public ip failed: %v\", ecsID, err)\n\t}\n\tlog.Printf(\"aliyun ecs %s assgined public ipaddress %s\", ecsID, ip)\n\n\t// start ecs\n\tif err = sdk.StartEcs(ecsID); err != nil {\n\t\treturn nil, fmt.Errorf(\"aliyun ecs %s start failed: %v\", ecsID, err)\n\t}\n\tlog.Printf(\"aliyun ecs %s starting\", ecsID)\n\n\t// wait ecs to be running\n\tif err = sdk.WaitEcs(req.RegionID, ecsID, \"Running\", time.Second*300); err != nil {\n\t\treturn nil, fmt.Errorf(\"aliyun ecs %s waitting to be running failed: %v\", ecsID, err)\n\t}\n\tlog.Printf(\"aliyun ecs %s is Running now\", ecsID)\n\n\treturn &cloudsvr.CloudNode{\n\t\tID: ecsID,\n\t\tRegionOrZoneID: req.RegionID,\n\t\tInstanceType: req.InstanceType,\n\t\tCloudSvrType: sdk.Type(),\n\t\tIPAddr: ip,\n\t\tPort: \"22\",\n\t\tUser: \"root\",\n\t\tPassword: req.Password,\n\t}, nil\n}", "func Create(req clusterapi.Request) (clusterapi.ClusterAPI, error) {\n\t// Validates parameters\n\tif req.Name == \"\" {\n\t\treturn nil, fmt.Errorf(\"Invalid parameter req.Name: can't be empty\")\n\t}\n\tif req.CIDR == \"\" {\n\t\treturn nil, fmt.Errorf(\"Invalid parameter req.CIDR: can't be empty\")\n\t}\n\n\t// We need at first the Metadata container to be present\n\terr := utils.CreateMetadataContainer()\n\tif err != nil {\n\t\tfmt.Printf(\"failed to create Object Container: %s\\n\", err.Error())\n\t}\n\n\tvar network *pb.Network\n\tvar instance clusterapi.ClusterAPI\n\n\tlog.Printf(\"Creating infrastructure for cluster '%s'\", req.Name)\n\n\ttenant, err := utils.GetCurrentTenant()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Creates network\n\tlog.Printf(\"Creating Network 'net-%s'\", req.Name)\n\treq.Name = strings.ToLower(req.Name)\n\tnetwork, err = utils.CreateNetwork(\"net-\"+req.Name, req.CIDR)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Failed to create Network '%s': %s\", req.Name, err.Error())\n\t\treturn nil, err\n\t}\n\n\tswitch req.Flavor {\n\tcase Flavor.DCOS:\n\t\treq.NetworkID = network.ID\n\t\treq.Tenant = tenant\n\t\tinstance, err = dcos.NewCluster(req)\n\t\tif err != nil {\n\t\t\t//utils.DeleteNetwork(network.ID)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tlog.Printf(\"Cluster '%s' created and initialized successfully\", req.Name)\n\treturn instance, nil\n}", "func NewStorage(opts generic.RESTOptions, connection client.ConnectionInfoGetter, proxyTransport http.RoundTripper) NodeStorage {\n\tprefix := \"/minions\"\n\n\tnewListFunc := func() runtime.Object { return &api.NodeList{} }\n\tstorageInterface := opts.Decorator(\n\t\topts.Storage, cachesize.GetWatchCacheSizeByResource(cachesize.Nodes), &api.Node{}, prefix, node.Strategy, newListFunc)\n\n\tstore := &etcdgeneric.Etcd{\n\t\tNewFunc: func() runtime.Object { return &api.Node{} },\n\t\tNewListFunc: newListFunc,\n\t\tKeyRootFunc: func(ctx api.Context) string {\n\t\t\treturn prefix\n\t\t},\n\t\tKeyFunc: func(ctx api.Context, name string) (string, error) {\n\t\t\treturn etcdgeneric.NoNamespaceKeyFunc(ctx, prefix, name)\n\t\t},\n\t\tObjectNameFunc: func(obj runtime.Object) (string, error) {\n\t\t\treturn obj.(*api.Node).Name, nil\n\t\t},\n\t\tPredicateFunc: node.MatchNode,\n\t\tQualifiedResource: api.Resource(\"nodes\"),\n\t\tDeleteCollectionWorkers: opts.DeleteCollectionWorkers,\n\n\t\tCreateStrategy: 
node.Strategy,\n\t\tUpdateStrategy: node.Strategy,\n\t\tExportStrategy: node.Strategy,\n\n\t\tStorage: storageInterface,\n\t}\n\n\tstatusStore := *store\n\tstatusStore.UpdateStrategy = node.StatusStrategy\n\n\tnodeREST := &REST{store, connection, proxyTransport}\n\n\treturn NodeStorage{\n\t\tNode: nodeREST,\n\t\tStatus: &StatusREST{store: &statusStore},\n\t\tProxy: &noderest.ProxyREST{Store: store, Connection: client.ConnectionInfoGetter(nodeREST), ProxyTransport: proxyTransport},\n\t}\n}", "func NewReplaceStorageV1CSINodeOK() *ReplaceStorageV1CSINodeOK {\n\n\treturn &ReplaceStorageV1CSINodeOK{}\n}", "func createDefaultStorageClass(kubernetesClient *kubernetes.Clientset, provisioner string, volumeBindingMode storagev1.VolumeBindingMode, parameters map[string]string) error {\n\tdefaultStorageClass := storagev1.StorageClass{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"default\",\n\t\t\tAnnotations: map[string]string{\n\t\t\t\tstorageUtil.IsDefaultStorageClassAnnotation: \"true\",\n\t\t\t},\n\t\t},\n\t\tVolumeBindingMode: &volumeBindingMode,\n\t\tProvisioner: provisioner,\n\t\tParameters: parameters,\n\t}\n\n\t_, err := kubernetesClient.StorageV1().StorageClasses().Create(&defaultStorageClass)\n\tif k8serr.ReasonForError(err) == metav1.StatusReasonAlreadyExists {\n\t\t_, err = kubernetesClient.StorageV1().StorageClasses().Update(&defaultStorageClass)\n\t\tif err != nil {\n\t\t\treturn errors.WrapIf(err, \"create storage class failed\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func NewNode(host string, size int) Node {\n\treturn node{host: host, size: size}\n}", "func (client appendBlobClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (*AppendBlobCreateResponse, error) {\n\tif err := validate([]validation{\n\t\t{targetValue: timeout,\n\t\t\tconstraints: []constraint{{target: \"timeout\", name: null, rule: false,\n\t\t\t\tchain: []constraint{{target: \"timeout\", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {\n\t\treturn nil, err\n\t}\n\treq, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.(*AppendBlobCreateResponse), err\n}", "func CREATE(nowNode dinfo.DirectoryNode, name string, thisType int, size int) dinfo.DirectoryNode {\n\tnowNode.Nfile.FileNodes = append(nowNode.Nfile.FileNodes, ninfo.FileNode{\n\t\tName: name,\n\t\tType: thisType,\n\t\tSize: size,\n\t})\n\n\treturn nowNode\n}", "func (c *Client) 
CreateNode(ctx context.Context, n *NodeInfo, opts NodeOpts) error {\n\tif n == nil || n.NetworkID == \"\" {\n\t\treturn errors.New(\"invalid configuration provided\")\n\t}\n\n\t// make sure important fields are all populated\n\tn.withDefaults()\n\n\t// set up logger to record process events\n\tvar l = log.NewProcessLogger(c.l, \"create_node\",\n\t\t\"network_id\", n.NetworkID)\n\n\t// initialize node assets, such as swarm keys and startup scripts\n\tif err := c.initNodeAssets(n, opts); err != nil {\n\t\tl.Warnw(\"failed to init filesystem for node\", \"error\", err)\n\t\treturn fmt.Errorf(\"failed to set up filesystem for node: %s\", err.Error())\n\t}\n\n\t// set up basic configuration\n\tvar (\n\t\tports = nat.PortMap{\n\t\t\t// TODO: make this private - blocked by lack of multiaddr support for /http\n\t\t\t// paths, which means delegator can't work with go-ipfs swarm.\n\t\t\t// See https://github.com/multiformats/multiaddr/issues/63\n\t\t\tcontainerSwarmPort + \"/tcp\": []nat.PortBinding{\n\t\t\t\t{HostIP: network.Public, HostPort: n.Ports.Swarm}},\n\n\t\t\t// API server connections can be made via delegator. Suffers from same\n\t\t\t// issue as above, but direct API exposure is dangeorous since it is\n\t\t\t// authenticated. Delegator can handle authentication\n\t\t\tcontainerAPIPort + \"/tcp\": []nat.PortBinding{\n\t\t\t\t{HostIP: network.Private, HostPort: n.Ports.API}},\n\n\t\t\t// Gateway connections can be made via delegator, with access controlled\n\t\t\t// by database\n\t\t\tcontainerGatewayPort + \"/tcp\": []nat.PortBinding{\n\t\t\t\t{HostIP: network.Private, HostPort: n.Ports.Gateway}},\n\t\t}\n\t\tvolumes = []string{\n\t\t\tc.getDataDir(n.NetworkID) + \":/data/ipfs\",\n\t\t\tc.getDataDir(n.NetworkID) + \"/ipfs_start:/usr/local/bin/start_ipfs\",\n\t\t}\n\t\trestartPolicy = container.RestartPolicy{Name: \"unless-stopped\"}\n\n\t\t// important metadata about node\n\t\tlabels = n.labels(n.BootstrapPeers, c.getDataDir(n.NetworkID))\n\t)\n\n\t// remove restart policy if AutoRemove is enabled\n\tif opts.AutoRemove {\n\t\trestartPolicy = container.RestartPolicy{}\n\t}\n\n\t// create ipfs node container\n\tcontainerConfig := &container.Config{\n\t\tImage: c.ipfsImage,\n\t\tCmd: []string{\n\t\t\t\"daemon\", \"--migrate=true\", \"--enable-pubsub-experiment\",\n\t\t},\n\t\tEnv: []string{\n\t\t\t\"LIBP2P_FORCE_PNET=1\", // enforce private networks\n\t\t},\n\t\tLabels: labels,\n\t\tTty: true,\n\t\tAttachStdout: true,\n\t\tAttachStderr: true,\n\t}\n\tcontainerHostConfig := &container.HostConfig{\n\t\tAutoRemove: opts.AutoRemove,\n\t\tRestartPolicy: restartPolicy,\n\t\tBinds: volumes,\n\t\tPortBindings: ports,\n\t\tResources: containerResources(n),\n\t}\n\n\tvar start = time.Now()\n\tl = l.With(\"container.name\", n.ContainerName)\n\tl.Debugw(\"creating network container\",\n\t\t\"container.config\", containerConfig,\n\t\t\"container.host_config\", containerHostConfig)\n\tresp, err := c.d.ContainerCreate(ctx, containerConfig, containerHostConfig, nil, n.ContainerName)\n\tif err != nil {\n\t\tl.Errorw(\"failed to create container\",\n\t\t\t\"error\", err, \"build.duration\", time.Since(start))\n\t\treturn fmt.Errorf(\"failed to instantiate node: %s\", err.Error())\n\t}\n\tl = l.With(\"container.id\", resp.ID)\n\tl.Infow(\"container created\",\n\t\t\"build.duration\", time.Since(start))\n\n\t// check for warnings\n\tif len(resp.Warnings) > 0 {\n\t\tl.Warnw(\"warnings encountered on container build\",\n\t\t\t\"warnings\", resp.Warnings)\n\t}\n\n\t// assign node metadata\n\tn.DockerID = 
resp.ID\n\tn.DataDir = c.getDataDir(n.NetworkID)\n\n\t// spin up node\n\tl.Info(\"starting container\")\n\tstart = time.Now()\n\tif err := c.d.ContainerStart(ctx, n.DockerID, types.ContainerStartOptions{}); err != nil {\n\t\tl.Errorw(\"error occurred on startup - removing container\",\n\t\t\t\"error\", err, \"start.duration\", time.Since(start))\n\t\tgo c.d.ContainerRemove(ctx, n.ContainerName, types.ContainerRemoveOptions{Force: true})\n\t\treturn fmt.Errorf(\"failed to start ipfs node: %s\", err.Error())\n\t}\n\n\t// wait for node to start\n\tif err := c.waitForNode(ctx, n.DockerID); err != nil {\n\t\tl.Errorw(\"error occurred waiting for IPFS daemon startup\",\n\t\t\t\"error\", err, \"start.duration\", time.Since(start))\n\t\treturn err\n\t}\n\n\t// bootstrap peers if required\n\tif len(n.BootstrapPeers) > 0 {\n\t\tl.Debugw(\"bootstrapping network node with provided peers\")\n\t\tif err := c.bootstrapNode(ctx, n.DockerID, n.BootstrapPeers...); err != nil {\n\t\t\tl.Warnw(\"failed to bootstrap node - stopping container\",\n\t\t\t\t\"error\", err, \"start.duration\", time.Since(start))\n\t\t\tgo c.StopNode(ctx, n)\n\t\t\treturn fmt.Errorf(\"failed to bootstrap network node with provided peers: %s\", err.Error())\n\t\t}\n\t}\n\n\t// everything is good to go\n\tl.Infow(\"network container started without issue\",\n\t\t\"start.duration\", time.Since(start))\n\treturn nil\n}", "func (ctx *UploadOpmlContext) Created() error {\n\tctx.ResponseData.WriteHeader(201)\n\treturn nil\n}", "func CreateNamespace(projectCode, clusterID, name, creator string) error {\n\tbcsCCConf := config.GlobalConf.BCSCC\n\tif !bcsCCConf.Enable {\n\t\treturn nil\n\t}\n\tmodel := store.GetModel()\n\tp, err := model.GetProject(context.Background(), projectCode)\n\tif err != nil {\n\t\tlogging.Error(\"get project by code %s failed, err: %s\", projectCode, err.Error())\n\t\treturn err\n\t}\n\trealPath := fmt.Sprintf(createNamespacePath, p.ProjectID, clusterID)\n\tlogging.Info(\"request url: %s, creator: %s\", realPath, creator)\n\treqURL := fmt.Sprintf(\"%s%s\", bcsCCConf.Host, realPath)\n\tdata := map[string]interface{}{\n\t\t\"name\": name,\n\t\t\"creator\": creator,\n\t\t\"env_type\": \"prod\",\n\t\t\"has_image_secret\": false,\n\t}\n\treq := gorequest.SuperAgent{\n\t\tUrl: reqURL,\n\t\tMethod: \"POST\",\n\t\tData: data,\n\t}\n\treq.QueryData = url.Values{}\n\tif bcsCCConf.UseGateway {\n\t\tdata[\"app_code\"] = config.GlobalConf.App.Code\n\t\tdata[\"app_secret\"] = config.GlobalConf.App.Secret\n\t} else {\n\t\taccessToken, err := GetAccessToken()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq.QueryData.Add(\"access_token\", accessToken)\n\t}\n\tlogging.Info(\"req data:%v\", req)\n\treturn requestCommonAndParse(req)\n}", "func createNewNode(ctx context.Context, nodeName string, virtual bool, clientset kubernetes.Interface) (*corev1.Node, error) {\n\tresources := corev1.ResourceList{}\n\tresources[corev1.ResourceCPU] = *resource.NewScaledQuantity(5000, resource.Milli)\n\tresources[corev1.ResourceMemory] = *resource.NewScaledQuantity(5, resource.Mega)\n\tnode := &corev1.Node{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: nodeName,\n\t\t},\n\t}\n\tif virtual {\n\t\tnode.Labels = map[string]string{\n\t\t\tconsts.TypeLabel: consts.TypeNode,\n\t\t}\n\t}\n\tnode.Status = corev1.NodeStatus{\n\t\tCapacity: resources,\n\t\tAllocatable: resources,\n\t\tConditions: []corev1.NodeCondition{\n\t\t\t0: {\n\t\t\t\tType: corev1.NodeReady,\n\t\t\t\tStatus: corev1.ConditionTrue,\n\t\t\t},\n\t\t},\n\t}\n\tnode, err := 
clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn node, nil\n}", "func CreateStorage(projectID, description, plan, facility, frequency string, size, count int) error {\n\tclient, err := NewExtPacketClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Create a snapshot policy. If `count` is 0, we shall pass an empty object\n\tsnapshotPolicies := make([]*extpackngo.SnapshotPolicy, 1)\n\tif count != 0 {\n\t\tsnapshotPolicies = append(snapshotPolicies, &extpackngo.SnapshotPolicy{\n\t\t\tSnapshotFrequency: frequency,\n\t\t\tSnapshotCount: count})\n\t}\n\trequest := &extpackngo.StorageCreateRequest{\n\t\tDescription: description,\n\t\tPlan: plan,\n\t\tSize: size,\n\t\tFacility: facility,\n\t\tSnapshotPolicies: snapshotPolicies,\n\t}\n\n\tstorage, _, err := client.Storages.Create(projectID, request)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te := MarshallAndPrint(storage)\n\treturn e\n}", "func Create(c *gophercloud.ServiceClient, containerName, objectName string, content io.ReadSeeker, opts CreateOptsBuilder) CreateResult {\n\tvar res CreateResult\n\n\turl := createURL(c, containerName, objectName)\n\th := make(map[string]string)\n\n\tif opts != nil {\n\t\theaders, query, err := opts.ToObjectCreateParams()\n\t\tif err != nil {\n\t\t\tres.Err = err\n\t\t\treturn res\n\t\t}\n\n\t\tfor k, v := range headers {\n\t\t\th[k] = v\n\t\t}\n\n\t\turl += query\n\t}\n\n\thash := md5.New()\n\tbufioReader := bufio.NewReader(io.TeeReader(content, hash))\n\tio.Copy(ioutil.Discard, bufioReader)\n\tlocalChecksum := hash.Sum(nil)\n\n\th[\"ETag\"] = fmt.Sprintf(\"%x\", localChecksum)\n\n\t_, err := content.Seek(0, 0)\n\tif err != nil {\n\t\tres.Err = err\n\t\treturn res\n\t}\n\n\tropts := gophercloud.RequestOpts{\n\t\tRawBody: content,\n\t\tMoreHeaders: h,\n\t}\n\n\tresp, err := c.Request(\"PUT\", url, ropts)\n\tif err != nil {\n\t\tres.Err = err\n\t\treturn res\n\t}\n\tif resp != nil {\n\t\tres.Header = resp.Header\n\t\tif resp.Header.Get(\"ETag\") == fmt.Sprintf(\"%x\", localChecksum) {\n\t\t\tres.Err = err\n\t\t\treturn res\n\t\t}\n\t\tres.Err = fmt.Errorf(\"Local checksum does not match API ETag header\")\n\t}\n\n\treturn res\n}", "func (o *CreateStorageV1CSINodeAccepted) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *CreateStorageV1CSINodeAccepted {\n\to.Payload = payload\n\treturn o\n}", "func (client *ContainerClient) createCreateRequest(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (*policy.Request, error) {\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"restype\", \"container\")\n\tif options != nil && options.Timeout != nil {\n\t\treqQP.Set(\"timeout\", strconv.FormatInt(int64(*options.Timeout), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.Metadata != nil {\n\t\tfor k, v := range options.Metadata {\n\t\t\tif v != nil {\n\t\t\t\treq.Raw().Header[\"x-ms-meta-\"+k] = []string{*v}\n\t\t\t}\n\t\t}\n\t}\n\tif options != nil && options.Access != nil {\n\t\treq.Raw().Header[\"x-ms-blob-public-access\"] = []string{string(*options.Access)}\n\t}\n\treq.Raw().Header[\"x-ms-version\"] = []string{\"2020-10-02\"}\n\tif options != nil && options.RequestID != nil {\n\t\treq.Raw().Header[\"x-ms-client-request-id\"] = []string{*options.RequestID}\n\t}\n\tif containerCPKScopeInfo != nil && 
containerCPKScopeInfo.DefaultEncryptionScope != nil {\n\t\treq.Raw().Header[\"x-ms-default-encryption-scope\"] = []string{*containerCPKScopeInfo.DefaultEncryptionScope}\n\t}\n\tif containerCPKScopeInfo != nil && containerCPKScopeInfo.PreventEncryptionScopeOverride != nil {\n\t\treq.Raw().Header[\"x-ms-deny-encryption-scope-override\"] = []string{strconv.FormatBool(*containerCPKScopeInfo.PreventEncryptionScopeOverride)}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/xml\"}\n\treturn req, nil\n}", "func Create (w http.ResponseWriter, r *http.Request) {\n\t/* This is an SBC */\n\tif CREATED == false {\n\t\t/* Move the checking of ID up first to confirm this is allowed */\n\t\t/* Do most of start. Just don't download because that would be downloading from self */\n\t\t/* Get address and ID */\n\t\t/* Get port number and set that to ID */\n\t\t/* Save localhost as Addr */\n\t\tsplitHostPort := strings.Split(r.Host, \":\")\n\t\ti, err := strconv.ParseInt(splitHostPort[1], 10, 32)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(500)\n\t\t\tpanic(err)\n\t\t}\n\t\t/* ID is now port number. Address is now correct Address */\n\t\tID = int32(i)\n\t\tSELF_ADDR = r.Host\n\t\t/* Check if ID is allowed in ALLOWED_IDs */\n\t\tif _, ok := ALLOWED_IDS[ID]; ok {\n\t\t\tnewBlockChain := data.NewBlockChain()\n\n\t\t\tmpt1 := p1.MerklePatriciaTrie{}\n\t\t\tmpt1.Initial()\n\t\t\tmpt1.Insert(\"1\", \"Origin\")\n\n\t\t\tmpt2 := p1.MerklePatriciaTrie{}\n\t\t\tmpt2.Initial()\n\t\t\tmpt2.Insert(\"1\", \"Decoy1\")\n\n\t\t\tmpt3 := p1.MerklePatriciaTrie{}\n\t\t\tmpt3.Initial()\n\t\t\tmpt3.Insert(\"1\", \"Decoy2\")\n\n\t\t\tmpt4 := p1.MerklePatriciaTrie{}\n\t\t\tmpt4.Initial()\n\t\t\tmpt4.Insert(\"1\", \"Decoy3\")\n\n\t\t\thexPubKey := hexutil.Encode(signature_p.PUBLIC_KEY)\n\t\t\tnewBlockChain.GenBlock(mpt1, hexPubKey)\n\t\t\tnewBlockChain.GenBlock(mpt2, hexPubKey)\n\t\t\tnewBlockChain.GenBlock(mpt3, hexPubKey)\n\t\t\tnewBlockChain.GenBlock(mpt4, hexPubKey)\n\t\t\t/* Set Global variable SBC to be this new blockchain */\n\t\t\tSBC = newBlockChain\n\t\t\t/* Generate Multiple Blocks Initially */\n\t\t\t\t\n\t\t\tblockChainJson, _ := SBC.BlockChainToJson()\n\t\t\t/* Write this to the server */\n\t\t\tw.Write([]byte(blockChainJson))\n\n\t\t\t/* Need to instantiate the peer list */\n\t\t\tPeers = data.NewPeerList(ID, 32)\n\t\t\tBALLOT = ReadDataFromBallot()\n\t\t\tCREATED = true\n\t\t}\n\t}\n}", "func NewCreateIOCOK() *CreateIOCOK {\n\treturn &CreateIOCOK{}\n}", "func (c *Client) CreateStorageNode(node *corev1.StorageNode) (*corev1.StorageNode, error) {\n\tif err := c.initClient(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tns := node.Namespace\n\tif len(ns) == 0 {\n\t\tns = metav1.NamespaceDefault\n\t}\n\n\treturn c.ost.CoreV1().StorageNodes(ns).Create(context.TODO(), node, metav1.CreateOptions{})\n}", "func (api *nodeAPI) SyncCreate(obj *cluster.Node) error {\n\tnewObj := obj\n\tevtType := kvstore.Created\n\tvar writeErr error\n\tif api.ct.resolver != nil {\n\t\tapicl, err := api.ct.apiClient()\n\t\tif err != nil {\n\t\t\tapi.ct.logger.Errorf(\"Error creating API server clent. 
Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\n\t\tnewObj, writeErr = apicl.ClusterV1().Node().Create(context.Background(), obj)\n\t\tif writeErr != nil && strings.Contains(writeErr.Error(), \"AlreadyExists\") {\n\t\t\tnewObj, writeErr = apicl.ClusterV1().Node().Update(context.Background(), obj)\n\t\t\tevtType = kvstore.Updated\n\t\t}\n\t}\n\n\tif writeErr == nil {\n\t\tapi.ct.handleNodeEvent(&kvstore.WatchEvent{Object: newObj, Type: evtType})\n\t}\n\treturn writeErr\n}", "func Create(children ...Element) *CompoundElement { return newCE(\"Create\", children) }", "func CreateDescribeParentPlatformRequest() (request *DescribeParentPlatformRequest) {\n\trequest = &DescribeParentPlatformRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"vs\", \"2018-12-12\", \"DescribeParentPlatform\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func createOIDCIssuer(client *azureclients.AzureClientWrapper, name, region, oidcResourceGroupName, storageAccountName, blobContainerName, subscriptionID, tenantID, publicKeyPath, outputDir string, resourceTags map[string]string, dryRun bool) (string, error) {\n\t// Add CCO's \"owned\" tag to resource tags map\n\tresourceTags[fmt.Sprintf(\"%s_%s\", ownedAzureResourceTagKeyPrefix, name)] = ownedAzureResourceTagValue\n\n\tstorageAccountKey := \"\"\n\tif !dryRun {\n\t\t// Ensure that the public key file can be read at the publicKeyPath before continuing\n\t\t_, err := os.ReadFile(publicKeyPath)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"unable to read public key file\")\n\t\t}\n\n\t\t// Ensure the resource group exists\n\t\terr = ensureResourceGroup(client, oidcResourceGroupName, region, resourceTags)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"failed to ensure resource group\")\n\t\t}\n\n\t\t// Ensure storage account exists\n\t\terr = ensureStorageAccount(client, storageAccountName, oidcResourceGroupName, region, resourceTags)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"failed to ensure storage account\")\n\t\t}\n\n\t\tstorageAccountKey, err = getStorageAccountKey(client, storageAccountName, oidcResourceGroupName)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"failed to get storage account key\")\n\t\t}\n\n\t\t// Ensure blob container exists\n\t\terr = ensureBlobContainer(client, oidcResourceGroupName, storageAccountName, blobContainerName)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"failed to create blob container\")\n\t\t}\n\t}\n\n\t// Upload OIDC documents (openid-configuration, jwks.json) to the blob container\n\toutputDirAbsPath, err := filepath.Abs(outputDir)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tissuerURL, err := uploadOIDCDocuments(client, storageAccountName, storageAccountKey, publicKeyPath, blobContainerName, outputDirAbsPath, dryRun, resourceTags)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to upload OIDC documents\")\n\t}\n\n\t// Write cluster authentication object installer manifest cluster-authentication-02-config.yaml\n\t// for our issuerURL within outputDir/manifests\n\tif err = provisioning.CreateClusterAuthentication(issuerURL, outputDir); err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to create cluster authentication manifest\")\n\t}\n\n\t// Write Azure AD pod identity webhook config secret azure-ad-pod-identity-webhook-config.yaml\n\t// within outputDir/manifests\n\tif err = createPodIdentityWebhookConfigSecret(tenantID, outputDir); err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed 
to create Azure AD pod identity webhook manifest\")\n\t}\n\n\treturn issuerURL, nil\n}", "func (b *Block) CreateGenesisBlock() {\n\n header := Header{0, int64(time.Now().Unix()), \"GenesisBlock\", \"\", 0, \"\"}\n b.Mpt = p1.GetMPTrie()\n b.Header = header\n}", "func NewCreateFileDefault(code int) *CreateFileDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &CreateFileDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewNode(id uint64) *Node { return &Node{Id: id} }", "func (c *defaultGcsClient) CreateObject(ctxIn context.Context, bucketName, objectName, content string) error {\n\tctx, span := trace.StartSpan(ctxIn, \"(*defaultGcsClient).CreateObject\")\n\tdefer span.End()\n\n\tw := c.client.Bucket(bucketName).Object(objectName).NewWriter(ctx)\n\tif _, err := fmt.Fprint(w, content); err != nil {\n\t\treturn err\n\t}\n\n\treturn w.Close()\n}", "func NewReplaceStorageV1CSINodeOK() *ReplaceStorageV1CSINodeOK {\n\treturn &ReplaceStorageV1CSINodeOK{}\n}", "func createNode(w http.ResponseWriter, r *http.Request) {\n\tvar newNode* Nodo\n\t//leemos el body de la petición\n\treqBody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"Datos Inválidos\")\n\t}\n\t//tomamos los valores del body y los colocamos en una variable de struct de Nodo\n\tjson.Unmarshal(reqBody, &newNode)\n\t//fmt.Printf(\"%d\",newNode.Carnet)\n\t//insertamos la raiz\n\traiz=crearNodo(raiz,newNode)\n\tescribir,err2:=json.Marshal(raiz)\n\tif err2 != nil {\n log.Fatal(err2)\n }\n\tdata := []byte(escribir)\n err = ioutil.WriteFile(\"persiste.json\", data, 0644)\n if err != nil {\n log.Fatal(err)\n }\n\tfmt.Println(\"----------------\")\n\t//preorden(raiz)\n\t//createDot(raiz)\n\t//Si todo ha salido bien, devolvemos un status code 201 y el arbol\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\trespuesta:= &Respuesta{Message:\"Alumno creado exitosamente\"}\n\tw.WriteHeader(http.StatusCreated)\n\tjson.NewEncoder(w).Encode(respuesta)\n\n}", "func CreateUploadIoTDataToBlockchainRequest() (request *UploadIoTDataToBlockchainRequest) {\n\trequest = &UploadIoTDataToBlockchainRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"lto\", \"2021-07-07\", \"UploadIoTDataToBlockchain\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateCreateFileDetectRequest() (request *CreateFileDetectRequest) {\n\trequest = &CreateFileDetectRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Sas\", \"2018-12-03\", \"CreateFileDetect\", \"sas\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func NewCreateImageFromSnapshotsRequestWithoutParam() *CreateImageFromSnapshotsRequest {\n\n return &CreateImageFromSnapshotsRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/regions/{regionId}/images:createImageFromSnapshots\",\n Method: \"POST\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}", "func (cc *CloudComb) CreateNamespace(params string) (uint, error) {\n\tif params == \"\" {\n\t\treturn 0, errors.New(\"Params is missed\")\n\t}\n\tparams = PurifyParams(params)\n\n\tbody := bytes.NewBufferString(params)\n\n\t// do rest request\n\tresult, _, err := cc.doRESTRequest(\"POST\", \"/api/v1/namespaces\", \"\", nil, body)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t// create cluster response messages\n\ttype createNamespaceRes struct {\n\t\tId uint `json:\"namespace_id\"`\n\t}\n\tvar res createNamespaceRes\n\n\t// parse json\n\tif err := json.NewDecoder(strings.NewReader(result)).Decode(&res); err != nil {\n\t\treturn 
0, err\n\t}\n\n\treturn res.Id, nil\n}", "func newnode(id byte, name string, value string) *xmlx.Node {\n\tnode := xmlx.NewNode(id)\n\tif name != \"\" {\n\t\tnode.Name = xml.Name{\n\t\t\tLocal: name,\n\t\t}\n\t}\n\tif value != \"\" {\n\t\tnode.Value = value\n\t}\n\treturn node\n}", "func (client *ContainerClient) ListBlobHierarchySegmentCreateRequest(ctx context.Context, delimiter string, options *ContainerClientListBlobHierarchySegmentOptions) (*policy.Request, error) {\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"restype\", \"container\")\n\treqQP.Set(\"comp\", \"list\")\n\tif options != nil && options.Prefix != nil {\n\t\treqQP.Set(\"prefix\", *options.Prefix)\n\t}\n\treqQP.Set(\"delimiter\", delimiter)\n\tif options != nil && options.Marker != nil {\n\t\treqQP.Set(\"marker\", *options.Marker)\n\t}\n\tif options != nil && options.Maxresults != nil {\n\t\treqQP.Set(\"maxresults\", strconv.FormatInt(int64(*options.Maxresults), 10))\n\t}\n\tif options != nil && options.Include != nil {\n\t\treqQP.Set(\"include\", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), \"[]\")), \",\"))\n\t}\n\tif options != nil && options.Timeout != nil {\n\t\treqQP.Set(\"timeout\", strconv.FormatInt(int64(*options.Timeout), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"x-ms-version\"] = []string{\"2020-10-02\"}\n\tif options != nil && options.RequestID != nil {\n\t\treq.Raw().Header[\"x-ms-client-request-id\"] = []string{*options.RequestID}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/xml\"}\n\treturn req, nil\n}", "func (c *VrouterNode) Create(contrailClient contrailclient.ApiClient) error {\n\tvrouterInfoLog.Printf(\"Creating %s %s\", c.Hostname, nodeType)\n\tgscObjects := []*contrailtypes.GlobalSystemConfig{}\n\tgscObjectsList, err := contrailClient.List(\"global-system-config\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(gscObjectsList) == 0 {\n\t\tvrouterInfoLog.Println(\"no gscObject\")\n\t}\n\n\tfor _, gscObject := range gscObjectsList {\n\t\tobj, err := contrailClient.ReadListResult(\"global-system-config\", &gscObject)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tgscObjects = append(gscObjects, obj.(*contrailtypes.GlobalSystemConfig))\n\t}\n\tfor _, gsc := range gscObjects {\n\t\tvirtualRouter := &contrailtypes.VirtualRouter{}\n\t\tvirtualRouter.SetVirtualRouterIpAddress(c.IPAddress)\n\t\tvirtualRouter.SetParent(gsc)\n\t\tvirtualRouter.SetName(c.Hostname)\n\t\tannotations := contrailclient.ConvertMapToContrailKeyValuePairs(c.Annotations)\n\t\tvirtualRouter.SetAnnotations(&annotations)\n\t\tif err := contrailClient.Create(virtualRouter); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn c.ensureVMIVhost0Interface(contrailClient)\n}", "func CreateNode(t *testing.T, zkConn *szk.Conn, path string, obj interface{}) {\n\tvar data []byte\n\tvar err error\n\n\tif obj != nil {\n\t\tdata, err = json.Marshal(obj)\n\t\trequire.NoError(t, err)\n\t}\n\n\tlog.Infof(\"Creating path %+v\", path)\n\n\t_, err = zkConn.Create(path, data, 0, szk.WorldACL(szk.PermAll))\n\trequire.NoError(t, err)\n}", "func CreateMetastoreCreateTableRequest() (request *MetastoreCreateTableRequest) {\n\trequest = &MetastoreCreateTableRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Emr\", \"2016-04-08\", \"MetastoreCreateTable\", \"emr\", \"openAPI\")\n\treturn\n}", "func (client IdentityClient) 
CreateTagDefault(ctx context.Context, request CreateTagDefaultRequest) (response CreateTagDefaultResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\n\tif !(request.OpcRetryToken != nil && *request.OpcRetryToken != \"\") {\n\t\trequest.OpcRetryToken = common.String(common.RetryToken())\n\t}\n\n\tociResponse, err = common.Retry(ctx, request, client.createTagDefault, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = CreateTagDefaultResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = CreateTagDefaultResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(CreateTagDefaultResponse); ok {\n\t\tcommon.EcContext.UpdateEndOfWindow(time.Duration(240 * time.Second))\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into CreateTagDefaultResponse\")\n\t}\n\treturn\n}", "func (client *KeyVaultClient) setStorageAccountCreateRequest(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountCreateParameters, options *KeyVaultClientSetStorageAccountOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/storage/{storage-account-name}\"\n\tif storageAccountName == \"\" {\n\t\treturn nil, errors.New(\"parameter storageAccountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{storage-account-name}\", url.PathEscape(storageAccountName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (c *Creator) New() (filestorage.FileStorage, error) {\n\tfs := New(c.apiKey, c.secret)\n\treturn fs, fs.SetBucket(c.defaultBucket)\n}", "func (client StorageGatewayClient) createCloudSync(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/storageGateways/{storageGatewayId}/cloudSyncs\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateCloudSyncResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (client *ReplicationvCentersClient) createCreateRequest(ctx context.Context, fabricName string, vcenterName string, addVCenterRequest AddVCenterRequest, options *ReplicationvCentersClientBeginCreateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationvCenters/{vcenterName}\"\n\tif client.resourceName == \"\" {\n\t\treturn nil, 
errors.New(\"parameter client.resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(client.resourceName))\n\tif client.resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(client.resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif fabricName == \"\" {\n\t\treturn nil, errors.New(\"parameter fabricName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{fabricName}\", url.PathEscape(fabricName))\n\tif vcenterName == \"\" {\n\t\treturn nil, errors.New(\"parameter vcenterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vcenterName}\", url.PathEscape(vcenterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, addVCenterRequest)\n}", "func createNewStorageClass(ctx context.Context, clientset kubernetes.Interface,\n\tstorageClassName, provisioner string, defaultAnnotation bool) (*storagev1.StorageClass, error) {\n\tstorageClass := &storagev1.StorageClass{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: storageClassName,\n\t\t},\n\t\tProvisioner: provisioner,\n\t}\n\n\tif defaultAnnotation {\n\t\tstorageClass.Annotations = map[string]string{\n\t\t\t\"storageclass.kubernetes.io/is-default-class\": \"true\",\n\t\t}\n\t}\n\n\treturn clientset.StorageV1().StorageClasses().Create(ctx, storageClass, metav1.CreateOptions{})\n}", "func createNode(id int, myConf *Config, sm *State_Machine) cluster.Server {\n\tinitNode(id, myConf, sm)\n\t//Set up details about cluster nodes form json file.\n\tserver, err := cluster.New(id, \"config/cluster_config.json\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn server\n}", "func (fs *FileSystem) CreateFile(fileName string, data string, parentInodeNum int, fileType int) error {\n\n\t// Validation of the arguments\n\t// TODO same name file in the directory.\n\tif err := fs.validateCreationRequest(fileName); err != nil {\n\t\tfmt.Println(\"Error: Creation request fails while validating : \", err)\n\t\treturn err\n\t}\n\tdataBlockRequired := int(math.Ceil(float64(len(data) / DataBlockSize)))\n\t// Check resources available or not\n\tif err := resourceAvailable(fs, dataBlockRequired); err != nil {\n\t\tfmt.Println(\"Error: Creation request fails while check availabilty of resource : \", err)\n\t\treturn err\n\t}\n\n\tfmt.Println(\"filename\", fileName, \"datablockrequired\", dataBlockRequired)\n\t// Get Parent Inode\n\tparInode, err := getInodeInfo(parentInodeNum)\n\tif err != nil {\n\t\tfmt.Println(\"Unable to get parent inode \", err)\n\t\treturn err\n\t}\n\n\t// check parent inode has space to accomodate new file/ directory inside it.\n\t// here 4 is used because 1 for comma and 3 bytes representing inode number.\n\tif len(parInode) < (InodeBlockSize - 4) {\n\t\treturn fmt.Errorf(\"Parent inode doesn't have space left to accomodate new file in it\")\n\t}\n\n\t// Allocate an inode and intialise\n\tif dataBlockRequired 
!= 0 {\n\t\tdataBlockRequired++\n\t}\n\tinode := inode{fs.nextFreeInode[0], fileType, parentInodeNum, fs.nextFreeDataBlock[:dataBlockRequired]}\n\n\tfmt.Println(\"inode\", inode)\n\t// Update fst with new inode entries.\n\tfs.UpdateFst(inode)\n\n\t// Add entry in FST in memory\n\tfs.fileSystemTable[fileName] = inode.inodeNum\n\n\tparentInode := parseInode(parInode)\n\tparentInode.dataList = append(parentInode.dataList, inode.inodeNum)\n\n\t// Update the dumpFile with the file content.\n\tif err := UpdateDumpFile(inode, data, fileName, parentInode, parentInodeNum); err != nil {\n\t\tfmt.Println(\"unable to update the disk : \", err)\n\t\treturn err\n\t}\n\n\t// TODO : After successfull creation of file, update the directory data block accordingly..\n\n\tfmt.Println(\"successful updation in disk\", inode)\n\n\treturn nil\n}", "func op_CREATE(pc *uint64, in *interpreter, ctx *callCtx) uint64 {\n\tstack := ctx.stack\n\n\tvalue, offset, size := stack.Pop(), stack.Pop(), stack.Pop()\n\tinput := ctx.memory.GetCopy(offset.Uint64(), size.Uint64())\n\n\tin.evm.create(ctx.contract, input, &value)\n\n\t// returned address from create\n\taddress := in.evm.create_addr.get(in.evm.level + 1)\n\n\taddr := new(uint256.Int).SetBytes(address.Bytes())\n\tstack.Push(addr)\n\n\treturn 0\n}", "func New() gocsi.StoragePluginProvider {\n\tsvc := service.New()\n\treturn &gocsi.StoragePlugin{\n\t\tController: svc,\n\t\tIdentity: svc,\n\t\tNode: svc,\n\t\tBeforeServe: svc.BeforeServe,\n\t\tRegisterAdditionalServers: svc.RegisterAdditionalServers,\n\n\t\tEnvVars: []string{\n\t\t\t// Enable request validation\n\t\t\tgocsi.EnvVarSpecReqValidation + \"=true\",\n\n\t\t\t// Enable serial volume access\n\t\t\tgocsi.EnvVarSerialVolAccess + \"=true\",\n\t\t},\n\t}\n}", "func (client IdentityClient) createTagNamespace(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/tagNamespaces\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateTagNamespaceResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func CreateHeader(command string, payload []byte, config *config.NodeConfig) []byte {\n\n\theader := make([]byte, 24)\n\n\t// https://developer.bitcoin.org/glossary.html#term-Start-string\n\tbinary.BigEndian.PutUint32(header, networkToStartString(config.GetNetwork()))\n\tcopy(header[4:16], []byte(command))\n\tbinary.BigEndian.PutUint32(header[16:20], uint32(len(payload)))\n\tchecksum := crypto.DoubleSha(payload)\n\n\tcopy(header[20:], checksum)\n\treturn header\n}", "func (client *StorageTargetsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, cacheName string, storageTargetName string, storagetarget StorageTarget, options *StorageTargetsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, 
\"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif cacheName == \"\" {\n\t\treturn nil, errors.New(\"parameter cacheName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{cacheName}\", url.PathEscape(cacheName))\n\tif storageTargetName == \"\" {\n\t\treturn nil, errors.New(\"parameter storageTargetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{storageTargetName}\", url.PathEscape(storageTargetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, storagetarget)\n}", "func (d *dirInode) createNewObject(\n\tctx context.Context,\n\tname Name,\n\tmetadata map[string]string) (o *gcs.Object, err error) {\n\t// Create an empty backing object for the child, failing if it already\n\t// exists.\n\tvar precond int64\n\tcreateReq := &gcs.CreateObjectRequest{\n\t\tName: name.GcsObjectName(),\n\t\tContents: strings.NewReader(\"\"),\n\t\tGenerationPrecondition: &precond,\n\t\tMetadata: metadata,\n\t}\n\n\to, err = d.bucket.CreateObject(ctx, createReq)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func NewReplaceStorageV1CSINodeUnauthorized() *ReplaceStorageV1CSINodeUnauthorized {\n\n\treturn &ReplaceStorageV1CSINodeUnauthorized{}\n}", "func TestNew_noMetaOnInit(t *testing.T) {\n\tt.Parallel()\n\n\ttmpDir := t.TempDir()\n\tbucket, err := fileblob.OpenBucket(tmpDir, nil)\n\trequire.NoError(t, err)\n\trequire.NoError(t,\n\t\tbucket.WriteAll(context.Background(), \".pulumi/stacks/dev.json\", []byte(\"bar\"), nil))\n\n\tctx := context.Background()\n\t_, err = New(ctx, diagtest.LogSink(t), \"file://\"+filepath.ToSlash(tmpDir), nil)\n\trequire.NoError(t, err)\n\n\tassert.NoFileExists(t, filepath.Join(tmpDir, \".pulumi\", \"meta.yaml\"))\n}", "func (c *FakeZkConn) Create(path string, data []byte, flags int32, acl []zk.ACL) (string, error) {\n\tc.history.addToHistory(\"Create\", path, data, flags, acl)\n\treturn \"\", nil\n}", "func (n *namespaceClient) Create(namespaceName string) error {\n\turl := fmt.Sprintf(\"%s%s\", n.url, nsh.AddURL)\n\tdata, err := json.Marshal(defaultNamespaceRequest(namespaceName))\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = n.client.DoHTTPRequest(\"POST\", url, bytes.NewBuffer(data))\n\tif err != nil {\n\t\treturn err\n\t}\n\tn.logger.Info(\"successfully created namespace\", zap.String(\"namespace\", namespaceName))\n\treturn nil\n}", "func (r ApiCreateHyperflexClusterStoragePolicyRequest) IfNoneMatch(ifNoneMatch string) ApiCreateHyperflexClusterStoragePolicyRequest {\n\tr.ifNoneMatch = &ifNoneMatch\n\treturn r\n}" ]
[ "0.63876355", "0.62214833", "0.6077855", "0.59198743", "0.56860864", "0.5582383", "0.54800385", "0.54257023", "0.5290674", "0.52353966", "0.51229566", "0.5113833", "0.51082814", "0.4998811", "0.49861813", "0.4984951", "0.4984342", "0.4967947", "0.4932291", "0.48867834", "0.487776", "0.48474652", "0.4842638", "0.4829285", "0.48117265", "0.47936964", "0.4793116", "0.47812447", "0.47533992", "0.47324088", "0.47225285", "0.46992943", "0.46627304", "0.46432903", "0.46342686", "0.46304902", "0.46229148", "0.46191964", "0.46191418", "0.46146235", "0.46137103", "0.4611886", "0.46082753", "0.46067795", "0.4594408", "0.45837674", "0.4581834", "0.45812643", "0.45784298", "0.45777604", "0.4565963", "0.45532665", "0.45499378", "0.45455414", "0.45447436", "0.4540639", "0.45388892", "0.45327666", "0.4531424", "0.4530402", "0.4525942", "0.45202553", "0.45043018", "0.45023048", "0.4498159", "0.44836417", "0.44715312", "0.44712776", "0.44606996", "0.44520682", "0.44509152", "0.4447772", "0.44443578", "0.44425368", "0.44403538", "0.44393718", "0.44284263", "0.4418594", "0.441854", "0.44157714", "0.44155148", "0.4413122", "0.44127804", "0.4410015", "0.4409967", "0.4409098", "0.44063807", "0.4403732", "0.44028598", "0.440037", "0.43976894", "0.4394422", "0.43891034", "0.43834728", "0.43743074", "0.43730712", "0.43682244", "0.43668425", "0.43651825", "0.43558002" ]
0.64319515
0
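Note: the createNode negative above (the aliyun ecs one) leans on a deferred cleanup that removes the instance whenever any step after creation fails. A self-contained sketch of that idiom follows; all function and parameter names are illustrative, not taken from the snippet.

```go
package provision

import "fmt"

// provisionNode creates an instance and then runs follow-up steps; if any of
// them fails, the deferred function tears the half-built instance down so no
// garbage is left behind, mirroring the snippet's defer-on-error pattern.
func provisionNode(create func() (string, error), configure func(string) error,
	remove func(string)) (string, error) {

	id, err := create()
	if err != nil {
		return "", fmt.Errorf("create failed: %w", err)
	}
	// err is re-assigned by every later step; if it is non-nil when the
	// function returns, the cleanup below fires.
	defer func() {
		if err != nil {
			remove(id)
		}
	}()
	if err = configure(id); err != nil {
		return "", fmt.Errorf("configure %s failed: %w", id, err)
	}
	return id, nil
}
```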
WithPayload adds the payload to the create storage v1 c s i node created response
func (o *CreateStorageV1CSINodeCreated) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *CreateStorageV1CSINodeCreated { o.Payload = payload return o }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *CreateStorageV1CSINodeCreated) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (o *CreateStorageV1CSINodeOK) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (o *ReplaceStorageV1CSINodeCreated) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedServiceAccountTokenCreated) SetPayload(payload *models.IoK8sAPIAuthenticationV1TokenRequest) {\n\to.Payload = payload\n}", "func (o *ReplaceStorageV1CSINodeCreated) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *ReplaceStorageV1CSINodeCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateClusterCreated) SetPayload(payload *models.Kluster) {\n\to.Payload = payload\n}", "func (o *CreateStorageV1CSINodeOK) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *CreateStorageV1CSINodeOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateHPCResourceCreated) SetPayload(payload *models.CreatedResponse) {\n\to.Payload = payload\n}", "func (o *ReplicateCreated) WithPayload(payload *models.SteeringRequestID) *ReplicateCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *ClientPermissionCreateInternalServerError) SetPayload(payload *ClientPermissionCreateInternalServerErrorBody) {\n\to.Payload = payload\n}", "func (o *CreateStorageV1CSINodeAccepted) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (r CreateRequest) Payload() *model.Payload {\n\tbuf, _ := json.Marshal(r)\n\treturn model.NewPostPayload(buf)\n}", "func (o *CreateExtensionsV1beta1NamespacedIngressCreated) SetPayload(payload *models.IoK8sAPIExtensionsV1beta1Ingress) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedServiceAccountTokenOK) SetPayload(payload *models.IoK8sAPIAuthenticationV1TokenRequest) {\n\to.Payload = payload\n}", "func (o *CreateZoneCreated) SetPayload(payload *models.CreateZoneResponse) {\n\to.Payload = payload\n}", "func (o *CreateExtensionsV1beta1NamespacedIngressCreated) WithPayload(payload *models.IoK8sAPIExtensionsV1beta1Ingress) *CreateExtensionsV1beta1NamespacedIngressCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *ReplaceExtensionsV1beta1NamespacedIngressCreated) WithPayload(payload *models.IoK8sAPIExtensionsV1beta1Ingress) *ReplaceExtensionsV1beta1NamespacedIngressCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *ReplaceStorageV1CSINodeOK) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (o *ReplaceApiextensionsV1beta1CustomResourceDefinitionCreated) SetPayload(payload *models.IoK8sApiextensionsApiserverPkgApisApiextensionsV1beta1CustomResourceDefinition) {\n\to.Payload = payload\n}", "func (o *PutProjectProjectNameStageStageNameServiceServiceNameResourceCreated) SetPayload(payload *models.Version) {\n\to.Payload = payload\n}", "func (o *ReplaceExtensionsV1beta1NamespacedIngressCreated) SetPayload(payload *models.IoK8sAPIExtensionsV1beta1Ingress) {\n\to.Payload = payload\n}", "func (o *CreateSpoeCreated) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *CreateStorageSSLCertificateCreated) SetPayload(payload *models.SslCertificate) {\n\to.Payload = payload\n}", "func (o *ReplaceNodeV1alpha1RuntimeClassCreated) SetPayload(payload *models.IoK8sAPINodeV1alpha1RuntimeClass) {\n\to.Payload = payload\n}", "func (o *DeleteStorageByIDOK) SetPayload(payload *models.Storage) {\n\to.Payload = payload\n}", "func (o *SetResourceCreated) SetPayload(payload 
*models.Resource) {\n\to.Payload = payload\n}", "func (o *CreateCoordinationV1NamespacedLeaseCreated) SetPayload(payload *models.IoK8sAPICoordinationV1Lease) {\n\to.Payload = payload\n}", "func (o *PostOrderCreated) SetPayload(payload *models.OrderCreateResponse) {\n\to.Payload = payload\n}", "func (o *V1CreateHelloOK) SetPayload(payload *models.CreateHelloResponse) {\n\to.Payload = payload\n}", "func (o *PutSlideSuperlikeCreated) SetPayload(payload models.Success) {\n\to.Payload = payload\n}", "func (rm *resourceManager) newCreateRequestPayload(\n\tr *resource,\n) (*svcsdk.CreateReplicationGroupInput, error) {\n\tres := &svcsdk.CreateReplicationGroupInput{}\n\n\tif r.ko.Spec.AtRestEncryptionEnabled != nil {\n\t\tres.SetAtRestEncryptionEnabled(*r.ko.Spec.AtRestEncryptionEnabled)\n\t}\n\tif r.ko.Spec.AuthToken != nil {\n\t\tres.SetAuthToken(*r.ko.Spec.AuthToken)\n\t}\n\tif r.ko.Spec.AutoMinorVersionUpgrade != nil {\n\t\tres.SetAutoMinorVersionUpgrade(*r.ko.Spec.AutoMinorVersionUpgrade)\n\t}\n\tif r.ko.Spec.AutomaticFailoverEnabled != nil {\n\t\tres.SetAutomaticFailoverEnabled(*r.ko.Spec.AutomaticFailoverEnabled)\n\t}\n\tif r.ko.Spec.CacheNodeType != nil {\n\t\tres.SetCacheNodeType(*r.ko.Spec.CacheNodeType)\n\t}\n\tif r.ko.Spec.CacheParameterGroupName != nil {\n\t\tres.SetCacheParameterGroupName(*r.ko.Spec.CacheParameterGroupName)\n\t}\n\tif r.ko.Spec.CacheSecurityGroupNames != nil {\n\t\tf6 := []*string{}\n\t\tfor _, f6iter := range r.ko.Spec.CacheSecurityGroupNames {\n\t\t\tvar f6elem string\n\t\t\tf6elem = *f6iter\n\t\t\tf6 = append(f6, &f6elem)\n\t\t}\n\t\tres.SetCacheSecurityGroupNames(f6)\n\t}\n\tif r.ko.Spec.CacheSubnetGroupName != nil {\n\t\tres.SetCacheSubnetGroupName(*r.ko.Spec.CacheSubnetGroupName)\n\t}\n\tif r.ko.Spec.Engine != nil {\n\t\tres.SetEngine(*r.ko.Spec.Engine)\n\t}\n\tif r.ko.Spec.EngineVersion != nil {\n\t\tres.SetEngineVersion(*r.ko.Spec.EngineVersion)\n\t}\n\tif r.ko.Spec.GlobalReplicationGroupID != nil {\n\t\tres.SetGlobalReplicationGroupId(*r.ko.Spec.GlobalReplicationGroupID)\n\t}\n\tif r.ko.Spec.KMSKeyID != nil {\n\t\tres.SetKmsKeyId(*r.ko.Spec.KMSKeyID)\n\t}\n\tif r.ko.Spec.MultiAZEnabled != nil {\n\t\tres.SetMultiAZEnabled(*r.ko.Spec.MultiAZEnabled)\n\t}\n\tif r.ko.Spec.NodeGroupConfiguration != nil {\n\t\tf13 := []*svcsdk.NodeGroupConfiguration{}\n\t\tfor _, f13iter := range r.ko.Spec.NodeGroupConfiguration {\n\t\t\tf13elem := &svcsdk.NodeGroupConfiguration{}\n\t\t\tif f13iter.NodeGroupID != nil {\n\t\t\t\tf13elem.SetNodeGroupId(*f13iter.NodeGroupID)\n\t\t\t}\n\t\t\tif f13iter.PrimaryAvailabilityZone != nil {\n\t\t\t\tf13elem.SetPrimaryAvailabilityZone(*f13iter.PrimaryAvailabilityZone)\n\t\t\t}\n\t\t\tif f13iter.PrimaryOutpostARN != nil {\n\t\t\t\tf13elem.SetPrimaryOutpostArn(*f13iter.PrimaryOutpostARN)\n\t\t\t}\n\t\t\tif f13iter.ReplicaAvailabilityZones != nil {\n\t\t\t\tf13elemf3 := []*string{}\n\t\t\t\tfor _, f13elemf3iter := range f13iter.ReplicaAvailabilityZones {\n\t\t\t\t\tvar f13elemf3elem string\n\t\t\t\t\tf13elemf3elem = *f13elemf3iter\n\t\t\t\t\tf13elemf3 = append(f13elemf3, &f13elemf3elem)\n\t\t\t\t}\n\t\t\t\tf13elem.SetReplicaAvailabilityZones(f13elemf3)\n\t\t\t}\n\t\t\tif f13iter.ReplicaCount != nil {\n\t\t\t\tf13elem.SetReplicaCount(*f13iter.ReplicaCount)\n\t\t\t}\n\t\t\tif f13iter.ReplicaOutpostARNs != nil {\n\t\t\t\tf13elemf5 := []*string{}\n\t\t\t\tfor _, f13elemf5iter := range f13iter.ReplicaOutpostARNs {\n\t\t\t\t\tvar f13elemf5elem string\n\t\t\t\t\tf13elemf5elem = *f13elemf5iter\n\t\t\t\t\tf13elemf5 = append(f13elemf5, 
&f13elemf5elem)\n\t\t\t\t}\n\t\t\t\tf13elem.SetReplicaOutpostArns(f13elemf5)\n\t\t\t}\n\t\t\tif f13iter.Slots != nil {\n\t\t\t\tf13elem.SetSlots(*f13iter.Slots)\n\t\t\t}\n\t\t\tf13 = append(f13, f13elem)\n\t\t}\n\t\tres.SetNodeGroupConfiguration(f13)\n\t}\n\tif r.ko.Spec.NotificationTopicARN != nil {\n\t\tres.SetNotificationTopicArn(*r.ko.Spec.NotificationTopicARN)\n\t}\n\tif r.ko.Spec.NumCacheClusters != nil {\n\t\tres.SetNumCacheClusters(*r.ko.Spec.NumCacheClusters)\n\t}\n\tif r.ko.Spec.NumNodeGroups != nil {\n\t\tres.SetNumNodeGroups(*r.ko.Spec.NumNodeGroups)\n\t}\n\tif r.ko.Spec.Port != nil {\n\t\tres.SetPort(*r.ko.Spec.Port)\n\t}\n\tif r.ko.Spec.PreferredCacheClusterAZs != nil {\n\t\tf18 := []*string{}\n\t\tfor _, f18iter := range r.ko.Spec.PreferredCacheClusterAZs {\n\t\t\tvar f18elem string\n\t\t\tf18elem = *f18iter\n\t\t\tf18 = append(f18, &f18elem)\n\t\t}\n\t\tres.SetPreferredCacheClusterAZs(f18)\n\t}\n\tif r.ko.Spec.PreferredMaintenanceWindow != nil {\n\t\tres.SetPreferredMaintenanceWindow(*r.ko.Spec.PreferredMaintenanceWindow)\n\t}\n\tif r.ko.Spec.PrimaryClusterID != nil {\n\t\tres.SetPrimaryClusterId(*r.ko.Spec.PrimaryClusterID)\n\t}\n\tif r.ko.Spec.ReplicasPerNodeGroup != nil {\n\t\tres.SetReplicasPerNodeGroup(*r.ko.Spec.ReplicasPerNodeGroup)\n\t}\n\tif r.ko.Spec.ReplicationGroupDescription != nil {\n\t\tres.SetReplicationGroupDescription(*r.ko.Spec.ReplicationGroupDescription)\n\t}\n\tif r.ko.Spec.ReplicationGroupID != nil {\n\t\tres.SetReplicationGroupId(*r.ko.Spec.ReplicationGroupID)\n\t}\n\tif r.ko.Spec.SecurityGroupIDs != nil {\n\t\tf24 := []*string{}\n\t\tfor _, f24iter := range r.ko.Spec.SecurityGroupIDs {\n\t\t\tvar f24elem string\n\t\t\tf24elem = *f24iter\n\t\t\tf24 = append(f24, &f24elem)\n\t\t}\n\t\tres.SetSecurityGroupIds(f24)\n\t}\n\tif r.ko.Spec.SnapshotARNs != nil {\n\t\tf25 := []*string{}\n\t\tfor _, f25iter := range r.ko.Spec.SnapshotARNs {\n\t\t\tvar f25elem string\n\t\t\tf25elem = *f25iter\n\t\t\tf25 = append(f25, &f25elem)\n\t\t}\n\t\tres.SetSnapshotArns(f25)\n\t}\n\tif r.ko.Spec.SnapshotName != nil {\n\t\tres.SetSnapshotName(*r.ko.Spec.SnapshotName)\n\t}\n\tif r.ko.Spec.SnapshotRetentionLimit != nil {\n\t\tres.SetSnapshotRetentionLimit(*r.ko.Spec.SnapshotRetentionLimit)\n\t}\n\tif r.ko.Spec.SnapshotWindow != nil {\n\t\tres.SetSnapshotWindow(*r.ko.Spec.SnapshotWindow)\n\t}\n\tif r.ko.Spec.Tags != nil {\n\t\tf29 := []*svcsdk.Tag{}\n\t\tfor _, f29iter := range r.ko.Spec.Tags {\n\t\t\tf29elem := &svcsdk.Tag{}\n\t\t\tif f29iter.Key != nil {\n\t\t\t\tf29elem.SetKey(*f29iter.Key)\n\t\t\t}\n\t\t\tif f29iter.Value != nil {\n\t\t\t\tf29elem.SetValue(*f29iter.Value)\n\t\t\t}\n\t\t\tf29 = append(f29, f29elem)\n\t\t}\n\t\tres.SetTags(f29)\n\t}\n\tif r.ko.Spec.TransitEncryptionEnabled != nil {\n\t\tres.SetTransitEncryptionEnabled(*r.ko.Spec.TransitEncryptionEnabled)\n\t}\n\tif r.ko.Spec.UserGroupIDs != nil {\n\t\tf31 := []*string{}\n\t\tfor _, f31iter := range r.ko.Spec.UserGroupIDs {\n\t\t\tvar f31elem string\n\t\t\tf31elem = *f31iter\n\t\t\tf31 = append(f31, &f31elem)\n\t\t}\n\t\tres.SetUserGroupIds(f31)\n\t}\n\n\treturn res, nil\n}", "func (rm *resourceManager) newCreateRequestPayload(\n\tctx context.Context,\n\tr *resource,\n) (*svcsdk.CreateModelPackageInput, error) {\n\tres := &svcsdk.CreateModelPackageInput{}\n\n\tif r.ko.Spec.CertifyForMarketplace != nil {\n\t\tres.SetCertifyForMarketplace(*r.ko.Spec.CertifyForMarketplace)\n\t}\n\tif r.ko.Spec.ClientToken != nil {\n\t\tres.SetClientToken(*r.ko.Spec.ClientToken)\n\t}\n\tif r.ko.Spec.InferenceSpecification != nil 
{\n\t\tf2 := &svcsdk.InferenceSpecification{}\n\t\tif r.ko.Spec.InferenceSpecification.Containers != nil {\n\t\t\tf2f0 := []*svcsdk.ModelPackageContainerDefinition{}\n\t\t\tfor _, f2f0iter := range r.ko.Spec.InferenceSpecification.Containers {\n\t\t\t\tf2f0elem := &svcsdk.ModelPackageContainerDefinition{}\n\t\t\t\tif f2f0iter.ContainerHostname != nil {\n\t\t\t\t\tf2f0elem.SetContainerHostname(*f2f0iter.ContainerHostname)\n\t\t\t\t}\n\t\t\t\tif f2f0iter.Image != nil {\n\t\t\t\t\tf2f0elem.SetImage(*f2f0iter.Image)\n\t\t\t\t}\n\t\t\t\tif f2f0iter.ImageDigest != nil {\n\t\t\t\t\tf2f0elem.SetImageDigest(*f2f0iter.ImageDigest)\n\t\t\t\t}\n\t\t\t\tif f2f0iter.ModelDataURL != nil {\n\t\t\t\t\tf2f0elem.SetModelDataUrl(*f2f0iter.ModelDataURL)\n\t\t\t\t}\n\t\t\t\tif f2f0iter.ProductID != nil {\n\t\t\t\t\tf2f0elem.SetProductId(*f2f0iter.ProductID)\n\t\t\t\t}\n\t\t\t\tf2f0 = append(f2f0, f2f0elem)\n\t\t\t}\n\t\t\tf2.SetContainers(f2f0)\n\t\t}\n\t\tif r.ko.Spec.InferenceSpecification.SupportedContentTypes != nil {\n\t\t\tf2f1 := []*string{}\n\t\t\tfor _, f2f1iter := range r.ko.Spec.InferenceSpecification.SupportedContentTypes {\n\t\t\t\tvar f2f1elem string\n\t\t\t\tf2f1elem = *f2f1iter\n\t\t\t\tf2f1 = append(f2f1, &f2f1elem)\n\t\t\t}\n\t\t\tf2.SetSupportedContentTypes(f2f1)\n\t\t}\n\t\tif r.ko.Spec.InferenceSpecification.SupportedRealtimeInferenceInstanceTypes != nil {\n\t\t\tf2f2 := []*string{}\n\t\t\tfor _, f2f2iter := range r.ko.Spec.InferenceSpecification.SupportedRealtimeInferenceInstanceTypes {\n\t\t\t\tvar f2f2elem string\n\t\t\t\tf2f2elem = *f2f2iter\n\t\t\t\tf2f2 = append(f2f2, &f2f2elem)\n\t\t\t}\n\t\t\tf2.SetSupportedRealtimeInferenceInstanceTypes(f2f2)\n\t\t}\n\t\tif r.ko.Spec.InferenceSpecification.SupportedResponseMIMETypes != nil {\n\t\t\tf2f3 := []*string{}\n\t\t\tfor _, f2f3iter := range r.ko.Spec.InferenceSpecification.SupportedResponseMIMETypes {\n\t\t\t\tvar f2f3elem string\n\t\t\t\tf2f3elem = *f2f3iter\n\t\t\t\tf2f3 = append(f2f3, &f2f3elem)\n\t\t\t}\n\t\t\tf2.SetSupportedResponseMIMETypes(f2f3)\n\t\t}\n\t\tif r.ko.Spec.InferenceSpecification.SupportedTransformInstanceTypes != nil {\n\t\t\tf2f4 := []*string{}\n\t\t\tfor _, f2f4iter := range r.ko.Spec.InferenceSpecification.SupportedTransformInstanceTypes {\n\t\t\t\tvar f2f4elem string\n\t\t\t\tf2f4elem = *f2f4iter\n\t\t\t\tf2f4 = append(f2f4, &f2f4elem)\n\t\t\t}\n\t\t\tf2.SetSupportedTransformInstanceTypes(f2f4)\n\t\t}\n\t\tres.SetInferenceSpecification(f2)\n\t}\n\tif r.ko.Spec.MetadataProperties != nil {\n\t\tf3 := &svcsdk.MetadataProperties{}\n\t\tif r.ko.Spec.MetadataProperties.CommitID != nil {\n\t\t\tf3.SetCommitId(*r.ko.Spec.MetadataProperties.CommitID)\n\t\t}\n\t\tif r.ko.Spec.MetadataProperties.GeneratedBy != nil {\n\t\t\tf3.SetGeneratedBy(*r.ko.Spec.MetadataProperties.GeneratedBy)\n\t\t}\n\t\tif r.ko.Spec.MetadataProperties.ProjectID != nil {\n\t\t\tf3.SetProjectId(*r.ko.Spec.MetadataProperties.ProjectID)\n\t\t}\n\t\tif r.ko.Spec.MetadataProperties.Repository != nil {\n\t\t\tf3.SetRepository(*r.ko.Spec.MetadataProperties.Repository)\n\t\t}\n\t\tres.SetMetadataProperties(f3)\n\t}\n\tif r.ko.Spec.ModelApprovalStatus != nil {\n\t\tres.SetModelApprovalStatus(*r.ko.Spec.ModelApprovalStatus)\n\t}\n\tif r.ko.Spec.ModelMetrics != nil {\n\t\tf5 := &svcsdk.ModelMetrics{}\n\t\tif r.ko.Spec.ModelMetrics.Bias != nil {\n\t\t\tf5f0 := &svcsdk.Bias{}\n\t\t\tif r.ko.Spec.ModelMetrics.Bias.Report != nil {\n\t\t\t\tf5f0f0 := &svcsdk.MetricsSource{}\n\t\t\t\tif r.ko.Spec.ModelMetrics.Bias.Report.ContentDigest != nil 
{\n\t\t\t\t\tf5f0f0.SetContentDigest(*r.ko.Spec.ModelMetrics.Bias.Report.ContentDigest)\n\t\t\t\t}\n\t\t\t\tif r.ko.Spec.ModelMetrics.Bias.Report.ContentType != nil {\n\t\t\t\t\tf5f0f0.SetContentType(*r.ko.Spec.ModelMetrics.Bias.Report.ContentType)\n\t\t\t\t}\n\t\t\t\tif r.ko.Spec.ModelMetrics.Bias.Report.S3URI != nil {\n\t\t\t\t\tf5f0f0.SetS3Uri(*r.ko.Spec.ModelMetrics.Bias.Report.S3URI)\n\t\t\t\t}\n\t\t\t\tf5f0.SetReport(f5f0f0)\n\t\t\t}\n\t\t\tf5.SetBias(f5f0)\n\t\t}\n\t\tif r.ko.Spec.ModelMetrics.Explainability != nil {\n\t\t\tf5f1 := &svcsdk.Explainability{}\n\t\t\tif r.ko.Spec.ModelMetrics.Explainability.Report != nil {\n\t\t\t\tf5f1f0 := &svcsdk.MetricsSource{}\n\t\t\t\tif r.ko.Spec.ModelMetrics.Explainability.Report.ContentDigest != nil {\n\t\t\t\t\tf5f1f0.SetContentDigest(*r.ko.Spec.ModelMetrics.Explainability.Report.ContentDigest)\n\t\t\t\t}\n\t\t\t\tif r.ko.Spec.ModelMetrics.Explainability.Report.ContentType != nil {\n\t\t\t\t\tf5f1f0.SetContentType(*r.ko.Spec.ModelMetrics.Explainability.Report.ContentType)\n\t\t\t\t}\n\t\t\t\tif r.ko.Spec.ModelMetrics.Explainability.Report.S3URI != nil {\n\t\t\t\t\tf5f1f0.SetS3Uri(*r.ko.Spec.ModelMetrics.Explainability.Report.S3URI)\n\t\t\t\t}\n\t\t\t\tf5f1.SetReport(f5f1f0)\n\t\t\t}\n\t\t\tf5.SetExplainability(f5f1)\n\t\t}\n\t\tif r.ko.Spec.ModelMetrics.ModelDataQuality != nil {\n\t\t\tf5f2 := &svcsdk.ModelDataQuality{}\n\t\t\tif r.ko.Spec.ModelMetrics.ModelDataQuality.Constraints != nil {\n\t\t\t\tf5f2f0 := &svcsdk.MetricsSource{}\n\t\t\t\tif r.ko.Spec.ModelMetrics.ModelDataQuality.Constraints.ContentDigest != nil {\n\t\t\t\t\tf5f2f0.SetContentDigest(*r.ko.Spec.ModelMetrics.ModelDataQuality.Constraints.ContentDigest)\n\t\t\t\t}\n\t\t\t\tif r.ko.Spec.ModelMetrics.ModelDataQuality.Constraints.ContentType != nil {\n\t\t\t\t\tf5f2f0.SetContentType(*r.ko.Spec.ModelMetrics.ModelDataQuality.Constraints.ContentType)\n\t\t\t\t}\n\t\t\t\tif r.ko.Spec.ModelMetrics.ModelDataQuality.Constraints.S3URI != nil {\n\t\t\t\t\tf5f2f0.SetS3Uri(*r.ko.Spec.ModelMetrics.ModelDataQuality.Constraints.S3URI)\n\t\t\t\t}\n\t\t\t\tf5f2.SetConstraints(f5f2f0)\n\t\t\t}\n\t\t\tif r.ko.Spec.ModelMetrics.ModelDataQuality.Statistics != nil {\n\t\t\t\tf5f2f1 := &svcsdk.MetricsSource{}\n\t\t\t\tif r.ko.Spec.ModelMetrics.ModelDataQuality.Statistics.ContentDigest != nil {\n\t\t\t\t\tf5f2f1.SetContentDigest(*r.ko.Spec.ModelMetrics.ModelDataQuality.Statistics.ContentDigest)\n\t\t\t\t}\n\t\t\t\tif r.ko.Spec.ModelMetrics.ModelDataQuality.Statistics.ContentType != nil {\n\t\t\t\t\tf5f2f1.SetContentType(*r.ko.Spec.ModelMetrics.ModelDataQuality.Statistics.ContentType)\n\t\t\t\t}\n\t\t\t\tif r.ko.Spec.ModelMetrics.ModelDataQuality.Statistics.S3URI != nil {\n\t\t\t\t\tf5f2f1.SetS3Uri(*r.ko.Spec.ModelMetrics.ModelDataQuality.Statistics.S3URI)\n\t\t\t\t}\n\t\t\t\tf5f2.SetStatistics(f5f2f1)\n\t\t\t}\n\t\t\tf5.SetModelDataQuality(f5f2)\n\t\t}\n\t\tif r.ko.Spec.ModelMetrics.ModelQuality != nil {\n\t\t\tf5f3 := &svcsdk.ModelQuality{}\n\t\t\tif r.ko.Spec.ModelMetrics.ModelQuality.Constraints != nil {\n\t\t\t\tf5f3f0 := &svcsdk.MetricsSource{}\n\t\t\t\tif r.ko.Spec.ModelMetrics.ModelQuality.Constraints.ContentDigest != nil {\n\t\t\t\t\tf5f3f0.SetContentDigest(*r.ko.Spec.ModelMetrics.ModelQuality.Constraints.ContentDigest)\n\t\t\t\t}\n\t\t\t\tif r.ko.Spec.ModelMetrics.ModelQuality.Constraints.ContentType != nil {\n\t\t\t\t\tf5f3f0.SetContentType(*r.ko.Spec.ModelMetrics.ModelQuality.Constraints.ContentType)\n\t\t\t\t}\n\t\t\t\tif r.ko.Spec.ModelMetrics.ModelQuality.Constraints.S3URI != nil 
{\n\t\t\t\t\tf5f3f0.SetS3Uri(*r.ko.Spec.ModelMetrics.ModelQuality.Constraints.S3URI)\n\t\t\t\t}\n\t\t\t\tf5f3.SetConstraints(f5f3f0)\n\t\t\t}\n\t\t\tif r.ko.Spec.ModelMetrics.ModelQuality.Statistics != nil {\n\t\t\t\tf5f3f1 := &svcsdk.MetricsSource{}\n\t\t\t\tif r.ko.Spec.ModelMetrics.ModelQuality.Statistics.ContentDigest != nil {\n\t\t\t\t\tf5f3f1.SetContentDigest(*r.ko.Spec.ModelMetrics.ModelQuality.Statistics.ContentDigest)\n\t\t\t\t}\n\t\t\t\tif r.ko.Spec.ModelMetrics.ModelQuality.Statistics.ContentType != nil {\n\t\t\t\t\tf5f3f1.SetContentType(*r.ko.Spec.ModelMetrics.ModelQuality.Statistics.ContentType)\n\t\t\t\t}\n\t\t\t\tif r.ko.Spec.ModelMetrics.ModelQuality.Statistics.S3URI != nil {\n\t\t\t\t\tf5f3f1.SetS3Uri(*r.ko.Spec.ModelMetrics.ModelQuality.Statistics.S3URI)\n\t\t\t\t}\n\t\t\t\tf5f3.SetStatistics(f5f3f1)\n\t\t\t}\n\t\t\tf5.SetModelQuality(f5f3)\n\t\t}\n\t\tres.SetModelMetrics(f5)\n\t}\n\tif r.ko.Spec.ModelPackageDescription != nil {\n\t\tres.SetModelPackageDescription(*r.ko.Spec.ModelPackageDescription)\n\t}\n\tif r.ko.Spec.ModelPackageGroupName != nil {\n\t\tres.SetModelPackageGroupName(*r.ko.Spec.ModelPackageGroupName)\n\t}\n\tif r.ko.Spec.ModelPackageName != nil {\n\t\tres.SetModelPackageName(*r.ko.Spec.ModelPackageName)\n\t}\n\tif r.ko.Spec.SourceAlgorithmSpecification != nil {\n\t\tf9 := &svcsdk.SourceAlgorithmSpecification{}\n\t\tif r.ko.Spec.SourceAlgorithmSpecification.SourceAlgorithms != nil {\n\t\t\tf9f0 := []*svcsdk.SourceAlgorithm{}\n\t\t\tfor _, f9f0iter := range r.ko.Spec.SourceAlgorithmSpecification.SourceAlgorithms {\n\t\t\t\tf9f0elem := &svcsdk.SourceAlgorithm{}\n\t\t\t\tif f9f0iter.AlgorithmName != nil {\n\t\t\t\t\tf9f0elem.SetAlgorithmName(*f9f0iter.AlgorithmName)\n\t\t\t\t}\n\t\t\t\tif f9f0iter.ModelDataURL != nil {\n\t\t\t\t\tf9f0elem.SetModelDataUrl(*f9f0iter.ModelDataURL)\n\t\t\t\t}\n\t\t\t\tf9f0 = append(f9f0, f9f0elem)\n\t\t\t}\n\t\t\tf9.SetSourceAlgorithms(f9f0)\n\t\t}\n\t\tres.SetSourceAlgorithmSpecification(f9)\n\t}\n\tif r.ko.Spec.Tags != nil {\n\t\tf10 := []*svcsdk.Tag{}\n\t\tfor _, f10iter := range r.ko.Spec.Tags {\n\t\t\tf10elem := &svcsdk.Tag{}\n\t\t\tif f10iter.Key != nil {\n\t\t\t\tf10elem.SetKey(*f10iter.Key)\n\t\t\t}\n\t\t\tif f10iter.Value != nil {\n\t\t\t\tf10elem.SetValue(*f10iter.Value)\n\t\t\t}\n\t\t\tf10 = append(f10, f10elem)\n\t\t}\n\t\tres.SetTags(f10)\n\t}\n\tif r.ko.Spec.ValidationSpecification != nil {\n\t\tf11 := &svcsdk.ModelPackageValidationSpecification{}\n\t\tif r.ko.Spec.ValidationSpecification.ValidationProfiles != nil {\n\t\t\tf11f0 := []*svcsdk.ModelPackageValidationProfile{}\n\t\t\tfor _, f11f0iter := range r.ko.Spec.ValidationSpecification.ValidationProfiles {\n\t\t\t\tf11f0elem := &svcsdk.ModelPackageValidationProfile{}\n\t\t\t\tif f11f0iter.ProfileName != nil {\n\t\t\t\t\tf11f0elem.SetProfileName(*f11f0iter.ProfileName)\n\t\t\t\t}\n\t\t\t\tif f11f0iter.TransformJobDefinition != nil {\n\t\t\t\t\tf11f0elemf1 := &svcsdk.TransformJobDefinition{}\n\t\t\t\t\tif f11f0iter.TransformJobDefinition.BatchStrategy != nil {\n\t\t\t\t\t\tf11f0elemf1.SetBatchStrategy(*f11f0iter.TransformJobDefinition.BatchStrategy)\n\t\t\t\t\t}\n\t\t\t\t\tif f11f0iter.TransformJobDefinition.Environment != nil {\n\t\t\t\t\t\tf11f0elemf1f1 := map[string]*string{}\n\t\t\t\t\t\tfor f11f0elemf1f1key, f11f0elemf1f1valiter := range f11f0iter.TransformJobDefinition.Environment {\n\t\t\t\t\t\t\tvar f11f0elemf1f1val string\n\t\t\t\t\t\t\tf11f0elemf1f1val = *f11f0elemf1f1valiter\n\t\t\t\t\t\t\tf11f0elemf1f1[f11f0elemf1f1key] = 
&f11f0elemf1f1val\n\t\t\t\t\t\t}\n\t\t\t\t\t\tf11f0elemf1.SetEnvironment(f11f0elemf1f1)\n\t\t\t\t\t}\n\t\t\t\t\tif f11f0iter.TransformJobDefinition.MaxConcurrentTransforms != nil {\n\t\t\t\t\t\tf11f0elemf1.SetMaxConcurrentTransforms(*f11f0iter.TransformJobDefinition.MaxConcurrentTransforms)\n\t\t\t\t\t}\n\t\t\t\t\tif f11f0iter.TransformJobDefinition.MaxPayloadInMB != nil {\n\t\t\t\t\t\tf11f0elemf1.SetMaxPayloadInMB(*f11f0iter.TransformJobDefinition.MaxPayloadInMB)\n\t\t\t\t\t}\n\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformInput != nil {\n\t\t\t\t\t\tf11f0elemf1f4 := &svcsdk.TransformInput{}\n\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformInput.CompressionType != nil {\n\t\t\t\t\t\t\tf11f0elemf1f4.SetCompressionType(*f11f0iter.TransformJobDefinition.TransformInput.CompressionType)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformInput.ContentType != nil {\n\t\t\t\t\t\t\tf11f0elemf1f4.SetContentType(*f11f0iter.TransformJobDefinition.TransformInput.ContentType)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformInput.DataSource != nil {\n\t\t\t\t\t\t\tf11f0elemf1f4f2 := &svcsdk.TransformDataSource{}\n\t\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformInput.DataSource.S3DataSource != nil {\n\t\t\t\t\t\t\t\tf11f0elemf1f4f2f0 := &svcsdk.TransformS3DataSource{}\n\t\t\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformInput.DataSource.S3DataSource.S3DataType != nil {\n\t\t\t\t\t\t\t\t\tf11f0elemf1f4f2f0.SetS3DataType(*f11f0iter.TransformJobDefinition.TransformInput.DataSource.S3DataSource.S3DataType)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformInput.DataSource.S3DataSource.S3URI != nil {\n\t\t\t\t\t\t\t\t\tf11f0elemf1f4f2f0.SetS3Uri(*f11f0iter.TransformJobDefinition.TransformInput.DataSource.S3DataSource.S3URI)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tf11f0elemf1f4f2.SetS3DataSource(f11f0elemf1f4f2f0)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tf11f0elemf1f4.SetDataSource(f11f0elemf1f4f2)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformInput.SplitType != nil {\n\t\t\t\t\t\t\tf11f0elemf1f4.SetSplitType(*f11f0iter.TransformJobDefinition.TransformInput.SplitType)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tf11f0elemf1.SetTransformInput(f11f0elemf1f4)\n\t\t\t\t\t}\n\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformOutput != nil {\n\t\t\t\t\t\tf11f0elemf1f5 := &svcsdk.TransformOutput{}\n\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformOutput.Accept != nil {\n\t\t\t\t\t\t\tf11f0elemf1f5.SetAccept(*f11f0iter.TransformJobDefinition.TransformOutput.Accept)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformOutput.AssembleWith != nil {\n\t\t\t\t\t\t\tf11f0elemf1f5.SetAssembleWith(*f11f0iter.TransformJobDefinition.TransformOutput.AssembleWith)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformOutput.KMSKeyID != nil {\n\t\t\t\t\t\t\tf11f0elemf1f5.SetKmsKeyId(*f11f0iter.TransformJobDefinition.TransformOutput.KMSKeyID)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformOutput.S3OutputPath != nil {\n\t\t\t\t\t\t\tf11f0elemf1f5.SetS3OutputPath(*f11f0iter.TransformJobDefinition.TransformOutput.S3OutputPath)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tf11f0elemf1.SetTransformOutput(f11f0elemf1f5)\n\t\t\t\t\t}\n\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformResources != nil {\n\t\t\t\t\t\tf11f0elemf1f6 := &svcsdk.TransformResources{}\n\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformResources.InstanceCount != nil 
{\n\t\t\t\t\t\t\tf11f0elemf1f6.SetInstanceCount(*f11f0iter.TransformJobDefinition.TransformResources.InstanceCount)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformResources.InstanceType != nil {\n\t\t\t\t\t\t\tf11f0elemf1f6.SetInstanceType(*f11f0iter.TransformJobDefinition.TransformResources.InstanceType)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif f11f0iter.TransformJobDefinition.TransformResources.VolumeKMSKeyID != nil {\n\t\t\t\t\t\t\tf11f0elemf1f6.SetVolumeKmsKeyId(*f11f0iter.TransformJobDefinition.TransformResources.VolumeKMSKeyID)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tf11f0elemf1.SetTransformResources(f11f0elemf1f6)\n\t\t\t\t\t}\n\t\t\t\t\tf11f0elem.SetTransformJobDefinition(f11f0elemf1)\n\t\t\t\t}\n\t\t\t\tf11f0 = append(f11f0, f11f0elem)\n\t\t\t}\n\t\t\tf11.SetValidationProfiles(f11f0)\n\t\t}\n\t\tif r.ko.Spec.ValidationSpecification.ValidationRole != nil {\n\t\t\tf11.SetValidationRole(*r.ko.Spec.ValidationSpecification.ValidationRole)\n\t\t}\n\t\tres.SetValidationSpecification(f11)\n\t}\n\n\treturn res, nil\n}", "func (o *CreateNetworkingV1beta1NamespacedIngressCreated) SetPayload(payload *models.IoK8sAPINetworkingV1beta1Ingress) {\n\to.Payload = payload\n}", "func (o *ClientPermissionCreateOK) SetPayload(payload *ClientPermissionCreateOKBody) {\n\to.Payload = payload\n}", "func (o *CreateNetworkingV1beta1NamespacedIngressCreated) WithPayload(payload *models.IoK8sAPINetworkingV1beta1Ingress) *CreateNetworkingV1beta1NamespacedIngressCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateClusterCreated) WithPayload(payload *models.Kluster) *CreateClusterCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *ServiceAddCreated) SetPayload(payload *models.Service) {\n\to.Payload = payload\n}", "func BuildCreatePayload(productCreateBody string, productCreateToken string) (*product.CreatePayload, error) {\n\tvar err error\n\tvar body CreateRequestBody\n\t{\n\t\terr = json.Unmarshal([]byte(productCreateBody), &body)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid JSON for body, \\nerror: %s, \\nexample of valid JSON:\\n%s\", err, \"'{\\n \\\"code\\\": \\\"123asd123123asd\\\",\\n \\\"cost_price\\\": 123,\\n \\\"founder_id\\\": \\\"519151ca-6250-4eec-8016-1e14a68dc448\\\",\\n \\\"image\\\": \\\"/images/123.jpg\\\",\\n \\\"is_shelves\\\": false,\\n \\\"market_price\\\": 123,\\n \\\"name\\\": \\\"灌装辣椒\\\",\\n \\\"note\\\": \\\"备注\\\",\\n \\\"size\\\": \\\"瓶\\\",\\n \\\"type\\\": 1,\\n \\\"unit\\\": 1\\n }'\")\n\t\t}\n\t\tif !(body.Unit == 1 || body.Unit == 2 || body.Unit == 3 || body.Unit == 4 || body.Unit == 5 || body.Unit == 6 || body.Unit == 7 || body.Unit == 8) {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidEnumValueError(\"body.unit\", body.Unit, []interface{}{1, 2, 3, 4, 5, 6, 7, 8}))\n\t\t}\n\t\tif !(body.Type == 1 || body.Type == 2 || body.Type == 3 || body.Type == 4 || body.Type == 5 || body.Type == 6 || body.Type == 7 || body.Type == 8) {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidEnumValueError(\"body.type\", body.Type, []interface{}{1, 2, 3, 4, 5, 6, 7, 8}))\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tvar token string\n\t{\n\t\ttoken = productCreateToken\n\t}\n\tv := &product.CreatePayload{\n\t\tName: body.Name,\n\t\tUnit: body.Unit,\n\t\tCostPrice: body.CostPrice,\n\t\tMarketPrice: body.MarketPrice,\n\t\tNote: body.Note,\n\t\tImage: body.Image,\n\t\tCode: body.Code,\n\t\tSize: body.Size,\n\t\tType: body.Type,\n\t\tIsShelves: body.IsShelves,\n\t\tFounderID: body.FounderID,\n\t}\n\tv.Token = token\n\n\treturn v, nil\n}", "func 
NewPayload(v interface{}) Payload {\n\treturn makeJSONPayload(v)\n}", "func (o *SemverGenerateCreated) SetPayload(payload *models.SemverTagSet) {\n\to.Payload = payload\n}", "func (o *CreateFoldersCreated) SetPayload(payload *models.CreateFolderResp) {\n\to.Payload = payload\n}", "func (o *CreateAuthenticationV1beta1TokenReviewCreated) SetPayload(payload *models.IoK8sAPIAuthenticationV1beta1TokenReview) {\n\to.Payload = payload\n}", "func (o *CreateSpoeCreated) WithPayload(payload string) *CreateSpoeCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateClusterDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *PutSlideLikeCreated) SetPayload(payload models.Success) {\n\to.Payload = payload\n}", "func (o *DeleteStorageByIDNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *ReplicateCreated) SetPayload(payload *models.SteeringRequestID) {\n\to.Payload = payload\n}", "func (o *CreateStorageSSLCertificateCreated) WithPayload(payload *models.SslCertificate) *CreateStorageSSLCertificateCreated {\n\to.Payload = payload\n\treturn o\n}", "func (nf *NetworkPayload) SetPayload(newpayload []byte) {\n}", "func (rm *resourceManager) newCreateRequestPayload(\n\tr *resource,\n) (*svcsdk.CreateStageInput, error) {\n\tres := &svcsdk.CreateStageInput{}\n\n\tif r.ko.Spec.AccessLogSettings != nil {\n\t\tf0 := &svcsdk.AccessLogSettings{}\n\t\tif r.ko.Spec.AccessLogSettings.DestinationARN != nil {\n\t\t\tf0.SetDestinationArn(*r.ko.Spec.AccessLogSettings.DestinationARN)\n\t\t}\n\t\tif r.ko.Spec.AccessLogSettings.Format != nil {\n\t\t\tf0.SetFormat(*r.ko.Spec.AccessLogSettings.Format)\n\t\t}\n\t\tres.SetAccessLogSettings(f0)\n\t}\n\tif r.ko.Spec.APIID != nil {\n\t\tres.SetApiId(*r.ko.Spec.APIID)\n\t}\n\tif r.ko.Spec.AutoDeploy != nil {\n\t\tres.SetAutoDeploy(*r.ko.Spec.AutoDeploy)\n\t}\n\tif r.ko.Spec.ClientCertificateID != nil {\n\t\tres.SetClientCertificateId(*r.ko.Spec.ClientCertificateID)\n\t}\n\tif r.ko.Spec.DefaultRouteSettings != nil {\n\t\tf4 := &svcsdk.RouteSettings{}\n\t\tif r.ko.Spec.DefaultRouteSettings.DataTraceEnabled != nil {\n\t\t\tf4.SetDataTraceEnabled(*r.ko.Spec.DefaultRouteSettings.DataTraceEnabled)\n\t\t}\n\t\tif r.ko.Spec.DefaultRouteSettings.DetailedMetricsEnabled != nil {\n\t\t\tf4.SetDetailedMetricsEnabled(*r.ko.Spec.DefaultRouteSettings.DetailedMetricsEnabled)\n\t\t}\n\t\tif r.ko.Spec.DefaultRouteSettings.LoggingLevel != nil {\n\t\t\tf4.SetLoggingLevel(*r.ko.Spec.DefaultRouteSettings.LoggingLevel)\n\t\t}\n\t\tif r.ko.Spec.DefaultRouteSettings.ThrottlingBurstLimit != nil {\n\t\t\tf4.SetThrottlingBurstLimit(*r.ko.Spec.DefaultRouteSettings.ThrottlingBurstLimit)\n\t\t}\n\t\tif r.ko.Spec.DefaultRouteSettings.ThrottlingRateLimit != nil {\n\t\t\tf4.SetThrottlingRateLimit(*r.ko.Spec.DefaultRouteSettings.ThrottlingRateLimit)\n\t\t}\n\t\tres.SetDefaultRouteSettings(f4)\n\t}\n\tif r.ko.Spec.DeploymentID != nil {\n\t\tres.SetDeploymentId(*r.ko.Spec.DeploymentID)\n\t}\n\tif r.ko.Spec.Description != nil {\n\t\tres.SetDescription(*r.ko.Spec.Description)\n\t}\n\tif r.ko.Spec.RouteSettings != nil {\n\t\tf7 := map[string]*svcsdk.RouteSettings{}\n\t\tfor f7key, f7valiter := range r.ko.Spec.RouteSettings {\n\t\t\tf7val := &svcsdk.RouteSettings{}\n\t\t\tif f7valiter.DataTraceEnabled != nil {\n\t\t\t\tf7val.SetDataTraceEnabled(*f7valiter.DataTraceEnabled)\n\t\t\t}\n\t\t\tif f7valiter.DetailedMetricsEnabled != nil {\n\t\t\t\tf7val.SetDetailedMetricsEnabled(*f7valiter.DetailedMetricsEnabled)\n\t\t\t}\n\t\t\tif f7valiter.LoggingLevel != 
nil {\n\t\t\t\tf7val.SetLoggingLevel(*f7valiter.LoggingLevel)\n\t\t\t}\n\t\t\tif f7valiter.ThrottlingBurstLimit != nil {\n\t\t\t\tf7val.SetThrottlingBurstLimit(*f7valiter.ThrottlingBurstLimit)\n\t\t\t}\n\t\t\tif f7valiter.ThrottlingRateLimit != nil {\n\t\t\t\tf7val.SetThrottlingRateLimit(*f7valiter.ThrottlingRateLimit)\n\t\t\t}\n\t\t\tf7[f7key] = f7val\n\t\t}\n\t\tres.SetRouteSettings(f7)\n\t}\n\tif r.ko.Spec.StageName != nil {\n\t\tres.SetStageName(*r.ko.Spec.StageName)\n\t}\n\tif r.ko.Spec.StageVariables != nil {\n\t\tf9 := map[string]*string{}\n\t\tfor f9key, f9valiter := range r.ko.Spec.StageVariables {\n\t\t\tvar f9val string\n\t\t\tf9val = *f9valiter\n\t\t\tf9[f9key] = &f9val\n\t\t}\n\t\tres.SetStageVariables(f9)\n\t}\n\tif r.ko.Spec.Tags != nil {\n\t\tf10 := map[string]*string{}\n\t\tfor f10key, f10valiter := range r.ko.Spec.Tags {\n\t\t\tvar f10val string\n\t\t\tf10val = *f10valiter\n\t\t\tf10[f10key] = &f10val\n\t\t}\n\t\tres.SetTags(f10)\n\t}\n\n\treturn res, nil\n}", "func CreatePayload(tr *v1beta1.TaskRun) in_toto.Link {\n\tl := in_toto.Link{\n\t\tType: \"_link\",\n\t}\n\n\tl.Materials = map[string]interface{}{}\n\tfor _, r := range tr.Spec.Resources.Inputs {\n\t\tfor _, rr := range tr.Status.ResourcesResult {\n\t\t\tif r.Name == rr.ResourceName {\n\t\t\t\tl.Materials[rr.ResourceName] = rr\n\t\t\t}\n\t\t}\n\t}\n\n\tl.Products = map[string]interface{}{}\n\tfor _, r := range tr.Spec.Resources.Outputs {\n\t\tfor _, rr := range tr.Status.ResourcesResult {\n\t\t\tif r.Name == rr.ResourceName {\n\t\t\t\tl.Products[rr.ResourceName] = rr\n\t\t\t}\n\t\t}\n\t}\n\n\tl.Environment = map[string]interface{}{}\n\t// Add Tekton release info here\n\tl.Environment[\"tekton\"] = tr.Status\n\treturn l\n}", "func NewPayload() Payload {\n\tp := Payload{-1, \"\", 0, \"\", 0}\n\treturn p\n}", "func (o *ReplaceCertificatesV1CertificateSigningRequestCreated) SetPayload(payload *models.IoK8sAPICertificatesV1CertificateSigningRequest) {\n\to.Payload = payload\n}", "func (o *CreateDiscoveryV1beta1NamespacedEndpointSliceCreated) SetPayload(payload *models.IoK8sAPIDiscoveryV1beta1EndpointSlice) {\n\to.Payload = payload\n}", "func (o *CreateStorageSSLCertificateDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *SetResourceCreated) WithPayload(payload *models.Resource) *SetResourceCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateOK) WithPayload(payload *models.Event) *CreateOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateRbacAuthorizationV1alpha1NamespacedRoleCreated) SetPayload(payload *models.IoK8sAPIRbacV1alpha1Role) {\n\to.Payload = payload\n}", "func (o *CreateACLCreated) SetPayload(payload *models.ACL) {\n\to.Payload = payload\n}", "func (o *CreateTaskInternalServerError) SetPayload(payload interface{}) {\n\to.Payload = payload\n}", "func (o *CreateUserGardenCreated) SetPayload(payload *models.Garden) {\n\to.Payload = payload\n}", "func (o *PostInteractionCreated) WithPayload(payload *models.ConsoleInteraction) *PostInteractionCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateOK) SetPayload(payload *models.Event) {\n\to.Payload = payload\n}", "func (o *ClientPermissionCreateNotFound) SetPayload(payload *ClientPermissionCreateNotFoundBody) {\n\to.Payload = payload\n}", "func (s *EngineClient) NewPayload(ctx context.Context, payload *eth.ExecutionPayload) (*eth.PayloadStatusV1, error) {\n\te := s.log.New(\"block_hash\", payload.BlockHash)\n\te.Trace(\"sending payload for execution\")\n\n\texecCtx, cancel := 
context.WithTimeout(ctx, time.Second*5)\n\tdefer cancel()\n\tvar result eth.PayloadStatusV1\n\terr := s.client.CallContext(execCtx, &result, \"engine_newPayloadV1\", payload)\n\te.Trace(\"Received payload execution result\", \"status\", result.Status, \"latestValidHash\", result.LatestValidHash, \"message\", result.ValidationError)\n\tif err != nil {\n\t\te.Error(\"Payload execution failed\", \"err\", err)\n\t\treturn nil, fmt.Errorf(\"failed to execute payload: %w\", err)\n\t}\n\treturn &result, nil\n}", "func (l *Loader) newPayload(name string, ver int) int {\n\tpi := len(l.payloads)\n\tpp := l.allocPayload()\n\tpp.name = name\n\tpp.ver = ver\n\tl.payloads = append(l.payloads, pp)\n\tl.growExtAttrBitmaps()\n\treturn pi\n}", "func (o *CreateTaskInternalServerError) WithPayload(payload interface{}) *CreateTaskInternalServerError {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateDocumentCreated) SetPayload(payload *internalmessages.Document) {\n\to.Payload = payload\n}", "func (o *ReplaceAppsV1NamespacedReplicaSetScaleCreated) SetPayload(payload *models.IoK8sAPIAutoscalingV1Scale) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedPodBindingCreated) SetPayload(payload *models.IoK8sAPICoreV1Binding) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedServiceAccountTokenCreated) WithPayload(payload *models.IoK8sAPIAuthenticationV1TokenRequest) *CreateCoreV1NamespacedServiceAccountTokenCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateACLAccepted) SetPayload(payload *models.ACL) {\n\to.Payload = payload\n}", "func (o *AddKeypairCreated) SetPayload(payload models.ULID) {\n\to.Payload = payload\n}", "func (o *WeaviateThingTemplatesCreateAccepted) SetPayload(payload *models.ThingTemplateGetResponse) {\n\to.Payload = payload\n}", "func (o *CreateBatchV1NamespacedJobCreated) SetPayload(payload *models.IoK8sAPIBatchV1Job) {\n\to.Payload = payload\n}", "func (o *RegisterInfraEnvCreated) WithPayload(payload *models.InfraEnv) *RegisterInfraEnvCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *AddReleasesCreated) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *GetServicesHaproxyRuntimeAclsIDOK) SetPayload(payload *models.ACLFile) {\n\to.Payload = payload\n}", "func (o *V1CreateHelloInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateCurrentAPISessionCertificateOK) SetPayload(payload *rest_model.CreateCurrentAPISessionCertificateEnvelope) {\n\to.Payload = payload\n}", "func (o *CreateTCPCheckCreated) WithPayload(payload *models.TCPCheck) *CreateTCPCheckCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *RegisterInfraEnvCreated) SetPayload(payload *models.InfraEnv) {\n\to.Payload = payload\n}", "func (o *PostReposOwnerRepoKeysCreated) SetPayload(payload *models.UserKeysKeyID) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedServiceAccountTokenAccepted) SetPayload(payload *models.IoK8sAPIAuthenticationV1TokenRequest) {\n\to.Payload = payload\n}", "func (s *service) CreateWorkload(id string, payload io.Reader, contentType string) (string, error) {\n\tprefix := async.StoragePath(s.clusterUID, s.apiName)\n\tlog := s.logger.With(zap.String(\"id\", id), zap.String(\"contentType\", contentType))\n\n\tpayloadPath := async.PayloadPath(prefix, id)\n\tlog.Debug(\"uploading payload\", zap.String(\"path\", payloadPath))\n\tif err := s.storage.Upload(payloadPath, payload, contentType); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tlog.Debug(\"sending message to 
queue\")\n\tif err := s.queue.SendMessage(id, id); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tstatusPath := fmt.Sprintf(\"%s/%s/status/%s\", prefix, id, async.StatusInQueue)\n\tlog.Debug(fmt.Sprintf(\"setting status to %s\", async.StatusInQueue))\n\tif err := s.storage.Upload(statusPath, strings.NewReader(\"\"), \"text/plain\"); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn id, nil\n}", "func (o *ReplaceApiextensionsV1beta1CustomResourceDefinitionCreated) WithPayload(payload *models.IoK8sApiextensionsApiserverPkgApisApiextensionsV1beta1CustomResourceDefinition) *ReplaceApiextensionsV1beta1CustomResourceDefinitionCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateRbacAuthorizationV1NamespacedRoleCreated) SetPayload(payload *models.IoK8sAPIRbacV1Role) {\n\to.Payload = payload\n}", "func (rm *resourceManager) newCreateRequestPayload(\n\tctx context.Context,\n\tr *resource,\n) (*svcsdk.CreateReservedInstancesListingInput, error) {\n\tres := &svcsdk.CreateReservedInstancesListingInput{}\n\n\tif r.ko.Spec.ClientToken != nil {\n\t\tres.SetClientToken(*r.ko.Spec.ClientToken)\n\t}\n\tif r.ko.Spec.InstanceCount != nil {\n\t\tres.SetInstanceCount(*r.ko.Spec.InstanceCount)\n\t}\n\tif r.ko.Spec.PriceSchedules != nil {\n\t\tf2 := []*svcsdk.PriceScheduleSpecification{}\n\t\tfor _, f2iter := range r.ko.Spec.PriceSchedules {\n\t\t\tf2elem := &svcsdk.PriceScheduleSpecification{}\n\t\t\tif f2iter.CurrencyCode != nil {\n\t\t\t\tf2elem.SetCurrencyCode(*f2iter.CurrencyCode)\n\t\t\t}\n\t\t\tif f2iter.Price != nil {\n\t\t\t\tf2elem.SetPrice(*f2iter.Price)\n\t\t\t}\n\t\t\tif f2iter.Term != nil {\n\t\t\t\tf2elem.SetTerm(*f2iter.Term)\n\t\t\t}\n\t\t\tf2 = append(f2, f2elem)\n\t\t}\n\t\tres.SetPriceSchedules(f2)\n\t}\n\tif r.ko.Spec.ReservedInstancesID != nil {\n\t\tres.SetReservedInstancesId(*r.ko.Spec.ReservedInstancesID)\n\t}\n\n\treturn res, nil\n}", "func (c *CharacterCreateCommand) Payload() *CharacterCreateCommandPayload {\n\treturn c.payload\n}", "func (o *AddRegionAZCreated) SetPayload(payload models.ULID) {\n\to.Payload = payload\n}", "func (o *CreateStorageSSLCertificateDefault) WithPayload(payload *models.Error) *CreateStorageSSLCertificateDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *DeleteRuntimeContainerInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *GetPresignedForClusterFilesInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (service *ContrailService) RESTCreateContrailStorageNode(c echo.Context) error {\n\trequestData := &models.CreateContrailStorageNodeRequest{}\n\tif err := c.Bind(requestData); err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"err\": err,\n\t\t\t\"resource\": \"contrail_storage_node\",\n\t\t}).Debug(\"bind failed on create\")\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, \"Invalid JSON format\")\n\t}\n\tctx := c.Request().Context()\n\tresponse, err := service.CreateContrailStorageNode(ctx, requestData)\n\tif err != nil {\n\t\treturn common.ToHTTPError(err)\n\t}\n\treturn c.JSON(http.StatusCreated, response)\n}", "func (o *ReadStorageV1beta1CSIDriverOK) SetPayload(payload *models.IoK8sAPIStorageV1beta1CSIDriver) {\n\to.Payload = payload\n}", "func (o *CreateUploadSessionCreated) SetPayload(payload *models.UploadSession) {\n\to.Payload = payload\n}", "func (c *V2) Create(endpoint string, model models.Model) (*http.Response, *gabs.Container, error) {\n\tjsonPayload, err := c.PrepareModel(model)\n\tif err != nil {\n\t\treturn nil, nil, 
err\n\t}\n\n\tlog.Println(\"[DEBUG] CREATE Payload: \", jsonPayload.String())\n\n\treq, err := c.PrepareRequest(http.MethodPost, endpoint, jsonPayload, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresponse, err := c.Do(req)\n\tif err != nil {\n\t\treturn response, nil, err\n\t}\n\n\tcontainer, err := GetContainer(response)\n\tif err != nil {\n\t\treturn response, nil, err\n\t}\n\treturn response, container, nil\n}", "func (o *CreatePeerCreated) SetPayload(payload *models.PeerSection) {\n\to.Payload = payload\n}", "func (o *GetVSphereComputeResourcesOK) SetPayload(payload []*models.VSphereManagementObject) {\n\to.Payload = payload\n}", "func (o *ReplaceStorageV1beta1VolumeAttachmentCreated) SetPayload(payload *models.IoK8sAPIStorageV1beta1VolumeAttachment) {\n\to.Payload = payload\n}" ]
[ "0.6338703", "0.60790515", "0.5919006", "0.58873206", "0.5695712", "0.5632205", "0.5612034", "0.55868226", "0.53464735", "0.53214085", "0.5319903", "0.531182", "0.5279762", "0.52675426", "0.5255923", "0.5247841", "0.5237737", "0.5233633", "0.5189792", "0.5189389", "0.51761854", "0.5166897", "0.51525235", "0.5140903", "0.5137952", "0.5129179", "0.5110708", "0.51076126", "0.5097388", "0.5088015", "0.50742245", "0.50704306", "0.50657004", "0.5062062", "0.50605494", "0.50567794", "0.5056659", "0.504204", "0.50399554", "0.50351244", "0.5030948", "0.502779", "0.50229394", "0.5022469", "0.50170845", "0.50049514", "0.5004148", "0.5003787", "0.50011426", "0.49880975", "0.49865648", "0.49844941", "0.49784032", "0.49652737", "0.49627107", "0.49606636", "0.49606615", "0.49574244", "0.49553615", "0.49524072", "0.49323103", "0.49318936", "0.4929543", "0.4926742", "0.4919708", "0.49100772", "0.48981366", "0.4897144", "0.48932037", "0.48911095", "0.48896417", "0.48878357", "0.48876676", "0.4887171", "0.48837188", "0.48797294", "0.4875012", "0.48699614", "0.4863298", "0.48607248", "0.48573875", "0.4851463", "0.48438552", "0.4843517", "0.48412386", "0.48354042", "0.48314154", "0.48225343", "0.482151", "0.48166612", "0.48132497", "0.4812137", "0.48110014", "0.48079672", "0.48070434", "0.47972408", "0.4796626", "0.47887212", "0.47822565", "0.47809657" ]
0.6029008
2
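The negatives in the row above all share the same go-swagger generated responder shape: a struct with a Payload field, a chainable WithPayload builder, and an in-place SetPayload setter. A minimal, self-contained sketch of that pattern, with illustrative type names (Widget and CreateWidgetCreated are stand-ins, not taken from any real spec):

```go
package responses

// Widget stands in for a generated swagger model type.
type Widget struct {
	Name string `json:"name"`
}

// CreateWidgetCreated mirrors the shape of a generated "201 Created" responder.
type CreateWidgetCreated struct {
	Payload *Widget
}

// WithPayload sets the payload and returns the responder, enabling
// fluent construction such as new(CreateWidgetCreated).WithPayload(w).
func (o *CreateWidgetCreated) WithPayload(payload *Widget) *CreateWidgetCreated {
	o.Payload = payload
	return o
}

// SetPayload mutates the responder in place; it is identical to
// WithPayload except that it returns nothing.
func (o *CreateWidgetCreated) SetPayload(payload *Widget) {
	o.Payload = payload
}
```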
SetPayload sets the payload to the create storage v1 c s i node created response
func (o *CreateStorageV1CSINodeCreated) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) { o.Payload = payload }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *CreateStorageV1CSINodeOK) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (o *ReplaceStorageV1CSINodeCreated) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (o *CreateClusterCreated) SetPayload(payload *models.Kluster) {\n\to.Payload = payload\n}", "func (o *CreateHPCResourceCreated) SetPayload(payload *models.CreatedResponse) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedServiceAccountTokenCreated) SetPayload(payload *models.IoK8sAPIAuthenticationV1TokenRequest) {\n\to.Payload = payload\n}", "func (o *CreateZoneCreated) SetPayload(payload *models.CreateZoneResponse) {\n\to.Payload = payload\n}", "func (o *ReplaceNodeV1alpha1RuntimeClassCreated) SetPayload(payload *models.IoK8sAPINodeV1alpha1RuntimeClass) {\n\to.Payload = payload\n}", "func (o *ClientPermissionCreateInternalServerError) SetPayload(payload *ClientPermissionCreateInternalServerErrorBody) {\n\to.Payload = payload\n}", "func (nf *NetworkPayload) SetPayload(newpayload []byte) {\n}", "func (o *PutSlideSuperlikeCreated) SetPayload(payload models.Success) {\n\to.Payload = payload\n}", "func (o *CreateTaskInternalServerError) SetPayload(payload interface{}) {\n\to.Payload = payload\n}", "func (o *PutSlideLikeCreated) SetPayload(payload models.Success) {\n\to.Payload = payload\n}", "func (o *CreateStorageSSLCertificateCreated) SetPayload(payload *models.SslCertificate) {\n\to.Payload = payload\n}", "func (o *CreateSpoeCreated) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *SetResourceCreated) SetPayload(payload *models.Resource) {\n\to.Payload = payload\n}", "func (o *SemverGenerateCreated) SetPayload(payload *models.SemverTagSet) {\n\to.Payload = payload\n}", "func (o *DeleteStorageByIDOK) SetPayload(payload *models.Storage) {\n\to.Payload = payload\n}", "func (o *CreateCoordinationV1NamespacedLeaseCreated) SetPayload(payload *models.IoK8sAPICoordinationV1Lease) {\n\to.Payload = payload\n}", "func (o *ReplaceStorageV1CSINodeOK) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (o *PostOrderCreated) SetPayload(payload *models.OrderCreateResponse) {\n\to.Payload = payload\n}", "func (o *ReplicateCreated) SetPayload(payload *models.SteeringRequestID) {\n\to.Payload = payload\n}", "func (o *GetServicesHaproxyRuntimeAclsIDOK) SetPayload(payload *models.ACLFile) {\n\to.Payload = payload\n}", "func (o *CreateOK) SetPayload(payload *models.Event) {\n\to.Payload = payload\n}", "func (o *CreateClusterDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetDataContextTopologyUUIDNodeNodeUUIDOK) SetPayload(payload *models.TapiTopologyTopologyNode) {\n\to.Payload = payload\n}", "func (o *CreateStorageSSLCertificateDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateFoldersCreated) SetPayload(payload *models.CreateFolderResp) {\n\to.Payload = payload\n}", "func (o *RegisterInfraEnvCreated) SetPayload(payload *models.InfraEnv) {\n\to.Payload = payload\n}", "func (o *DeleteStorageByIDNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedServiceAccountTokenOK) SetPayload(payload *models.IoK8sAPIAuthenticationV1TokenRequest) {\n\to.Payload = payload\n}", "func (o *CreateExtensionsV1beta1NamespacedIngressCreated) SetPayload(payload *models.IoK8sAPIExtensionsV1beta1Ingress) {\n\to.Payload = payload\n}", "func (o *CreateACLCreated) SetPayload(payload *models.ACL) 
{\n\to.Payload = payload\n}", "func (o *DeleteRuntimeContainerInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *AddReleasesCreated) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *GetVSphereComputeResourcesInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *ReplaceAppsV1NamespacedReplicaSetScaleCreated) SetPayload(payload *models.IoK8sAPIAutoscalingV1Scale) {\n\to.Payload = payload\n}", "func (o *AddRegionAZCreated) SetPayload(payload models.ULID) {\n\to.Payload = payload\n}", "func (o *GetPresignedForClusterFilesInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetVSphereComputeResourcesOK) SetPayload(payload []*models.VSphereManagementObject) {\n\to.Payload = payload\n}", "func (o *AddKeypairCreated) SetPayload(payload models.ULID) {\n\to.Payload = payload\n}", "func (o *ReplaceExtensionsV1beta1NamespacedIngressCreated) SetPayload(payload *models.IoK8sAPIExtensionsV1beta1Ingress) {\n\to.Payload = payload\n}", "func (o *CreateStorageV1CSINodeAccepted) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (o *CreateZoneInternalServerError) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *UpdateClusterInternalServerError) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *WeaviateThingTemplatesCreateAccepted) SetPayload(payload *models.ThingTemplateGetResponse) {\n\to.Payload = payload\n}", "func (o *V1CreateHelloOK) SetPayload(payload *models.CreateHelloResponse) {\n\to.Payload = payload\n}", "func (o *CreateHPCResourceInternalServerError) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *ReplaceApiextensionsV1beta1CustomResourceDefinitionCreated) SetPayload(payload *models.IoK8sApiextensionsApiserverPkgApisApiextensionsV1beta1CustomResourceDefinition) {\n\to.Payload = payload\n}", "func (o *PutWorkpaceByIDInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *CreateAuthenticationV1beta1TokenReviewCreated) SetPayload(payload *models.IoK8sAPIAuthenticationV1beta1TokenReview) {\n\to.Payload = payload\n}", "func (o *CreateRbacAuthorizationV1alpha1NamespacedRoleCreated) SetPayload(payload *models.IoK8sAPIRbacV1alpha1Role) {\n\to.Payload = payload\n}", "func (o *PostInteractionCreated) SetPayload(payload *models.ConsoleInteraction) {\n\to.Payload = payload\n}", "func (o *CreateACLAccepted) SetPayload(payload *models.ACL) {\n\to.Payload = payload\n}", "func (o *CreatePeerCreated) SetPayload(payload *models.PeerSection) {\n\to.Payload = payload\n}", "func (o *V1CreateHelloInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateTCPCheckCreated) SetPayload(payload *models.TCPCheck) {\n\to.Payload = payload\n}", "func (o *GetBackupRuntimeEnvironmentsInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *DeletePostbyIDInternalServerError) SetPayload(payload *models.Response) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedPodBindingCreated) SetPayload(payload *models.IoK8sAPICoreV1Binding) {\n\to.Payload = payload\n}", "func (tx *Transaction) SetPayload() {\n\tsize := make([]byte, 300)\n\ttx.data.Payload = size\n}", "func (o *ReplaceCertificatesV1CertificateSigningRequestCreated) SetPayload(payload *models.IoK8sAPICertificatesV1CertificateSigningRequest) {\n\to.Payload = payload\n}", "func (o 
*GetHealthzInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *ServiceAddCreated) SetPayload(payload *models.Service) {\n\to.Payload = payload\n}", "func (o *CreateCoordinationV1NamespacedLeaseOK) SetPayload(payload *models.IoK8sAPICoordinationV1Lease) {\n\to.Payload = payload\n}", "func (o *CreateDocumentCreated) SetPayload(payload *internalmessages.Document) {\n\to.Payload = payload\n}", "func (o *CreateNetworkingV1beta1NamespacedIngressCreated) SetPayload(payload *models.IoK8sAPINetworkingV1beta1Ingress) {\n\to.Payload = payload\n}", "func (o *GetVSphereDatastoresInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *PutProjectProjectNameStageStageNameServiceServiceNameResourceCreated) SetPayload(payload *models.Version) {\n\to.Payload = payload\n}", "func (o *GetProviderRegionByIDInternalServerError) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *UpdateCatalogInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *GetClusterInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *PostReposOwnerRepoKeysCreated) SetPayload(payload *models.UserKeysKeyID) {\n\to.Payload = payload\n}", "func (o *PostRegisterDetailsInternalServerError) SetPayload(payload *models.GeneralResponse) {\n\to.Payload = payload\n}", "func (o *GetV1RdssInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateTaskDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *DeleteStorageByIDUnauthorized) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *ReadStorageV1beta1CSIDriverOK) SetPayload(payload *models.IoK8sAPIStorageV1beta1CSIDriver) {\n\to.Payload = payload\n}", "func (o *AddNewMaterialsForPostInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateRbacAuthorizationV1NamespacedRoleCreated) SetPayload(payload *models.IoK8sAPIRbacV1Role) {\n\to.Payload = payload\n}", "func (o *PutReposOwnerRepoContentsPathOK) SetPayload(payload *models.CreateFile) {\n\to.Payload = payload\n}", "func (o *GetNFTContractTokenOK) SetPayload(payload *models.NFTTokenRow) {\n\to.Payload = payload\n}", "func (o *CreateUserGardenCreated) SetPayload(payload *models.Garden) {\n\to.Payload = payload\n}", "func (o *CreateUploadSessionCreated) SetPayload(payload *models.UploadSession) {\n\to.Payload = payload\n}", "func (o *AddRegionAZInternalServerError) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *CreateTCPCheckDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateACLDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *AddPayloadRuntimeACLCreated) SetPayload(payload models.ACLFilesEntries) {\n\to.Payload = payload\n}", "func (o *CreateSpoeDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateCurrentAPISessionCertificateOK) SetPayload(payload *rest_model.CreateCurrentAPISessionCertificateEnvelope) {\n\to.Payload = payload\n}", "func (o *AddKeypairInternalServerError) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *CreateDiscoveryV1beta1NamespacedEndpointSliceCreated) SetPayload(payload *models.IoK8sAPIDiscoveryV1beta1EndpointSlice) {\n\to.Payload = payload\n}", "func (o *GetServicesHaproxyRuntimeAclsIDDefault) SetPayload(payload *models.Error) {\n\to.Payload = 
payload\n}", "func (o *CreateBatchV1NamespacedJobCreated) SetPayload(payload *models.IoK8sAPIBatchV1Job) {\n\to.Payload = payload\n}", "func (o *UpdateMoveTaskOrderPostCounselingInformationInternalServerError) SetPayload(payload interface{}) {\n\to.Payload = payload\n}", "func (o *PostOperationsGetNodeEdgePointDetailsOK) SetPayload(payload *models.TapiTopologyGetNodeEdgePointDetailsOutput) {\n\to.Payload = payload\n}", "func (o *ReplaceRbacAuthorizationV1beta1NamespacedRoleCreated) SetPayload(payload *models.IoK8sAPIRbacV1beta1Role) {\n\to.Payload = payload\n}", "func (o *GetPresignedForClusterFilesOK) SetPayload(payload *models.Presigned) {\n\to.Payload = payload\n}", "func (o *GetClusterInstallConfigInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *ClientPermissionCreateOK) SetPayload(payload *ClientPermissionCreateOKBody) {\n\to.Payload = payload\n}", "func (o *GetServicesHaproxyRuntimeAclsIDNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}" ]
[ "0.6970405", "0.689244", "0.66865474", "0.6685111", "0.6621812", "0.64798963", "0.6440759", "0.6422618", "0.64157015", "0.6409855", "0.63932294", "0.63880134", "0.6366237", "0.63556445", "0.63401204", "0.6314925", "0.63056827", "0.6305267", "0.6303207", "0.6301179", "0.6295906", "0.62378937", "0.6230902", "0.6209168", "0.6199169", "0.61831266", "0.6180207", "0.6165513", "0.6165507", "0.6164532", "0.61599696", "0.6159434", "0.61555094", "0.61508465", "0.6150031", "0.6147048", "0.6136203", "0.6134205", "0.6130971", "0.61307704", "0.61254215", "0.61178535", "0.6114284", "0.61128604", "0.61051565", "0.6102786", "0.6100965", "0.60849166", "0.6084897", "0.60809815", "0.6079778", "0.6078102", "0.6077921", "0.6075611", "0.6073583", "0.60691786", "0.60675555", "0.6064587", "0.60628736", "0.60571754", "0.605426", "0.6053578", "0.604594", "0.60424197", "0.6036781", "0.6032272", "0.603012", "0.602903", "0.60212195", "0.6009385", "0.60068893", "0.60051644", "0.60047656", "0.59863734", "0.5986085", "0.5984059", "0.5982861", "0.5976576", "0.5974062", "0.5971688", "0.59707355", "0.5970121", "0.59697944", "0.5963349", "0.59588754", "0.5953041", "0.59492797", "0.5949186", "0.5944313", "0.5939043", "0.5935258", "0.59320855", "0.59276915", "0.5924703", "0.59200907", "0.5915939", "0.5914278", "0.59131753", "0.59127575", "0.59112114" ]
0.71794635
0
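The next row shows the consumer side of the same generated responder: WriteResponse writes the HTTP status code and, when the payload set via SetPayload or WithPayload is non-nil, serializes it through the content-negotiated runtime.Producer. A hedged, self-contained sketch mirroring that structure (Widget and CreateWidgetCreated are illustrative stand-ins; the producer interface is from github.com/go-openapi/runtime):

```go
package responses

import (
	"net/http"

	"github.com/go-openapi/runtime"
)

// Widget stands in for a generated swagger model type.
type Widget struct {
	Name string `json:"name"`
}

// CreateWidgetCreated mirrors a generated "201 Created" responder.
type CreateWidgetCreated struct {
	Payload *Widget
}

// WriteResponse writes the status code and serializes a non-nil payload
// through the negotiated producer, following the generated code's shape;
// the panic is deliberately left for the recovery middleware to handle.
func (o *CreateWidgetCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
	rw.WriteHeader(201)
	if o.Payload != nil {
		if err := producer.Produce(rw, o.Payload); err != nil {
			panic(err) // let the recovery middleware deal with this
		}
	}
}
```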
WriteResponse to the client
func (o *CreateStorageV1CSINodeCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { rw.WriteHeader(201) if o.Payload != nil { payload := o.Payload if err := producer.Produce(rw, payload); err != nil { panic(err) // let the recovery middleware deal with this } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *Response) Write(w io.Writer) error", "func (c *Operation) writeResponse(rw http.ResponseWriter, status int, data []byte) { // nolint: unparam\n\trw.WriteHeader(status)\n\n\tif _, err := rw.Write(data); err != nil {\n\t\tlogger.Errorf(\"Unable to send error message, %s\", err)\n\t}\n}", "func WriteResponse(w http.ResponseWriter, object interface{}, rerr *irma.RemoteError) {\n\tstatus, bts := JsonResponse(object, rerr)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\t_, err := w.Write(bts)\n\tif err != nil {\n\t\tLogWarning(errors.WrapPrefix(err, \"failed to write response\", 0))\n\t}\n}", "func WriteResponse(w http.ResponseWriter, mensaje string, code int) {\n\tmessage := myTypes.Respuesta{\n\t\tMessage: mensaje,\n\t}\n\tresponse, _ := json.Marshal(message)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(response)\n}", "func (o *PingOK) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(200)\n}", "func WriteResponse(w http.ResponseWriter, v interface{}, statusCode int) {\n\tresBody, err := json.Marshal(v)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(statusCode)\n\t_, _ = w.Write(resBody)\n}", "func WriteResponse(w http.ResponseWriter, code int, object interface{}) {\n\tdata, err := json.Marshal(object)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(data)\n}", "func (o *GetPingOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func writeResponse(body []byte, w *http.ResponseWriter) {\n\t(*w).Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\t_, err := (*w).Write(body)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\t(*w).WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func WriteResponse(w http.ResponseWriter, code int, resp interface{}) error {\n\tj, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn err\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\n\t_, err = w.Write(j)\n\treturn err\n}", "func writeResponse(w *http.ResponseWriter, res responseData, status int) {\n\tresJSON, err := json.Marshal(res)\n\tif err != nil {\n\t\thttp.Error(*w, \"Failed to parse struct `responseData` into JSON object\", http.StatusInternalServerError)\n\t}\n\n\t(*w).Header().Set(\"Content-Type\", \"application/json\")\n\t(*w).WriteHeader(status)\n\t(*w).Write(resJSON)\n}", "func WriteResponse(w http.ResponseWriter, d string) {\n\tw.WriteHeader(200)\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.Write([]byte(d))\n\treturn\n}", "func (o *CreateFacilityUsersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func writeResponse(w http.ResponseWriter, response Response) {\n\tjson, err := json.Marshal(&response)\n\n\tif err != nil {\n\t\tfmt.Fprint(w, \"There was an error processing the request.\")\n\t}\n\n\tcommon.Log(fmt.Sprintf(\"Returning response %s\", json))\n\tfmt.Fprintf(w, \"%s\", json)\n}", "func (o *CreateProgramOK) WriteResponse(rw http.ResponseWriter, producer 
runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *DepositNewFileOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateMedicineOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *CreateTaskCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Location\n\n\tlocation := o.Location.String()\n\tif location != \"\" {\n\t\trw.Header().Set(\"Location\", location)\n\t}\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func writeResponse(r *http.Request, w http.ResponseWriter, code int, resp interface{}) {\n\n\t// Deal with CORS\n\tif origin := r.Header.Get(\"Origin\"); origin != \"\" {\n\t\tw.Header().Set(\"Access-Control-Allow-Origin\", origin)\n\t\tw.Header().Set(\"Access-Control-Allow-Methods\", \"DELETE, GET, HEAD, OPTIONS, POST, PUT\")\n\t\tw.Header().Set(\"Access-Control-Allow-Credentials\", \"true\")\n\t\t// Allow any headers\n\t\tif wantedHeaders := r.Header.Get(\"Access-Control-Request-Headers\"); wantedHeaders != \"\" {\n\t\t\tw.Header().Set(\"Access-Control-Allow-Headers\", wantedHeaders)\n\t\t}\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\n\tb, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintln(w, `{\"error\":\"failed to marshal json\"}`)\n\t\treturn\n\t}\n\n\tw.WriteHeader(code)\n\tfmt.Fprintln(w, string(b))\n}", "func (o *VerifyAccountCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func writeResponse(w http.ResponseWriter, h int, p interface{}) {\n\t// I set the content type...\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t// ... I write the specified status code...\n\tw.WriteHeader(h)\n\t// ... 
and I write the response\n\tb, _ := json.Marshal(p)\n\tw.Write(b)\n}", "func (o *UpdateCatalogOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func (c *SwitchVersion) WriteResponse(rw http.ResponseWriter, rp runtime.Producer) {\n\tswitch c.Request.Method {\n\tcase http.MethodPost:\n\t\tc.postSwitchVersion(rw, rp)\n\tdefault:\n\t\tc.notSupported(rw, rp)\n\t}\n}", "func (o *PutRecordingsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *BofaChkUpdateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *VerifyHealthCredentialOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func WriteResponse(w http.ResponseWriter, code int, err error, data interface{}, t0 time.Time) {\n\tw.WriteHeader(code)\n\tresp := &Response{Data: data, Dur: fmt.Sprint(time.Since(t0)), OK: false}\n\tif code < 300 {\n\t\tresp.OK = true\n\t}\n\tif err != nil {\n\t\tresp.Err = err.Error()\n\t}\n\terr = json.NewEncoder(w).Encode(resp)\n\tif err != nil {\n\t\tlog.Infof(\"failed to json encode response: %v\", err)\n\t\tif _, err = w.Write([]byte(spew.Sdump(resp))); err != nil {\n\t\t\tlog.Infof(\"failed to write dump of response: %v\", err)\n\t\t}\n\t}\n}", "func (o *NewDiscoveryOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func writeResponse(data []byte, size int64, ctype string, w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", ctype)\n\tw.Header().Set(\"Content-Length\", fmt.Sprintf(\"%d\", size))\n\tw.Header().Set(\"Cache-Control\", \"no-transform,public,max-age=86400,s-maxage=2592000\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(data)\n}", "func writeResponse(w http.ResponseWriter, code int, object interface{}) {\n\tfmt.Println(\"writing response:\", code, object)\n\tdata, err := json.Marshal(object)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tw.Header().Set(\"content-type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(data)\n}", "func writeResponse(w http.ResponseWriter, authZRes *authorization.Response) {\n\n\tdata, err := json.Marshal(authZRes)\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to marshel authz response %q\", err.Error())\n\t} else {\n\t\tw.Write(data)\n\t}\n\n\tif authZRes == nil || authZRes.Err != \"\" {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n}", "func (o *GetCharactersCharacterIDOpportunitiesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make(models.GetCharactersCharacterIDOpportunitiesOKBody, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) 
// let the recovery middleware deal with this\n\t}\n\n}", "func (o *WeaviateThingsGetNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n}", "func (c *UpdateSwitch) WriteResponse(rw http.ResponseWriter, rp runtime.Producer) {\n\tswitch c.Request.Method {\n\tcase http.MethodPost:\n\t\tc.postUpdateSwitch(rw, rp)\n\tdefault:\n\t\tc.notSupported(rw, rp)\n\t}\n}", "func (c *UpdateSwitch) WriteResponse(rw http.ResponseWriter, rp runtime.Producer) {\n\tswitch c.Request.Method {\n\tcase http.MethodPost:\n\t\tc.postUpdateSwitch(rw, rp)\n\tdefault:\n\t\tc.notSupported(rw, rp)\n\t}\n}", "func (o *UpdateLinkInPostOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetChatroomsIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetEchoNameOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetUIContentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *ListVsphereResourceOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func ResponseWrite(w http.ResponseWriter, responseCode int, responseData interface{}) {\n\t// Write Response\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(responseCode)\n\n\t// Write JSON to Response\n\tjson.NewEncoder(w).Encode(responseData)\n}", "func writeHTTPResponseInWriter(httpRes http.ResponseWriter, httpReq *http.Request, nobelPrizeWinnersResponse []byte, err error) {\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\thttp.Error(httpRes, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tlog.Printf(\"Request %s Succesfully Completed\", httpReq.RequestURI)\n\thttpRes.Header().Set(\"Content-Type\", \"application/json\")\n\thttpRes.Write(nobelPrizeWinnersResponse)\n}", "func (o *PostKeysKeyOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func (o *Operation) writeResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func writeResponse(data interface{}, w http.ResponseWriter) error {\n\tvar (\n\t\tenc []byte\n\t\terr error\n\t)\n\tenc, err = json.Marshal(data)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Failure to marshal, err = %s\", err)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tn, err := w.Write(enc)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Failure to write, err = %s\", err)\n\t}\n\tif n != len(enc) {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn 
fmt.Errorf(\"Short write sent = %d, wrote = %d\", len(enc), n)\n\t}\n\treturn nil\n}", "func (o *CreateUserOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateMoveTaskOrderPostCounselingInformationOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func WriteResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\tif err != nil {\n\t\tlogger.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func (o *PutQuestionOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (r *response) Write(b []byte) (n int, err error) {\n\tif !r.headersSend {\n\t\tif r.status == 0 {\n\t\t\tr.status = http.StatusOK\n\t\t}\n\t\tr.WriteHeader(r.status)\n\t}\n\tn, err = r.ResponseWriter.Write(b)\n\tr.size += int64(n)\n\treturn\n}", "func (o *PostOperationsDeleteP2PPathCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *HealthGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *VerifyEmailTokenOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *WeaviateThingsPatchNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n}", "func (o *WeaviateThingsGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *DeleteServiceIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *Operation) writeResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\t// as of now, just log errors for writing response\n\tif err != nil {\n\t\tlogger.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func (o *PostOperationsGetNodeEdgePointDetailsCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *UserEditOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *WeaviatePeersAnnounceOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func (o *CertifyOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) 
{\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func writeResponse(writer http.ResponseWriter, response *http.Response) (int64, error) {\n\tdefer response.Body.Close()\n\twriteResponseHeaders(writer, response, false)\n\treturn io.Copy(writer, response.Body)\n}", "func (o *PutMeetupDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *FingerPathsPostCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *PostPlaybookOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateHostIgnitionCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *GetCharactersCharacterIDLocationOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetPingDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *PostManagementKubernetesIoV1NodesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *PutPerformancesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *StopAppAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(202)\n}", "func (o *GetFleetsFleetIDMembersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Content-Language\n\n\tcontentLanguage := o.ContentLanguage\n\tif contentLanguage != \"\" {\n\t\trw.Header().Set(\"Content-Language\", contentLanguage)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = 
make(models.GetFleetsFleetIDMembersOKBody, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *GetMeetupsDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *PostEventCreated) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(201)\n}", "func (o *GetTaskTaskIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateTCPCheckAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Reload-ID\n\n\treloadID := o.ReloadID\n\tif reloadID != \"\" {\n\t\trw.Header().Set(\"Reload-ID\", reloadID)\n\t}\n\n\trw.WriteHeader(202)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *PostOperationsGetNetworkElementListCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *ServiceInstanceLastOperationGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header RetryAfter\n\n\tretryAfter := o.RetryAfter\n\tif retryAfter != \"\" {\n\t\trw.Header().Set(\"RetryAfter\", retryAfter)\n\t}\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetTaskDetailsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetPiecesIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *UpdateClusterOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetDetailOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\trw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetServicesHaproxyRuntimeAclsIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o 
*LogoutOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (r *responseInfoRecorder) Write(b []byte) (int, error) {\n\tr.ContentLength += int64(len(b))\n\tif r.statusCode == 0 {\n\t\tr.statusCode = http.StatusOK\n\t}\n\treturn r.ResponseWriter.Write(b)\n}", "func (o *UploadFileOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func WriteResponse(w http.ResponseWriter, data interface{}) error {\n\tenv := map[string]interface{}{\n\t\t\"meta\": map[string]interface{}{\n\t\t\t\"code\": http.StatusOK,\n\t\t},\n\t\t\"data\": data,\n\t}\n\treturn jsonResponse(w, env)\n}", "func (o *WeaviateThingTemplatesCreateNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n}", "func (r *Responder) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\tfor k, v := range r.headers {\n\t\tfor _, val := range v {\n\t\t\trw.Header().Add(k, val)\n\t\t}\n\t}\n\n\trw.WriteHeader(r.code)\n\n\tif r.response != nil {\n\t\tif err := producer.Produce(rw, r.response); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}", "func (o *GetGateSourceByGateNameAndMntOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateSpoeCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tpayload := o.Payload\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (o *Output) writeResponse(response string) error {\r\n\t// write the response\r\n\tif _, err := o.writer.WriteString(response + \"\\n\"); err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\treturn nil\r\n}", "func (o *GetTransportByIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *TransferOK) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(200)\n\tif err := producer.Produce(rw, o.Payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *CreateUserCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *ViewOneOrderOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetVisiblePruebasFromQuestionTestInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(500)\n}", "func (o *GetWhaleTranfersOK) WriteResponse(rw 
http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\t// return empty array\n\t\tpayload = make([]*models.OperationsRow, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (o *SearchTournamentsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make([]*models.Tournament, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *CreateTCPCheckCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (s *Server) writeInfoResponse(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tmessage []byte,\n\tstatus int,\n\theaders map[string]string,\n) {\n\tfor k, v := range headers {\n\t\tw.Header().Add(k, v)\n\t}\n\n\tw.WriteHeader(status)\n\tw.Write(message)\n}" ]
[ "0.81291586", "0.78819287", "0.77723724", "0.7772298", "0.77532965", "0.7740895", "0.7667328", "0.76388013", "0.76095575", "0.75802743", "0.75792146", "0.7567954", "0.75612247", "0.7558208", "0.7545076", "0.75431097", "0.7542526", "0.7535154", "0.75308895", "0.75206727", "0.75192624", "0.7513445", "0.75115013", "0.7506245", "0.75036865", "0.74994856", "0.7488267", "0.7484068", "0.7476975", "0.74681216", "0.7467429", "0.74663514", "0.7464419", "0.74637115", "0.74637115", "0.74621916", "0.74607694", "0.74600816", "0.74461263", "0.7444002", "0.74358237", "0.7427366", "0.7425954", "0.7418714", "0.7413481", "0.74079764", "0.7406604", "0.74053806", "0.7399197", "0.73880255", "0.73864275", "0.7381308", "0.7361386", "0.73605716", "0.73553914", "0.735516", "0.7353125", "0.7348355", "0.734634", "0.7328798", "0.7326309", "0.7318161", "0.73170096", "0.73166984", "0.7316146", "0.7313389", "0.73119754", "0.73103034", "0.73090947", "0.7301638", "0.729702", "0.7292011", "0.7291873", "0.7289617", "0.72853845", "0.7284048", "0.7282259", "0.7280808", "0.72753084", "0.7275278", "0.7273494", "0.72732604", "0.7269464", "0.72693926", "0.7268149", "0.72664154", "0.72615176", "0.72536385", "0.7251536", "0.7249643", "0.72487813", "0.72475266", "0.72414196", "0.723942", "0.7237652", "0.7234592", "0.72287256", "0.72233856", "0.72163224", "0.7215305", "0.72126275" ]
0.0
-1
NewCreateStorageV1CSINodeAccepted creates CreateStorageV1CSINodeAccepted with default headers values
func NewCreateStorageV1CSINodeAccepted() *CreateStorageV1CSINodeAccepted { return &CreateStorageV1CSINodeAccepted{} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *CreateStorageV1CSINodeAccepted) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *CreateStorageV1CSINodeAccepted {\n\to.Payload = payload\n\treturn o\n}", "func NewCreateStorageV1CSINodeOK() *CreateStorageV1CSINodeOK {\n\n\treturn &CreateStorageV1CSINodeOK{}\n}", "func (o *CreateStorageV1CSINodeCreated) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *CreateStorageV1CSINodeCreated {\n\to.Payload = payload\n\treturn o\n}", "func NewReplaceStorageV1CSINodeCreated() *ReplaceStorageV1CSINodeCreated {\n\n\treturn &ReplaceStorageV1CSINodeCreated{}\n}", "func (o *CreateStorageV1CSINodeAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(202)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func NewCreateNetworkingV1beta1NamespacedIngressAccepted() *CreateNetworkingV1beta1NamespacedIngressAccepted {\n\n\treturn &CreateNetworkingV1beta1NamespacedIngressAccepted{}\n}", "func NewReplaceStorageV1CSINodeCreated() *ReplaceStorageV1CSINodeCreated {\n\treturn &ReplaceStorageV1CSINodeCreated{}\n}", "func CreateBucket(w http.ResponseWriter, r *http.Request) *appError {\n decoder := json.NewDecoder(r.Body)\n var ecsBucket ECSBucket\n err := decoder.Decode(&ecsBucket)\n if err != nil {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Can't decode JSON data\"}\n }\n headers := make(map[string][]string)\n if ecsBucket.ReplicationGroup != \"\" {\n headers[\"x-emc-vpool\"] = []string{ecsBucket.ReplicationGroup}\n }\n if ecsBucket.MetadataSearch != \"\" {\n headers[\"x-emc-metadata-search\"] = []string{ecsBucket.MetadataSearch}\n }\n if ecsBucket.EnableADO {\n headers[\"x-emc-is-stale-allowed\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-is-stale-allowed\"] = []string{\"false\"}\n }\n if ecsBucket.EnableFS {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableCompliance {\n headers[\"x-emc-compliance-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-compliance-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableEncryption {\n headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"false\"}\n }\n retentionEnabled := false\n headers[\"x-emc-retention-period\"] = []string{\"0\"}\n if ecsBucket.Retention != \"\" {\n days, err := strconv.ParseInt(ecsBucket.Retention, 10, 64)\n if err == nil {\n if days > 0 {\n seconds := days * 24 * 3600\n headers[\"x-emc-retention-period\"] = []string{int64toString(seconds)}\n retentionEnabled = true\n }\n }\n }\n var expirationCurrentVersions int64\n expirationCurrentVersions = 0\n if ecsBucket.ExpirationCurrentVersions != \"\" {\n days, err := strconv.ParseInt(ecsBucket.ExpirationCurrentVersions, 10, 64)\n if err == nil {\n expirationCurrentVersions = days\n }\n }\n var expirationNonCurrentVersions int64\n expirationNonCurrentVersions = 0\n if ecsBucket.ExpirationNonCurrentVersions != \"\" {\n days, err := strconv.ParseInt(ecsBucket.ExpirationNonCurrentVersions, 10, 64)\n if err == nil && ecsBucket.EnableVersioning {\n expirationNonCurrentVersions = days\n }\n }\n var bucketCreateResponse Response\n if ecsBucket.Api == \"s3\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: 
http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = s3Request(s3, ecsBucket.Name, \"PUT\", \"/\", headers, \"\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: err.Error()}\n }\n versioningStatusOK := true\n lifecyclePolicyStatusOK := true\n // If the bucket has been created\n if bucketCreateResponse.Code == 200 {\n if !retentionEnabled && ecsBucket.EnableVersioning {\n // Enable versioning\n enableVersioningHeaders := map[string][]string{}\n enableVersioningHeaders[\"Content-Type\"] = []string{\"application/xml\"}\n versioningConfiguration := `\n <VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\n <Status>Enabled</Status>\n <MfaDelete>Disabled</MfaDelete>\n </VersioningConfiguration>\n `\n enableVersioningResponse, _ := s3Request(s3, ecsBucket.Name, \"PUT\", \"/?versioning\", enableVersioningHeaders, versioningConfiguration)\n if enableVersioningResponse.Code != 200 {\n versioningStatusOK = false\n }\n }\n if expirationCurrentVersions > 0 || expirationNonCurrentVersions > 0 {\n lifecyclePolicyHeaders := map[string][]string{}\n lifecyclePolicyHeaders[\"Content-Type\"] = []string{\"application/xml\"}\n lifecyclePolicyConfiguration := `\n <LifecycleConfiguration>\n <Rule>\n <ID>expiration</ID>\n <Prefix></Prefix>\n <Status>Enabled</Status>\n `\n if expirationCurrentVersions > 0 && expirationNonCurrentVersions > 0 {\n // Enable expiration for both current and non current versions\n lifecyclePolicyConfiguration += \"<Expiration><Days>\" + ecsBucket.ExpirationCurrentVersions + \"</Days></Expiration>\"\n lifecyclePolicyConfiguration += \"<NoncurrentVersionExpiration><NoncurrentDays>\" + ecsBucket.ExpirationNonCurrentVersions + \"</NoncurrentDays></NoncurrentVersionExpiration>\"\n } else {\n if expirationCurrentVersions > 0 {\n // Enable expiration for current versions only\n lifecyclePolicyConfiguration += \"<Expiration><Days>\" + ecsBucket.ExpirationCurrentVersions + \"</Days></Expiration>\"\n }\n if expirationNonCurrentVersions > 0 {\n // Enable expiration for non current versions only\n // To fix a bug in ECS 3.0 where an expiration for non current version can't be set if there's no expiration set for current versions\n lifecyclePolicyConfiguration += \"<Expiration><Days>1000000</Days></Expiration>\"\n lifecyclePolicyConfiguration += \"<NoncurrentVersionExpiration><NoncurrentDays>\" + ecsBucket.ExpirationNonCurrentVersions + \"</NoncurrentDays></NoncurrentVersionExpiration>\"\n }\n }\n lifecyclePolicyConfiguration += `\n </Rule>\n </LifecycleConfiguration>\n `\n lifecyclePolicyResponse, _ := s3Request(s3, ecsBucket.Name, \"PUT\", \"/?lifecycle\", lifecyclePolicyHeaders, lifecyclePolicyConfiguration)\n if lifecyclePolicyResponse.Code != 200 {\n lifecyclePolicyStatusOK = false\n }\n }\n if versioningStatusOK && lifecyclePolicyStatusOK {\n rendering.JSON(w, http.StatusOK, \"\")\n } else {\n message := \"\"\n if !versioningStatusOK {\n message += \" Versioning can't be enabled.\"\n }\n if !lifecyclePolicyStatusOK {\n message += \" Expiration can't be set.\"\n }\n rendering.JSON(w, http.StatusOK, message)\n }\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"swift\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = 
swiftRequest(ecsBucket.Endpoint, s3.AccessKey, ecsBucket.Password, ecsBucket.Name, \"PUT\", \"/\", headers, \"\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: err.Error()}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 {\n rendering.JSON(w, http.StatusOK, ecsBucket.Name)\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"atmos\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = atmosRequest(ecsBucket.Endpoint, s3.AccessKey, s3.SecretKey, \"\", \"PUT\", \"/rest/subtenant\", headers, \"\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: err.Error()}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 {\n rendering.JSON(w, http.StatusOK, bucketCreateResponse.ResponseHeaders[\"Subtenantid\"][0])\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n }\n\n return nil\n}", "func (o *CreateStorageV1CSINodeOK) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *CreateStorageV1CSINodeOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *ReplaceStorageV1CSINodeCreated) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *ReplaceStorageV1CSINodeCreated {\n\to.Payload = payload\n\treturn o\n}", "func NewCreateStorageV1CSINodeCreated() *CreateStorageV1CSINodeCreated {\n\n\treturn &CreateStorageV1CSINodeCreated{}\n}", "func (sdk *SDK) NewNode(prefer *cloudsvr.PreferAttrs) (*cloudsvr.CloudNode, *cloudsvr.PreferAttrs, error) {\n\n\tvar (\n\t\tpassword, _ = utils.GenPassword(24)\n\t\treq = &CreateInstanceRequest{\n\t\t\tImageID: OsImage,\n\t\t\tPassword: password,\n\t\t\tInstanceName: NodeName,\n\t\t\tInstanceChargeType: \"PostPaid\", // require RMB 100+\n\t\t\tSecurityGroupID: \"whatever\", // will be automatic rewrite\n\t\t\tInternetChargeType: \"PayByTraffic\", // traffic payment\n\t\t\tInternetMaxBandwidthOut: \"100\", // 100M\n\t\t\tLabels: NodeLabels,\n\t\t}\n\t)\n\n\t// if prefered attributes set, use prefer region & instance-type\n\tif prefer != nil && prefer.Valid() == nil {\n\t\tvar (\n\t\t\treg = prefer.RegionOrZone\n\t\t\ttyp = prefer.InstanceType\n\t\t)\n\t\tlog.Printf(\"create aliyun ecs by using prefered region %s, instance type %s ...\", reg, typ)\n\n\t\treq.RegionID = reg // cn-beijing\n\t\treq.InstanceType = typ // ecs.n4.large\n\n\t\tcreated, err := sdk.createNode(req)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\tlog.Printf(\"created prefered aliyun ecs succeed: %s\", created.ID)\n\t\treturn created, prefer, nil\n\t}\n\n\tlog.Infoln(\"creating aliyun ecs by trying all regions & types ...\")\n\n\t// if prefered created failed, or without prefer region & instance-type\n\t// try best on all region & instance-types to create the new aliyun ecs\n\tvar (\n\t\tregions []RegionType // all of aliyun regions\n\t\ttypes []InstanceTypeItemType // all of instance types within given range of mems & cpus\n\t\terr error\n\t\tcreated *cloudsvr.CloudNode\n\t)\n\n\t// list all regions\n\tregions, err = sdk.ListRegions()\n\tif err != nil {\n\t\tlog.Errorf(\"sdk.NewNode.ListRegions() error: %v\", err)\n\t\treturn nil, nil, err\n\t}\n\n\t// list specified range of instance types\n\ttypes, err = sdk.ListInstanceTypes(2, 4, 2, 8) // TODO 
range of given cpus/mems ranges\n\tif err != nil {\n\t\tlog.Errorf(\"sdk.NewNode.ListInstanceTypes() error: %v\", err)\n\t\treturn nil, nil, err\n\t}\n\n\tvar (\n\t\tuseRegionID, useInsType string\n\t)\n\t// range all regions & types to try to create ecs instance\n\tfor _, reg := range regions {\n\t\tfor _, typ := range types {\n\t\t\treq.RegionID = reg.RegionID // cn-beijing\n\t\t\treq.InstanceType = typ.InstanceTypeID // ecs.n4.large\n\n\t\t\t// if created succeed, directly return\n\t\t\tcreated, err = sdk.createNode(req)\n\t\t\tif err == nil {\n\t\t\t\tuseRegionID, useInsType = reg.RegionID, typ.InstanceTypeID\n\t\t\t\tgoto END\n\t\t\t}\n\n\t\t\tif sdk.isFatalError(err) {\n\t\t\t\tlog.Errorf(\"create aliyun ecs got fatal error, stop retry: %v\", err)\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\n\t\t\tlog.Warnf(\"create aliyun ecs failed: %v, will retry another region or type\", err)\n\t\t}\n\t}\n\nEND:\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tlog.Printf(\"created aliyun ecs %s at %s and type is %s\", created.ID, useRegionID, useInsType)\n\treturn created, &cloudsvr.PreferAttrs{RegionOrZone: useRegionID, InstanceType: useInsType}, nil\n}", "func CreateBucket(w http.ResponseWriter, r *http.Request) *appError {\n decoder := json.NewDecoder(r.Body)\n var ecsBucket ECSBucket\n err := decoder.Decode(&ecsBucket)\n if err != nil {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Can't decode JSON data\"}\n }\n headers := make(map[string][]string)\n if ecsBucket.ReplicationGroup != \"\" {\n headers[\"x-emc-vpool\"] = []string{ecsBucket.ReplicationGroup}\n }\n if ecsBucket.MetadataSearch != \"\" {\n headers[\"x-emc-metadata-search\"] = []string{ecsBucket.MetadataSearch}\n }\n if ecsBucket.EnableADO {\n headers[\"x-emc-is-stale-allowed\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-is-stale-allowed\"] = []string{\"false\"}\n }\n if ecsBucket.EnableFS {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableCompliance {\n headers[\"x-emc-compliance-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-compliance-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableEncryption {\n headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"false\"}\n }\n var bucketCreateResponse Response\n if ecsBucket.Api == \"s3\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = s3Request(s3, ecsBucket.Name, \"PUT\", \"/\", headers, \"\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n if bucketCreateResponse.Code == 200 {\n rendering.JSON(w, http.StatusOK, ecsBucket.Name)\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"swift\" {\n bucketCreateResponse, err = swiftRequest(ecsBucket.Endpoint, ecsBucket.User, ecsBucket.Password, ecsBucket.Name, \"PUT\", \"/\", headers, \"\")\n log.Print(bucketCreateResponse)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 
{\n rendering.JSON(w, http.StatusOK, ecsBucket.Name)\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"atmos\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = atmosRequest(ecsBucket.Endpoint, s3.AccessKey, s3.SecretKey, \"\", \"PUT\", \"/rest/subtenant\", headers, \"\")\n if err != nil {\n log.Print(err)\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 {\n rendering.JSON(w, http.StatusOK, bucketCreateResponse.ResponseHeaders[\"Subtenantid\"][0])\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n }\n\n return nil\n}", "func NewWatchStorageV1CSINodeListOK() *WatchStorageV1CSINodeListOK {\n\treturn &WatchStorageV1CSINodeListOK{}\n}", "func NewPostManagementKubernetesIoV1NodesAccepted() *PostManagementKubernetesIoV1NodesAccepted {\n\n\treturn &PostManagementKubernetesIoV1NodesAccepted{}\n}", "func NewCreateExtensionsV1beta1NamespacedIngressAccepted() *CreateExtensionsV1beta1NamespacedIngressAccepted {\n\n\treturn &CreateExtensionsV1beta1NamespacedIngressAccepted{}\n}", "func (client IdentityClient) createTagDefault(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/tagDefaults\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateTagDefaultResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func NewReplaceStorageV1CSINodeOK() *ReplaceStorageV1CSINodeOK {\n\n\treturn &ReplaceStorageV1CSINodeOK{}\n}", "func NewCreateTCPCheckAccepted() *CreateTCPCheckAccepted {\n\n\treturn &CreateTCPCheckAccepted{}\n}", "func (client *NetworkToNetworkInterconnectsClient) createCreateRequest(ctx context.Context, resourceGroupName string, networkFabricName string, networkToNetworkInterconnectName string, body NetworkToNetworkInterconnect, options *NetworkToNetworkInterconnectsClientBeginCreateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetworkFabric/networkFabrics/{networkFabricName}/networkToNetworkInterconnects/{networkToNetworkInterconnectName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif networkFabricName == \"\" {\n\t\treturn nil, errors.New(\"parameter networkFabricName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{networkFabricName}\", url.PathEscape(networkFabricName))\n\tif networkToNetworkInterconnectName == \"\" {\n\t\treturn nil, errors.New(\"parameter 
networkToNetworkInterconnectName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{networkToNetworkInterconnectName}\", url.PathEscape(networkToNetworkInterconnectName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-06-15\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, body)\n}", "func NewSnapshotCreateAccepted() *SnapshotCreateAccepted {\n\treturn &SnapshotCreateAccepted{}\n}", "func (s *SmartContract) CreateOi(ctx contractapi.TransactionContextInterface, oiNumber string, saudacao string, despedida string, oidenovo string, pessoa string) error {\n\tOi := Oi{\n\t\tSaudacao: saudacao,\n\t\tDespedida: despedida,\n\t\tOidenovo: oidenovo,\n\t\tPessoa: pessoa,\n\t}\n\n\toiAsBytes, _ := json.Marshal(Oi)\n\n\treturn ctx.GetStub().PutState(oiNumber, oiAsBytes)\n}", "func (o *CreateStorageV1CSINodeCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func NewCreateCoreV1PersistentVolumeAccepted() *CreateCoreV1PersistentVolumeAccepted {\n\treturn &CreateCoreV1PersistentVolumeAccepted{}\n}", "func NewReplaceStorageV1CSINodeOK() *ReplaceStorageV1CSINodeOK {\n\treturn &ReplaceStorageV1CSINodeOK{}\n}", "func NewCreateStorageV1CSINodeUnauthorized() *CreateStorageV1CSINodeUnauthorized {\n\n\treturn &CreateStorageV1CSINodeUnauthorized{}\n}", "func CreateGetNerCustomizedSeaEcomRequest() (request *GetNerCustomizedSeaEcomRequest) {\n\trequest = &GetNerCustomizedSeaEcomRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetNerCustomizedSeaEcom\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateAcceptInvitationRequest() (request *AcceptInvitationRequest) {\n\trequest = &AcceptInvitationRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Baas\", \"2018-07-31\", \"AcceptInvitation\", \"\", \"\")\n\treturn\n}", "func NewCreateCoreV1NamespacedPodAccepted() *CreateCoreV1NamespacedPodAccepted {\n\treturn &CreateCoreV1NamespacedPodAccepted{}\n}", "func NewCreateTaskAccepted() *CreateTaskAccepted {\n\n\treturn &CreateTaskAccepted{}\n}", "func Create (w http.ResponseWriter, r *http.Request) {\n\t/* This is an SBC */\n\tif CREATED == false {\n\t\t/* Move the checking of ID up first to confirm this is allowed */\n\t\t/* Do most of start. Just don't download because that would be downloading from self */\n\t\t/* Get address and ID */\n\t\t/* Get port number and set that to ID */\n\t\t/* Save localhost as Addr */\n\t\tsplitHostPort := strings.Split(r.Host, \":\")\n\t\ti, err := strconv.ParseInt(splitHostPort[1], 10, 32)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(500)\n\t\t\tpanic(err)\n\t\t}\n\t\t/* ID is now port number. 
Address is now correct Address */\n\t\tID = int32(i)\n\t\tSELF_ADDR = r.Host\n\t\t/* Check if ID is allowed in ALLOWED_IDs */\n\t\tif _, ok := ALLOWED_IDS[ID]; ok {\n\t\t\tnewBlockChain := data.NewBlockChain()\n\n\t\t\tmpt1 := p1.MerklePatriciaTrie{}\n\t\t\tmpt1.Initial()\n\t\t\tmpt1.Insert(\"1\", \"Origin\")\n\n\t\t\tmpt2 := p1.MerklePatriciaTrie{}\n\t\t\tmpt2.Initial()\n\t\t\tmpt2.Insert(\"1\", \"Decoy1\")\n\n\t\t\tmpt3 := p1.MerklePatriciaTrie{}\n\t\t\tmpt3.Initial()\n\t\t\tmpt3.Insert(\"1\", \"Decoy2\")\n\n\t\t\tmpt4 := p1.MerklePatriciaTrie{}\n\t\t\tmpt4.Initial()\n\t\t\tmpt4.Insert(\"1\", \"Decoy3\")\n\n\t\t\thexPubKey := hexutil.Encode(signature_p.PUBLIC_KEY)\n\t\t\tnewBlockChain.GenBlock(mpt1, hexPubKey)\n\t\t\tnewBlockChain.GenBlock(mpt2, hexPubKey)\n\t\t\tnewBlockChain.GenBlock(mpt3, hexPubKey)\n\t\t\tnewBlockChain.GenBlock(mpt4, hexPubKey)\n\t\t\t/* Set Global variable SBC to be this new blockchain */\n\t\t\tSBC = newBlockChain\n\t\t\t/* Generate Multiple Blocks Initially */\n\t\t\t\t\n\t\t\tblockChainJson, _ := SBC.BlockChainToJson()\n\t\t\t/* Write this to the server */\n\t\t\tw.Write([]byte(blockChainJson))\n\n\t\t\t/* Need to instantiate the peer list */\n\t\t\tPeers = data.NewPeerList(ID, 32)\n\t\t\tBALLOT = ReadDataFromBallot()\n\t\t\tCREATED = true\n\t\t}\n\t}\n}", "func NewCreateCoreV1NamespacedServiceAccountTokenAccepted() *CreateCoreV1NamespacedServiceAccountTokenAccepted {\n\n\treturn &CreateCoreV1NamespacedServiceAccountTokenAccepted{}\n}", "func (o *CreateStorageV1CSINodeAccepted) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func NewCreateRbacAuthorizationV1NamespacedRoleAccepted() *CreateRbacAuthorizationV1NamespacedRoleAccepted {\n\n\treturn &CreateRbacAuthorizationV1NamespacedRoleAccepted{}\n}", "func (r ApiCreateHyperflexExtIscsiStoragePolicyRequest) IfNoneMatch(ifNoneMatch string) ApiCreateHyperflexExtIscsiStoragePolicyRequest {\n\tr.ifNoneMatch = &ifNoneMatch\n\treturn r\n}", "func (o *ReplaceStorageV1CSINodeCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func NewAddItemAccepted() *AddItemAccepted {\n\n\treturn &AddItemAccepted{}\n}", "func NewCreateCoreV1NamespacedServiceAccountTokenAccepted() *CreateCoreV1NamespacedServiceAccountTokenAccepted {\n\treturn &CreateCoreV1NamespacedServiceAccountTokenAccepted{}\n}", "func NewCreateAuthenticationV1beta1TokenReviewAccepted() *CreateAuthenticationV1beta1TokenReviewAccepted {\n\n\treturn &CreateAuthenticationV1beta1TokenReviewAccepted{}\n}", "func (o *CreateTaskAccepted) WithPayload(payload strfmt.UUID) *CreateTaskAccepted {\n\to.Payload = payload\n\treturn o\n}", "func NewCreateRbacAuthorizationV1alpha1NamespacedRoleAccepted() *CreateRbacAuthorizationV1alpha1NamespacedRoleAccepted {\n\n\treturn &CreateRbacAuthorizationV1alpha1NamespacedRoleAccepted{}\n}", "func (client *WebAppsClient) createOrUpdateVnetConnectionGatewaySlotCreateRequest(ctx context.Context, resourceGroupName string, name string, vnetName string, gatewayName string, slot string, connectionEnvelope VnetGateway, options *WebAppsCreateOrUpdateVnetConnectionGatewaySlotOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/virtualNetworkConnections/{vnetName}/gateways/{gatewayName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif vnetName == \"\" {\n\t\treturn nil, errors.New(\"parameter vnetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vnetName}\", url.PathEscape(vnetName))\n\tif gatewayName == \"\" {\n\t\treturn nil, errors.New(\"parameter gatewayName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{gatewayName}\", url.PathEscape(gatewayName))\n\tif slot == \"\" {\n\t\treturn nil, errors.New(\"parameter slot cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{slot}\", url.PathEscape(slot))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, connectionEnvelope)\n}", "func CreatePolicyWithDefaults(nbmaster string, httpClient *http.Client, jwt string) {\r\n fmt.Printf(\"\\nSending a POST request to create %s with defaults...\\n\", testPolicyName)\r\n\r\n policy := map[string]interface{}{\r\n \"data\": map[string]interface{}{\r\n \"type\": \"policy\",\r\n \"id\": testPolicyName,\r\n \"attributes\": map[string]interface{}{\r\n \"policy\": map[string]interface{}{\r\n \"policyName\": testPolicyName,\r\n \"policyType\": \"VMware\",\r\n \"policyAttributes\": map[string]interface{}{},\r\n \"clients\":[]interface{}{},\r\n \"schedules\":[]interface{}{},\r\n \"backupSelections\": map[string]interface{}{\r\n \"selections\": []interface{}{}}}}}}\r\n\r\n policyRequest, _ := json.Marshal(policy)\r\n\r\n uri := \"https://\" + nbmaster + \":\" + port + \"/netbackup/\" + policiesUri\r\n\r\n request, _ := http.NewRequest(http.MethodPost, uri, bytes.NewBuffer(policyRequest))\r\n request.Header.Add(\"Content-Type\", contentTypeV2);\r\n request.Header.Add(\"Authorization\", jwt);\r\n\r\n response, err := httpClient.Do(request)\r\n\r\n if err != nil {\r\n fmt.Printf(\"The HTTP request failed with error: %s\\n\", err)\r\n panic(\"Unable to create policy.\\n\")\r\n } else {\r\n if response.StatusCode != 204 {\r\n printErrorResponse(response)\r\n } else {\r\n fmt.Printf(\"%s created successfully.\\n\", testPolicyName);\r\n responseDetails, _ := httputil.DumpResponse(response, true);\r\n fmt.Printf(string(responseDetails))\r\n }\r\n }\r\n}", "func (client *ContainerClient) createCreateRequest(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (*policy.Request, error) {\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"restype\", 
\"container\")\n\tif options != nil && options.Timeout != nil {\n\t\treqQP.Set(\"timeout\", strconv.FormatInt(int64(*options.Timeout), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.Metadata != nil {\n\t\tfor k, v := range options.Metadata {\n\t\t\tif v != nil {\n\t\t\t\treq.Raw().Header[\"x-ms-meta-\"+k] = []string{*v}\n\t\t\t}\n\t\t}\n\t}\n\tif options != nil && options.Access != nil {\n\t\treq.Raw().Header[\"x-ms-blob-public-access\"] = []string{string(*options.Access)}\n\t}\n\treq.Raw().Header[\"x-ms-version\"] = []string{\"2020-10-02\"}\n\tif options != nil && options.RequestID != nil {\n\t\treq.Raw().Header[\"x-ms-client-request-id\"] = []string{*options.RequestID}\n\t}\n\tif containerCPKScopeInfo != nil && containerCPKScopeInfo.DefaultEncryptionScope != nil {\n\t\treq.Raw().Header[\"x-ms-default-encryption-scope\"] = []string{*containerCPKScopeInfo.DefaultEncryptionScope}\n\t}\n\tif containerCPKScopeInfo != nil && containerCPKScopeInfo.PreventEncryptionScopeOverride != nil {\n\t\treq.Raw().Header[\"x-ms-deny-encryption-scope-override\"] = []string{strconv.FormatBool(*containerCPKScopeInfo.PreventEncryptionScopeOverride)}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/xml\"}\n\treturn req, nil\n}", "func NewCreateAwsCloudAccountAsyncAccepted() *CreateAwsCloudAccountAsyncAccepted {\n\treturn &CreateAwsCloudAccountAsyncAccepted{}\n}", "func ExampleBackupVaultsClient_BeginCreateOrUpdate_createBackupVaultWithMsi() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armdataprotection.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpoller, err := clientFactory.NewBackupVaultsClient().BeginCreateOrUpdate(ctx, \"SampleResourceGroup\", \"swaggerExample\", armdataprotection.BackupVaultResource{\n\t\tLocation: to.Ptr(\"WestUS\"),\n\t\tTags: map[string]*string{\n\t\t\t\"key1\": to.Ptr(\"val1\"),\n\t\t},\n\t\tIdentity: &armdataprotection.DppIdentityDetails{\n\t\t\tType: to.Ptr(\"systemAssigned\"),\n\t\t},\n\t\tProperties: &armdataprotection.BackupVault{\n\t\t\tFeatureSettings: &armdataprotection.FeatureSettings{\n\t\t\t\tCrossRegionRestoreSettings: &armdataprotection.CrossRegionRestoreSettings{\n\t\t\t\t\tState: to.Ptr(armdataprotection.CrossRegionRestoreStateEnabled),\n\t\t\t\t},\n\t\t\t},\n\t\t\tMonitoringSettings: &armdataprotection.MonitoringSettings{\n\t\t\t\tAzureMonitorAlertSettings: &armdataprotection.AzureMonitorAlertSettings{\n\t\t\t\t\tAlertsForAllJobFailures: to.Ptr(armdataprotection.AlertsStateEnabled),\n\t\t\t\t},\n\t\t\t},\n\t\t\tSecuritySettings: &armdataprotection.SecuritySettings{\n\t\t\t\tSoftDeleteSettings: &armdataprotection.SoftDeleteSettings{\n\t\t\t\t\tRetentionDurationInDays: to.Ptr[float64](14),\n\t\t\t\t\tState: to.Ptr(armdataprotection.SoftDeleteState(\"Enabled\")),\n\t\t\t\t},\n\t\t\t},\n\t\t\tStorageSettings: []*armdataprotection.StorageSetting{\n\t\t\t\t{\n\t\t\t\t\tType: to.Ptr(armdataprotection.StorageSettingTypesLocallyRedundant),\n\t\t\t\t\tDatastoreType: to.Ptr(armdataprotection.StorageSettingStoreTypesVaultStore),\n\t\t\t\t}},\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\tres, err := poller.PollUntilDone(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n\t// You could use response 
here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.BackupVaultResource = armdataprotection.BackupVaultResource{\n\t// \tName: to.Ptr(\"swaggerExample\"),\n\t// \tType: to.Ptr(\"Microsoft.DataProtection/Backupvaults\"),\n\t// \tID: to.Ptr(\"/subscriptions/0b352192-dcac-4cc7-992e-a96190ccc68c/resourceGroups/SampleResourceGroup/providers/Microsoft.DataProtection/Backupvaults/swaggerExample\"),\n\t// \tLocation: to.Ptr(\"WestUS\"),\n\t// \tTags: map[string]*string{\n\t// \t\t\"key1\": to.Ptr(\"val1\"),\n\t// \t},\n\t// \tIdentity: &armdataprotection.DppIdentityDetails{\n\t// \t\tType: to.Ptr(\"SystemAssigned\"),\n\t// \t\tPrincipalID: to.Ptr(\"c009b9a0-0024-417c-83cd-025d3776045d\"),\n\t// \t\tTenantID: to.Ptr(\"83abe5cd-bcc3-441a-bd86-e6a75360cecc\"),\n\t// \t},\n\t// \tProperties: &armdataprotection.BackupVault{\n\t// \t\tFeatureSettings: &armdataprotection.FeatureSettings{\n\t// \t\t\tCrossRegionRestoreSettings: &armdataprotection.CrossRegionRestoreSettings{\n\t// \t\t\t\tState: to.Ptr(armdataprotection.CrossRegionRestoreStateEnabled),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\tMonitoringSettings: &armdataprotection.MonitoringSettings{\n\t// \t\t\tAzureMonitorAlertSettings: &armdataprotection.AzureMonitorAlertSettings{\n\t// \t\t\t\tAlertsForAllJobFailures: to.Ptr(armdataprotection.AlertsStateEnabled),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\tProvisioningState: to.Ptr(armdataprotection.ProvisioningStateSucceeded),\n\t// \t\tSecureScore: to.Ptr(armdataprotection.SecureScoreLevelAdequate),\n\t// \t\tSecuritySettings: &armdataprotection.SecuritySettings{\n\t// \t\t\tSoftDeleteSettings: &armdataprotection.SoftDeleteSettings{\n\t// \t\t\t\tRetentionDurationInDays: to.Ptr[float64](14),\n\t// \t\t\t\tState: to.Ptr(armdataprotection.SoftDeleteState(\"Enabled\")),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\tStorageSettings: []*armdataprotection.StorageSetting{\n\t// \t\t\t{\n\t// \t\t\t\tType: to.Ptr(armdataprotection.StorageSettingTypesLocallyRedundant),\n\t// \t\t\t\tDatastoreType: to.Ptr(armdataprotection.StorageSettingStoreTypesVaultStore),\n\t// \t\t}},\n\t// \t},\n\t// }\n}", "func CreateBucket(w http.ResponseWriter, r *http.Request) *appError {\n session, err := store.Get(r, \"session-name\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n s3 := S3{\n EndPointString: session.Values[\"Endpoint\"].(string),\n AccessKey: session.Values[\"AccessKey\"].(string),\n SecretKey: session.Values[\"SecretKey\"].(string),\n Namespace: session.Values[\"Namespace\"].(string),\n }\n\n decoder := json.NewDecoder(r.Body)\n var bucket NewBucket\n err = decoder.Decode(&bucket)\n if err != nil {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Can't decode JSON data\"}\n }\n\n // Add the necessary headers for Metadata Search and Access During Outage\n createBucketHeaders := map[string][]string{}\n createBucketHeaders[\"Content-Type\"] = []string{\"application/xml\"}\n createBucketHeaders[\"x-emc-is-stale-allowed\"] = []string{\"true\"}\n createBucketHeaders[\"x-emc-metadata-search\"] = []string{\"ObjectName,x-amz-meta-image-width;Integer,x-amz-meta-image-height;Integer,x-amz-meta-gps-latitude;Decimal,x-amz-meta-gps-longitude;Decimal\"}\n\n createBucketResponse, _ := 
s3Request(s3, bucket.Name, \"PUT\", \"/\", createBucketHeaders, \"\")\n\n // Enable CORS after the bucket creation to allow the web browser to send requests directly to ECS\n if createBucketResponse.Code == 200 {\n enableBucketCorsHeaders := map[string][]string{}\n enableBucketCorsHeaders[\"Content-Type\"] = []string{\"application/xml\"}\n corsConfiguration := `\n <CORSConfiguration>\n <CORSRule>\n <AllowedOrigin>*</AllowedOrigin>\n <AllowedHeader>*</AllowedHeader>\n <ExposeHeader>x-amz-meta-image-width</ExposeHeader>\n <ExposeHeader>x-amz-meta-image-height</ExposeHeader>\n <ExposeHeader>x-amz-meta-gps-latitude</ExposeHeader>\n <ExposeHeader>x-amz-meta-gps-longitude</ExposeHeader>\n <AllowedMethod>HEAD</AllowedMethod>\n <AllowedMethod>GET</AllowedMethod>\n <AllowedMethod>PUT</AllowedMethod>\n <AllowedMethod>POST</AllowedMethod>\n <AllowedMethod>DELETE</AllowedMethod>\n </CORSRule>\n </CORSConfiguration>\n `\n enableBucketCorsResponse, _ := s3Request(s3, bucket.Name, \"PUT\", \"/?cors\", enableBucketCorsHeaders, corsConfiguration)\n if enableBucketCorsResponse.Code == 200 {\n rendering.JSON(w, http.StatusOK, struct {\n CorsConfiguration string `json:\"cors_configuration\"`\n Bucket string `json:\"bucket\"`\n } {\n CorsConfiguration: corsConfiguration,\n Bucket: bucket.Name,\n })\n } else {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Bucket created, but CORS can't be enabled\"}\n }\n } else {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Bucket can't be created\"}\n }\n return nil\n}", "func NewInternalV1StorageRegionsThresholdsPutAccepted() *InternalV1StorageRegionsThresholdsPutAccepted {\n\treturn &InternalV1StorageRegionsThresholdsPutAccepted{}\n}", "func ExampleVaultsClient_BeginCreateOrUpdate_createOrUpdateAVaultWithNetworkAcls() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armkeyvault.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpoller, err := clientFactory.NewVaultsClient().BeginCreateOrUpdate(ctx, \"sample-resource-group\", \"sample-vault\", armkeyvault.VaultCreateOrUpdateParameters{\n\t\tLocation: to.Ptr(\"westus\"),\n\t\tProperties: &armkeyvault.VaultProperties{\n\t\t\tEnabledForDeployment: to.Ptr(true),\n\t\t\tEnabledForDiskEncryption: to.Ptr(true),\n\t\t\tEnabledForTemplateDeployment: to.Ptr(true),\n\t\t\tNetworkACLs: &armkeyvault.NetworkRuleSet{\n\t\t\t\tBypass: to.Ptr(armkeyvault.NetworkRuleBypassOptionsAzureServices),\n\t\t\t\tDefaultAction: to.Ptr(armkeyvault.NetworkRuleActionDeny),\n\t\t\t\tIPRules: []*armkeyvault.IPRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tValue: to.Ptr(\"124.56.78.91\"),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValue: to.Ptr(\"'10.91.4.0/24'\"),\n\t\t\t\t\t}},\n\t\t\t\tVirtualNetworkRules: []*armkeyvault.VirtualNetworkRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tID: to.Ptr(\"/subscriptions/subid/resourceGroups/rg1/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/subnet1\"),\n\t\t\t\t\t}},\n\t\t\t},\n\t\t\tSKU: &armkeyvault.SKU{\n\t\t\t\tName: to.Ptr(armkeyvault.SKUNameStandard),\n\t\t\t\tFamily: to.Ptr(armkeyvault.SKUFamilyA),\n\t\t\t},\n\t\t\tTenantID: to.Ptr(\"00000000-0000-0000-0000-000000000000\"),\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\tres, err := poller.PollUntilDone(ctx, nil)\n\tif err != nil 
{\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.Vault = armkeyvault.Vault{\n\t// \tName: to.Ptr(\"sample-vault\"),\n\t// \tType: to.Ptr(\"Microsoft.KeyVault/vaults\"),\n\t// \tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sample-resource-group/providers/Microsoft.KeyVault/vaults/sample-vault\"),\n\t// \tLocation: to.Ptr(\"westus\"),\n\t// \tProperties: &armkeyvault.VaultProperties{\n\t// \t\tEnabledForDeployment: to.Ptr(true),\n\t// \t\tEnabledForDiskEncryption: to.Ptr(true),\n\t// \t\tEnabledForTemplateDeployment: to.Ptr(true),\n\t// \t\tHsmPoolResourceID: to.Ptr(\"00000000-0000-0000-0000-000000000000\"),\n\t// \t\tNetworkACLs: &armkeyvault.NetworkRuleSet{\n\t// \t\t\tBypass: to.Ptr(armkeyvault.NetworkRuleBypassOptionsAzureServices),\n\t// \t\t\tDefaultAction: to.Ptr(armkeyvault.NetworkRuleActionDeny),\n\t// \t\t\tIPRules: []*armkeyvault.IPRule{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tValue: to.Ptr(\"124.56.78.91/32\"),\n\t// \t\t\t\t},\n\t// \t\t\t\t{\n\t// \t\t\t\t\tValue: to.Ptr(\"'10.91.4.0/24'\"),\n\t// \t\t\t}},\n\t// \t\t\tVirtualNetworkRules: []*armkeyvault.VirtualNetworkRule{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tID: to.Ptr(\"/subscriptions/subid/resourcegroups/rg1/providers/microsoft.network/virtualnetworks/test-vnet/subnets/subnet1\"),\n\t// \t\t\t}},\n\t// \t\t},\n\t// \t\tSKU: &armkeyvault.SKU{\n\t// \t\t\tName: to.Ptr(armkeyvault.SKUNameStandard),\n\t// \t\t\tFamily: to.Ptr(armkeyvault.SKUFamilyA),\n\t// \t\t},\n\t// \t\tTenantID: to.Ptr(\"00000000-0000-0000-0000-000000000000\"),\n\t// \t\tVaultURI: to.Ptr(\"https://sample-vault.vault.azure.net\"),\n\t// \t},\n\t// \tSystemData: &armkeyvault.SystemData{\n\t// \t\tCreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2020-01-01T12:00:00.0000000Z\"); return t}()),\n\t// \t\tCreatedBy: to.Ptr(\"keyVaultUser1\"),\n\t// \t\tCreatedByType: to.Ptr(armkeyvault.IdentityTypeUser),\n\t// \t\tLastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2020-01-01T12:00:00.0000000Z\"); return t}()),\n\t// \t\tLastModifiedBy: to.Ptr(\"keyVaultUser2\"),\n\t// \t\tLastModifiedByType: to.Ptr(armkeyvault.IdentityTypeUser),\n\t// \t},\n\t// \tTags: map[string]*string{\n\t// \t},\n\t// }\n}", "func (r ApiCreateHyperflexClusterStoragePolicyRequest) IfNoneMatch(ifNoneMatch string) ApiCreateHyperflexClusterStoragePolicyRequest {\n\tr.ifNoneMatch = &ifNoneMatch\n\treturn r\n}", "func NewWeaviateThingTemplatesCreateAccepted() *WeaviateThingTemplatesCreateAccepted {\n\treturn &WeaviateThingTemplatesCreateAccepted{}\n}", "func (o *CreateStorageV1CSINodeOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func only_make_agreement(ag *contract_api.SolidityContract, agID []byte, sig_hash string, sig string, counterparty string) {\n err := error(nil)\n\n log.Printf(\"Make an agreement with ID:%v\\n\", agID)\n p := make([]interface{},0,10)\n p = append(p, agID)\n p = append(p, sig_hash[2:])\n p = append(p, sig[2:])\n p = append(p, 
counterparty)\n if _, err = ag.Invoke_method(\"create_agreement\", p); err != nil {\n log.Printf(\"...terminating, could not invoke create_agreement: %v\\n\", err)\n os.Exit(1)\n }\n log.Printf(\"Create agreement %v successfully submitted.\\n\", agID)\n}", "func NewCreateIOCDefault(code int) *CreateIOCDefault {\n\treturn &CreateIOCDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewReadFromMicrostorageAccepted() *ReadFromMicrostorageAccepted {\n\treturn &ReadFromMicrostorageAccepted{}\n}", "func CreateBeginVnDialogueRequest() (request *BeginVnDialogueRequest) {\n\trequest = &BeginVnDialogueRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"CloudCallCenter\", \"2017-07-05\", \"BeginVnDialogue\", \"\", \"\")\n\trequest.Method = requests.GET\n\treturn\n}", "func (a *LicenseAgreementApiService) GetCurrentLicenseAgreementAcceptedExecute(r ApiGetCurrentLicenseAgreementAcceptedRequest) (LicenseAgreementAcceptResult, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue LicenseAgreementAcceptResult\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"LicenseAgreementApiService.GetCurrentLicenseAgreementAccepted\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/api/v1/licenseAgreement/accepted\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\tif r.xApiVersion == nil {\n\t\treturn localVarReturnValue, nil, reportError(\"xApiVersion is required and must be specified\")\n\t}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\", \"application/problem+json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tlocalVarHeaderParams[\"x-api-version\"] = parameterToString(*r.xApiVersion, \"\")\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif apiKey, ok := auth[\"Bearer\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif apiKey.Prefix != \"\" {\n\t\t\t\t\tkey = apiKey.Prefix + \" \" + apiKey.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = apiKey.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := 
_ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func resourceVolterraNetworkInterfaceCreate(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*APIClient)\n\n\tcreateMeta := &ves_io_schema.ObjectCreateMetaType{}\n\tcreateSpec := &ves_io_schema_network_interface.CreateSpecType{}\n\tcreateReq := &ves_io_schema_network_interface.CreateRequest{\n\t\tMetadata: createMeta,\n\t\tSpec: createSpec,\n\t}\n\n\tif v, ok := d.GetOk(\"annotations\"); ok && !isIntfNil(v) {\n\n\t\tms := map[string]string{}\n\n\t\tfor k, v := range v.(map[string]interface{}) {\n\t\t\tval := v.(string)\n\t\t\tms[k] = val\n\t\t}\n\t\tcreateMeta.Annotations = ms\n\t}\n\n\tif v, ok := d.GetOk(\"description\"); ok && !isIntfNil(v) {\n\t\tcreateMeta.Description =\n\t\t\tv.(string)\n\t}\n\n\tif v, ok := d.GetOk(\"disable\"); ok && !isIntfNil(v) {\n\t\tcreateMeta.Disable =\n\t\t\tv.(bool)\n\t}\n\n\tif v, ok := d.GetOk(\"labels\"); ok && !isIntfNil(v) {\n\n\t\tms := map[string]string{}\n\n\t\tfor k, v := range v.(map[string]interface{}) {\n\t\t\tval := v.(string)\n\t\t\tms[k] = val\n\t\t}\n\t\tcreateMeta.Labels = ms\n\t}\n\n\tif v, ok := d.GetOk(\"name\"); ok && !isIntfNil(v) {\n\t\tcreateMeta.Name =\n\t\t\tv.(string)\n\t}\n\n\tif v, ok := d.GetOk(\"namespace\"); ok && !isIntfNil(v) {\n\t\tcreateMeta.Namespace =\n\t\t\tv.(string)\n\t}\n\n\t//interface_choice\n\n\tinterfaceChoiceTypeFound := false\n\n\tif v, ok := d.GetOk(\"dedicated_interface\"); ok && !interfaceChoiceTypeFound {\n\n\t\tinterfaceChoiceTypeFound = true\n\t\tinterfaceChoiceInt := &ves_io_schema_network_interface.CreateSpecType_DedicatedInterface{}\n\t\tinterfaceChoiceInt.DedicatedInterface = &ves_io_schema_network_interface.DedicatedInterfaceType{}\n\t\tcreateSpec.InterfaceChoice = interfaceChoiceInt\n\n\t\tsl := v.(*schema.Set).List()\n\t\tfor _, set := range sl {\n\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t// device\n\n\t\t\tif v, ok := cs[\"device\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tinterfaceChoiceInt.DedicatedInterface.Device = 
v.(string)\n\t\t\t}\n\n\t\t\t// monitoring_choice\n\n\t\t\tmonitoringChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"monitor\"]; ok && !isIntfNil(v) && !monitoringChoiceTypeFound {\n\n\t\t\t\tmonitoringChoiceTypeFound = true\n\t\t\t\t_ = v\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"monitor_disabled\"]; ok && !isIntfNil(v) && !monitoringChoiceTypeFound {\n\n\t\t\t\tmonitoringChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tmonitoringChoiceInt := &ves_io_schema_network_interface.DedicatedInterfaceType_MonitorDisabled{}\n\t\t\t\t\tmonitoringChoiceInt.MonitorDisabled = &ves_io_schema.Empty{}\n\t\t\t\t\tinterfaceChoiceInt.DedicatedInterface.MonitoringChoice = monitoringChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t// mtu\n\n\t\t\tif v, ok := cs[\"mtu\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tinterfaceChoiceInt.DedicatedInterface.Mtu = uint32(v.(int))\n\t\t\t}\n\n\t\t\t// node_choice\n\n\t\t\tnodeChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"cluster\"]; ok && !isIntfNil(v) && !nodeChoiceTypeFound {\n\n\t\t\t\tnodeChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tnodeChoiceInt := &ves_io_schema_network_interface.DedicatedInterfaceType_Cluster{}\n\t\t\t\t\tnodeChoiceInt.Cluster = &ves_io_schema.Empty{}\n\t\t\t\t\tinterfaceChoiceInt.DedicatedInterface.NodeChoice = nodeChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"node\"]; ok && !isIntfNil(v) && !nodeChoiceTypeFound {\n\n\t\t\t\tnodeChoiceTypeFound = true\n\t\t\t\tnodeChoiceInt := &ves_io_schema_network_interface.DedicatedInterfaceType_Node{}\n\n\t\t\t\tinterfaceChoiceInt.DedicatedInterface.NodeChoice = nodeChoiceInt\n\n\t\t\t\tnodeChoiceInt.Node = v.(string)\n\n\t\t\t}\n\n\t\t\t// primary_choice\n\n\t\t\tprimaryChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"is_primary\"]; ok && !isIntfNil(v) && !primaryChoiceTypeFound {\n\n\t\t\t\tprimaryChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tprimaryChoiceInt := &ves_io_schema_network_interface.DedicatedInterfaceType_IsPrimary{}\n\t\t\t\t\tprimaryChoiceInt.IsPrimary = &ves_io_schema.Empty{}\n\t\t\t\t\tinterfaceChoiceInt.DedicatedInterface.PrimaryChoice = primaryChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"not_primary\"]; ok && !isIntfNil(v) && !primaryChoiceTypeFound {\n\n\t\t\t\tprimaryChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tprimaryChoiceInt := &ves_io_schema_network_interface.DedicatedInterfaceType_NotPrimary{}\n\t\t\t\t\tprimaryChoiceInt.NotPrimary = &ves_io_schema.Empty{}\n\t\t\t\t\tinterfaceChoiceInt.DedicatedInterface.PrimaryChoice = primaryChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t// priority\n\n\t\t\tif v, ok := cs[\"priority\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tinterfaceChoiceInt.DedicatedInterface.Priority = uint32(v.(int))\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tif v, ok := d.GetOk(\"dedicated_management_interface\"); ok && !interfaceChoiceTypeFound {\n\n\t\tinterfaceChoiceTypeFound = true\n\t\tinterfaceChoiceInt := &ves_io_schema_network_interface.CreateSpecType_DedicatedManagementInterface{}\n\t\tinterfaceChoiceInt.DedicatedManagementInterface = &ves_io_schema_network_interface.DedicatedManagementInterfaceType{}\n\t\tcreateSpec.InterfaceChoice = interfaceChoiceInt\n\n\t\tsl := v.(*schema.Set).List()\n\t\tfor _, set := range sl {\n\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t// device\n\n\t\t\tif v, ok := cs[\"device\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tinterfaceChoiceInt.DedicatedManagementInterface.Device = v.(string)\n\t\t\t}\n\n\t\t\t// mtu\n\n\t\t\tif v, ok := cs[\"mtu\"]; ok && !isIntfNil(v) 
{\n\n\t\t\t\tinterfaceChoiceInt.DedicatedManagementInterface.Mtu = uint32(v.(int))\n\t\t\t}\n\n\t\t\t// node_choice\n\n\t\t\tnodeChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"cluster\"]; ok && !isIntfNil(v) && !nodeChoiceTypeFound {\n\n\t\t\t\tnodeChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tnodeChoiceInt := &ves_io_schema_network_interface.DedicatedManagementInterfaceType_Cluster{}\n\t\t\t\t\tnodeChoiceInt.Cluster = &ves_io_schema.Empty{}\n\t\t\t\t\tinterfaceChoiceInt.DedicatedManagementInterface.NodeChoice = nodeChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"node\"]; ok && !isIntfNil(v) && !nodeChoiceTypeFound {\n\n\t\t\t\tnodeChoiceTypeFound = true\n\t\t\t\tnodeChoiceInt := &ves_io_schema_network_interface.DedicatedManagementInterfaceType_Node{}\n\n\t\t\t\tinterfaceChoiceInt.DedicatedManagementInterface.NodeChoice = nodeChoiceInt\n\n\t\t\t\tnodeChoiceInt.Node = v.(string)\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tif v, ok := d.GetOk(\"ethernet_interface\"); ok && !interfaceChoiceTypeFound {\n\n\t\tinterfaceChoiceTypeFound = true\n\t\tinterfaceChoiceInt := &ves_io_schema_network_interface.CreateSpecType_EthernetInterface{}\n\t\tinterfaceChoiceInt.EthernetInterface = &ves_io_schema_network_interface.EthernetInterfaceType{}\n\t\tcreateSpec.InterfaceChoice = interfaceChoiceInt\n\n\t\tsl := v.(*schema.Set).List()\n\t\tfor _, set := range sl {\n\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t// address_choice\n\n\t\t\taddressChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"dhcp_client\"]; ok && !isIntfNil(v) && !addressChoiceTypeFound {\n\n\t\t\t\taddressChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\taddressChoiceInt := &ves_io_schema_network_interface.EthernetInterfaceType_DhcpClient{}\n\t\t\t\t\taddressChoiceInt.DhcpClient = &ves_io_schema.Empty{}\n\t\t\t\t\tinterfaceChoiceInt.EthernetInterface.AddressChoice = addressChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"dhcp_server\"]; ok && !isIntfNil(v) && !addressChoiceTypeFound {\n\n\t\t\t\taddressChoiceTypeFound = true\n\t\t\t\taddressChoiceInt := &ves_io_schema_network_interface.EthernetInterfaceType_DhcpServer{}\n\t\t\t\taddressChoiceInt.DhcpServer = &ves_io_schema_network_interface.DHCPServerParametersType{}\n\t\t\t\tinterfaceChoiceInt.EthernetInterface.AddressChoice = addressChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t// dhcp_networks\n\n\t\t\t\t\tif v, ok := cs[\"dhcp_networks\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsl := v.([]interface{})\n\t\t\t\t\t\tdhcpNetworks := make([]*ves_io_schema_network_interface.DHCPNetworkType, len(sl))\n\t\t\t\t\t\taddressChoiceInt.DhcpServer.DhcpNetworks = dhcpNetworks\n\t\t\t\t\t\tfor i, set := range sl {\n\t\t\t\t\t\t\tdhcpNetworks[i] = &ves_io_schema_network_interface.DHCPNetworkType{}\n\t\t\t\t\t\t\tdhcpNetworksMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\t\t\t// dns_choice\n\n\t\t\t\t\t\t\tdnsChoiceTypeFound := false\n\n\t\t\t\t\t\t\tif v, ok := dhcpNetworksMapStrToI[\"dns_address\"]; ok && !isIntfNil(v) && !dnsChoiceTypeFound {\n\n\t\t\t\t\t\t\t\tdnsChoiceTypeFound = true\n\t\t\t\t\t\t\t\tdnsChoiceInt := &ves_io_schema_network_interface.DHCPNetworkType_DnsAddress{}\n\n\t\t\t\t\t\t\t\tdhcpNetworks[i].DnsChoice = dnsChoiceInt\n\n\t\t\t\t\t\t\t\tdnsChoiceInt.DnsAddress = v.(string)\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := dhcpNetworksMapStrToI[\"same_as_dgw\"]; ok && !isIntfNil(v) && !dnsChoiceTypeFound {\n\n\t\t\t\t\t\t\t\tdnsChoiceTypeFound 
= true\n\n\t\t\t\t\t\t\t\tif v.(bool) {\n\t\t\t\t\t\t\t\t\tdnsChoiceInt := &ves_io_schema_network_interface.DHCPNetworkType_SameAsDgw{}\n\t\t\t\t\t\t\t\t\tdnsChoiceInt.SameAsDgw = &ves_io_schema.Empty{}\n\t\t\t\t\t\t\t\t\tdhcpNetworks[i].DnsChoice = dnsChoiceInt\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// gateway_choice\n\n\t\t\t\t\t\t\tgatewayChoiceTypeFound := false\n\n\t\t\t\t\t\t\tif v, ok := dhcpNetworksMapStrToI[\"dgw_address\"]; ok && !isIntfNil(v) && !gatewayChoiceTypeFound {\n\n\t\t\t\t\t\t\t\tgatewayChoiceTypeFound = true\n\t\t\t\t\t\t\t\tgatewayChoiceInt := &ves_io_schema_network_interface.DHCPNetworkType_DgwAddress{}\n\n\t\t\t\t\t\t\t\tdhcpNetworks[i].GatewayChoice = gatewayChoiceInt\n\n\t\t\t\t\t\t\t\tgatewayChoiceInt.DgwAddress = v.(string)\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := dhcpNetworksMapStrToI[\"first_address\"]; ok && !isIntfNil(v) && !gatewayChoiceTypeFound {\n\n\t\t\t\t\t\t\t\tgatewayChoiceTypeFound = true\n\n\t\t\t\t\t\t\t\tif v.(bool) {\n\t\t\t\t\t\t\t\t\tgatewayChoiceInt := &ves_io_schema_network_interface.DHCPNetworkType_FirstAddress{}\n\t\t\t\t\t\t\t\t\tgatewayChoiceInt.FirstAddress = &ves_io_schema.Empty{}\n\t\t\t\t\t\t\t\t\tdhcpNetworks[i].GatewayChoice = gatewayChoiceInt\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := dhcpNetworksMapStrToI[\"last_address\"]; ok && !isIntfNil(v) && !gatewayChoiceTypeFound {\n\n\t\t\t\t\t\t\t\tgatewayChoiceTypeFound = true\n\n\t\t\t\t\t\t\t\tif v.(bool) {\n\t\t\t\t\t\t\t\t\tgatewayChoiceInt := &ves_io_schema_network_interface.DHCPNetworkType_LastAddress{}\n\t\t\t\t\t\t\t\t\tgatewayChoiceInt.LastAddress = &ves_io_schema.Empty{}\n\t\t\t\t\t\t\t\t\tdhcpNetworks[i].GatewayChoice = gatewayChoiceInt\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// network_prefix_choice\n\n\t\t\t\t\t\t\tnetworkPrefixChoiceTypeFound := false\n\n\t\t\t\t\t\t\tif v, ok := dhcpNetworksMapStrToI[\"network_prefix\"]; ok && !isIntfNil(v) && !networkPrefixChoiceTypeFound {\n\n\t\t\t\t\t\t\t\tnetworkPrefixChoiceTypeFound = true\n\t\t\t\t\t\t\t\tnetworkPrefixChoiceInt := &ves_io_schema_network_interface.DHCPNetworkType_NetworkPrefix{}\n\n\t\t\t\t\t\t\t\tdhcpNetworks[i].NetworkPrefixChoice = networkPrefixChoiceInt\n\n\t\t\t\t\t\t\t\tnetworkPrefixChoiceInt.NetworkPrefix = v.(string)\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := dhcpNetworksMapStrToI[\"network_prefix_allocator\"]; ok && !isIntfNil(v) && !networkPrefixChoiceTypeFound {\n\n\t\t\t\t\t\t\t\tnetworkPrefixChoiceTypeFound = true\n\t\t\t\t\t\t\t\tnetworkPrefixChoiceInt := &ves_io_schema_network_interface.DHCPNetworkType_NetworkPrefixAllocator{}\n\t\t\t\t\t\t\t\tnetworkPrefixChoiceInt.NetworkPrefixAllocator = &ves_io_schema_views.ObjectRefType{}\n\t\t\t\t\t\t\t\tdhcpNetworks[i].NetworkPrefixChoice = networkPrefixChoiceInt\n\n\t\t\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\t\t\t// name\n\n\t\t\t\t\t\t\t\t\tif v, ok := cs[\"name\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\t\t\tnetworkPrefixChoiceInt.NetworkPrefixAllocator.Name = v.(string)\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\t// namespace\n\n\t\t\t\t\t\t\t\t\tif v, ok := cs[\"namespace\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\t\t\tnetworkPrefixChoiceInt.NetworkPrefixAllocator.Namespace = v.(string)\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\t// tenant\n\n\t\t\t\t\t\t\t\t\tif v, ok := cs[\"tenant\"]; ok && !isIntfNil(v) 
{\n\n\t\t\t\t\t\t\t\t\t\tnetworkPrefixChoiceInt.NetworkPrefixAllocator.Tenant = v.(string)\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// pool_settings\n\n\t\t\t\t\t\t\tif v, ok := dhcpNetworksMapStrToI[\"pool_settings\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\tdhcpNetworks[i].PoolSettings = ves_io_schema_network_interface.DHCPPoolSettingType(ves_io_schema_network_interface.DHCPPoolSettingType_value[v.(string)])\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// pools\n\n\t\t\t\t\t\t\tif v, ok := dhcpNetworksMapStrToI[\"pools\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\tsl := v.([]interface{})\n\t\t\t\t\t\t\t\tpools := make([]*ves_io_schema_network_interface.DHCPPoolType, len(sl))\n\t\t\t\t\t\t\t\tdhcpNetworks[i].Pools = pools\n\t\t\t\t\t\t\t\tfor i, set := range sl {\n\t\t\t\t\t\t\t\t\tpools[i] = &ves_io_schema_network_interface.DHCPPoolType{}\n\t\t\t\t\t\t\t\t\tpoolsMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\t\t\t\t\t// end_ip\n\n\t\t\t\t\t\t\t\t\tif w, ok := poolsMapStrToI[\"end_ip\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\t\t\tpools[i].EndIp = w.(string)\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\t// exclude\n\n\t\t\t\t\t\t\t\t\tif w, ok := poolsMapStrToI[\"exclude\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\t\t\tpools[i].Exclude = w.(bool)\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\t// start_ip\n\n\t\t\t\t\t\t\t\t\tif w, ok := poolsMapStrToI[\"start_ip\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\t\t\tpools[i].StartIp = w.(string)\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\t// dhcp_option82_tag\n\n\t\t\t\t\tif v, ok := cs[\"dhcp_option82_tag\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\taddressChoiceInt.DhcpServer.DhcpOption82Tag = v.(string)\n\t\t\t\t\t}\n\n\t\t\t\t\t// fixed_ip_map\n\n\t\t\t\t\tif v, ok := cs[\"fixed_ip_map\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tms := map[string]string{}\n\t\t\t\t\t\tfor k, v := range v.(map[string]interface{}) {\n\t\t\t\t\t\t\tms[k] = v.(string)\n\t\t\t\t\t\t}\n\t\t\t\t\t\taddressChoiceInt.DhcpServer.FixedIpMap = ms\n\t\t\t\t\t}\n\n\t\t\t\t\t// interfaces_addressing_choice\n\n\t\t\t\t\tinterfacesAddressingChoiceTypeFound := false\n\n\t\t\t\t\tif v, ok := cs[\"automatic_from_end\"]; ok && !isIntfNil(v) && !interfacesAddressingChoiceTypeFound {\n\n\t\t\t\t\t\tinterfacesAddressingChoiceTypeFound = true\n\n\t\t\t\t\t\tif v.(bool) {\n\t\t\t\t\t\t\tinterfacesAddressingChoiceInt := &ves_io_schema_network_interface.DHCPServerParametersType_AutomaticFromEnd{}\n\t\t\t\t\t\t\tinterfacesAddressingChoiceInt.AutomaticFromEnd = &ves_io_schema.Empty{}\n\t\t\t\t\t\t\taddressChoiceInt.DhcpServer.InterfacesAddressingChoice = interfacesAddressingChoiceInt\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"automatic_from_start\"]; ok && !isIntfNil(v) && !interfacesAddressingChoiceTypeFound {\n\n\t\t\t\t\t\tinterfacesAddressingChoiceTypeFound = true\n\n\t\t\t\t\t\tif v.(bool) {\n\t\t\t\t\t\t\tinterfacesAddressingChoiceInt := &ves_io_schema_network_interface.DHCPServerParametersType_AutomaticFromStart{}\n\t\t\t\t\t\t\tinterfacesAddressingChoiceInt.AutomaticFromStart = &ves_io_schema.Empty{}\n\t\t\t\t\t\t\taddressChoiceInt.DhcpServer.InterfacesAddressingChoice = interfacesAddressingChoiceInt\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"interface_ip_map\"]; ok && !isIntfNil(v) && !interfacesAddressingChoiceTypeFound {\n\n\t\t\t\t\t\tinterfacesAddressingChoiceTypeFound = true\n\t\t\t\t\t\tinterfacesAddressingChoiceInt := 
&ves_io_schema_network_interface.DHCPServerParametersType_InterfaceIpMap{}\n\t\t\t\t\t\tinterfacesAddressingChoiceInt.InterfaceIpMap = &ves_io_schema_network_interface.DHCPInterfaceIPType{}\n\t\t\t\t\t\taddressChoiceInt.DhcpServer.InterfacesAddressingChoice = interfacesAddressingChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\t// interface_ip_map\n\n\t\t\t\t\t\t\tif v, ok := cs[\"interface_ip_map\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\tms := map[string]string{}\n\t\t\t\t\t\t\t\tfor k, v := range v.(map[string]interface{}) {\n\t\t\t\t\t\t\t\t\tms[k] = v.(string)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tinterfacesAddressingChoiceInt.InterfaceIpMap.InterfaceIpMap = ms\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"static_ip\"]; ok && !isIntfNil(v) && !addressChoiceTypeFound {\n\n\t\t\t\taddressChoiceTypeFound = true\n\t\t\t\taddressChoiceInt := &ves_io_schema_network_interface.EthernetInterfaceType_StaticIp{}\n\t\t\t\taddressChoiceInt.StaticIp = &ves_io_schema_network_interface.StaticIPParametersType{}\n\t\t\t\tinterfaceChoiceInt.EthernetInterface.AddressChoice = addressChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t// network_prefix_choice\n\n\t\t\t\t\tnetworkPrefixChoiceTypeFound := false\n\n\t\t\t\t\tif v, ok := cs[\"cluster_static_ip\"]; ok && !isIntfNil(v) && !networkPrefixChoiceTypeFound {\n\n\t\t\t\t\t\tnetworkPrefixChoiceTypeFound = true\n\t\t\t\t\t\tnetworkPrefixChoiceInt := &ves_io_schema_network_interface.StaticIPParametersType_ClusterStaticIp{}\n\t\t\t\t\t\tnetworkPrefixChoiceInt.ClusterStaticIp = &ves_io_schema_network_interface.StaticIpParametersClusterType{}\n\t\t\t\t\t\taddressChoiceInt.StaticIp.NetworkPrefixChoice = networkPrefixChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\t// interface_ip_map\n\n\t\t\t\t\t\t\tif v, ok := cs[\"interface_ip_map\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\t\t\tinterfaceIpMap := make(map[string]*ves_io_schema_network_interface.StaticIpParametersNodeType)\n\t\t\t\t\t\t\t\tnetworkPrefixChoiceInt.ClusterStaticIp.InterfaceIpMap = interfaceIpMap\n\t\t\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\t\t\tinterfaceIpMapMapStrToI := set.(map[string]interface{})\n\t\t\t\t\t\t\t\t\tkey, ok := interfaceIpMapMapStrToI[\"name\"]\n\t\t\t\t\t\t\t\t\tif ok && !isIntfNil(key) {\n\t\t\t\t\t\t\t\t\t\tinterfaceIpMap[key.(string)] = &ves_io_schema_network_interface.StaticIpParametersNodeType{}\n\t\t\t\t\t\t\t\t\t\tval, _ := interfaceIpMapMapStrToI[\"value\"]\n\n\t\t\t\t\t\t\t\t\t\tinterfaceIpMapVals := val.(*schema.Set).List()\n\t\t\t\t\t\t\t\t\t\tfor _, intVal := range interfaceIpMapVals {\n\n\t\t\t\t\t\t\t\t\t\t\tinterfaceIpMapStaticMap := intVal.(map[string]interface{})\n\n\t\t\t\t\t\t\t\t\t\t\tif w, ok := interfaceIpMapStaticMap[\"default_gw\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\t\t\t\t\tinterfaceIpMap[key.(string)].DefaultGw = w.(string)\n\t\t\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\t\t\tif w, ok := interfaceIpMapStaticMap[\"dns_server\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\t\t\t\t\tinterfaceIpMap[key.(string)].DnsServer = w.(string)\n\t\t\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\t\t\tif w, ok := interfaceIpMapStaticMap[\"ip_address\"]; ok && 
!isIntfNil(w) {\n\t\t\t\t\t\t\t\t\t\t\t\tinterfaceIpMap[key.(string)].IpAddress = w.(string)\n\t\t\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\t\t\t// break after one loop\n\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"fleet_static_ip\"]; ok && !isIntfNil(v) && !networkPrefixChoiceTypeFound {\n\n\t\t\t\t\t\tnetworkPrefixChoiceTypeFound = true\n\t\t\t\t\t\tnetworkPrefixChoiceInt := &ves_io_schema_network_interface.StaticIPParametersType_FleetStaticIp{}\n\t\t\t\t\t\tnetworkPrefixChoiceInt.FleetStaticIp = &ves_io_schema_network_interface.StaticIpParametersFleetType{}\n\t\t\t\t\t\taddressChoiceInt.StaticIp.NetworkPrefixChoice = networkPrefixChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\t// default_gw\n\n\t\t\t\t\t\t\tif v, ok := cs[\"default_gw\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\tnetworkPrefixChoiceInt.FleetStaticIp.DefaultGw = v.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// dns_server\n\n\t\t\t\t\t\t\tif v, ok := cs[\"dns_server\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\tnetworkPrefixChoiceInt.FleetStaticIp.DnsServer = v.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// network_prefix_allocator\n\n\t\t\t\t\t\t\tif v, ok := cs[\"network_prefix_allocator\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\tnetworkPrefixAllocatorInt := &ves_io_schema_views.ObjectRefType{}\n\t\t\t\t\t\t\t\tnetworkPrefixChoiceInt.FleetStaticIp.NetworkPrefixAllocator = networkPrefixAllocatorInt\n\n\t\t\t\t\t\t\t\tnpaMapToStrVal := v.(map[string]interface{})\n\t\t\t\t\t\t\t\tif val, ok := npaMapToStrVal[\"name\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\t\tnetworkPrefixAllocatorInt.Name = val.(string)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tif val, ok := npaMapToStrVal[\"namespace\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\t\tnetworkPrefixAllocatorInt.Namespace = val.(string)\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\tif val, ok := npaMapToStrVal[\"tenant\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\t\tnetworkPrefixAllocatorInt.Tenant = val.(string)\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"node_static_ip\"]; ok && !isIntfNil(v) && !networkPrefixChoiceTypeFound {\n\n\t\t\t\t\t\tnetworkPrefixChoiceTypeFound = true\n\t\t\t\t\t\tnetworkPrefixChoiceInt := &ves_io_schema_network_interface.StaticIPParametersType_NodeStaticIp{}\n\t\t\t\t\t\tnetworkPrefixChoiceInt.NodeStaticIp = &ves_io_schema_network_interface.StaticIpParametersNodeType{}\n\t\t\t\t\t\taddressChoiceInt.StaticIp.NetworkPrefixChoice = networkPrefixChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\t// default_gw\n\n\t\t\t\t\t\t\tif v, ok := cs[\"default_gw\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\tnetworkPrefixChoiceInt.NodeStaticIp.DefaultGw = v.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// dns_server\n\n\t\t\t\t\t\t\tif v, ok := cs[\"dns_server\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\tnetworkPrefixChoiceInt.NodeStaticIp.DnsServer = v.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// ip_address\n\n\t\t\t\t\t\t\tif v, ok := cs[\"ip_address\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\tnetworkPrefixChoiceInt.NodeStaticIp.IpAddress = v.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t// device\n\n\t\t\tif v, ok := cs[\"device\"]; ok && 
!isIntfNil(v) {\n\n\t\t\t\tinterfaceChoiceInt.EthernetInterface.Device = v.(string)\n\t\t\t}\n\n\t\t\t// monitoring_choice\n\n\t\t\tmonitoringChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"monitor\"]; ok && !isIntfNil(v) && !monitoringChoiceTypeFound {\n\n\t\t\t\tmonitoringChoiceTypeFound = true\n\t\t\t\t_ = v\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"monitor_disabled\"]; ok && !isIntfNil(v) && !monitoringChoiceTypeFound {\n\n\t\t\t\tmonitoringChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tmonitoringChoiceInt := &ves_io_schema_network_interface.EthernetInterfaceType_MonitorDisabled{}\n\t\t\t\t\tmonitoringChoiceInt.MonitorDisabled = &ves_io_schema.Empty{}\n\t\t\t\t\tinterfaceChoiceInt.EthernetInterface.MonitoringChoice = monitoringChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t// mtu\n\n\t\t\tif v, ok := cs[\"mtu\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tinterfaceChoiceInt.EthernetInterface.Mtu = uint32(v.(int))\n\t\t\t}\n\n\t\t\t// network_choice\n\n\t\t\tnetworkChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"inside_network\"]; ok && !isIntfNil(v) && !networkChoiceTypeFound {\n\n\t\t\t\tnetworkChoiceTypeFound = true\n\t\t\t\tnetworkChoiceInt := &ves_io_schema_network_interface.EthernetInterfaceType_InsideNetwork{}\n\t\t\t\tnetworkChoiceInt.InsideNetwork = &ves_io_schema_views.ObjectRefType{}\n\t\t\t\tinterfaceChoiceInt.EthernetInterface.NetworkChoice = networkChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t// name\n\n\t\t\t\t\tif v, ok := cs[\"name\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tnetworkChoiceInt.InsideNetwork.Name = v.(string)\n\t\t\t\t\t}\n\n\t\t\t\t\t// namespace\n\n\t\t\t\t\tif v, ok := cs[\"namespace\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tnetworkChoiceInt.InsideNetwork.Namespace = v.(string)\n\t\t\t\t\t}\n\n\t\t\t\t\t// tenant\n\n\t\t\t\t\tif v, ok := cs[\"tenant\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tnetworkChoiceInt.InsideNetwork.Tenant = v.(string)\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"site_local_inside_network\"]; ok && !isIntfNil(v) && !networkChoiceTypeFound {\n\n\t\t\t\tnetworkChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tnetworkChoiceInt := &ves_io_schema_network_interface.EthernetInterfaceType_SiteLocalInsideNetwork{}\n\t\t\t\t\tnetworkChoiceInt.SiteLocalInsideNetwork = &ves_io_schema.Empty{}\n\t\t\t\t\tinterfaceChoiceInt.EthernetInterface.NetworkChoice = networkChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"site_local_network\"]; ok && !isIntfNil(v) && !networkChoiceTypeFound {\n\n\t\t\t\tnetworkChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tnetworkChoiceInt := &ves_io_schema_network_interface.EthernetInterfaceType_SiteLocalNetwork{}\n\t\t\t\t\tnetworkChoiceInt.SiteLocalNetwork = &ves_io_schema.Empty{}\n\t\t\t\t\tinterfaceChoiceInt.EthernetInterface.NetworkChoice = networkChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"storage_network\"]; ok && !isIntfNil(v) && !networkChoiceTypeFound {\n\n\t\t\t\tnetworkChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tnetworkChoiceInt := &ves_io_schema_network_interface.EthernetInterfaceType_StorageNetwork{}\n\t\t\t\t\tnetworkChoiceInt.StorageNetwork = &ves_io_schema.Empty{}\n\t\t\t\t\tinterfaceChoiceInt.EthernetInterface.NetworkChoice = networkChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t// node_choice\n\n\t\t\tnodeChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"cluster\"]; ok && !isIntfNil(v) && !nodeChoiceTypeFound {\n\n\t\t\t\tnodeChoiceTypeFound = 
true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tnodeChoiceInt := &ves_io_schema_network_interface.EthernetInterfaceType_Cluster{}\n\t\t\t\t\tnodeChoiceInt.Cluster = &ves_io_schema.Empty{}\n\t\t\t\t\tinterfaceChoiceInt.EthernetInterface.NodeChoice = nodeChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"node\"]; ok && !isIntfNil(v) && !nodeChoiceTypeFound {\n\n\t\t\t\tnodeChoiceTypeFound = true\n\t\t\t\tnodeChoiceInt := &ves_io_schema_network_interface.EthernetInterfaceType_Node{}\n\n\t\t\t\tinterfaceChoiceInt.EthernetInterface.NodeChoice = nodeChoiceInt\n\n\t\t\t\tnodeChoiceInt.Node = v.(string)\n\n\t\t\t}\n\n\t\t\t// primary_choice\n\n\t\t\tprimaryChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"is_primary\"]; ok && !isIntfNil(v) && !primaryChoiceTypeFound {\n\n\t\t\t\tprimaryChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tprimaryChoiceInt := &ves_io_schema_network_interface.EthernetInterfaceType_IsPrimary{}\n\t\t\t\t\tprimaryChoiceInt.IsPrimary = &ves_io_schema.Empty{}\n\t\t\t\t\tinterfaceChoiceInt.EthernetInterface.PrimaryChoice = primaryChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"not_primary\"]; ok && !isIntfNil(v) && !primaryChoiceTypeFound {\n\n\t\t\t\tprimaryChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tprimaryChoiceInt := &ves_io_schema_network_interface.EthernetInterfaceType_NotPrimary{}\n\t\t\t\t\tprimaryChoiceInt.NotPrimary = &ves_io_schema.Empty{}\n\t\t\t\t\tinterfaceChoiceInt.EthernetInterface.PrimaryChoice = primaryChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t// priority\n\n\t\t\tif v, ok := cs[\"priority\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tinterfaceChoiceInt.EthernetInterface.Priority = uint32(v.(int))\n\t\t\t}\n\n\t\t\t// vlan_choice\n\n\t\t\tvlanChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"untagged\"]; ok && !isIntfNil(v) && !vlanChoiceTypeFound {\n\n\t\t\t\tvlanChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tvlanChoiceInt := &ves_io_schema_network_interface.EthernetInterfaceType_Untagged{}\n\t\t\t\t\tvlanChoiceInt.Untagged = &ves_io_schema.Empty{}\n\t\t\t\t\tinterfaceChoiceInt.EthernetInterface.VlanChoice = vlanChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"vlan_id\"]; ok && !isIntfNil(v) && !vlanChoiceTypeFound {\n\n\t\t\t\tvlanChoiceTypeFound = true\n\t\t\t\tvlanChoiceInt := &ves_io_schema_network_interface.EthernetInterfaceType_VlanId{}\n\n\t\t\t\tinterfaceChoiceInt.EthernetInterface.VlanChoice = vlanChoiceInt\n\n\t\t\t\tvlanChoiceInt.VlanId =\n\t\t\t\t\tuint32(v.(int))\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tif v, ok := d.GetOk(\"legacy_interface\"); ok && !interfaceChoiceTypeFound {\n\n\t\tinterfaceChoiceTypeFound = true\n\t\tinterfaceChoiceInt := &ves_io_schema_network_interface.CreateSpecType_LegacyInterface{}\n\t\tinterfaceChoiceInt.LegacyInterface = &ves_io_schema_network_interface.LegacyInterfaceType{}\n\t\tcreateSpec.InterfaceChoice = interfaceChoiceInt\n\n\t\tsl := v.(*schema.Set).List()\n\t\tfor _, set := range sl {\n\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t// DHCP_server\n\n\t\t\tif v, ok := cs[\"dhcp_server\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tinterfaceChoiceInt.LegacyInterface.DHCPServer = ves_io_schema_network_interface.NetworkInterfaceDHCPServer(ves_io_schema_network_interface.NetworkInterfaceDHCPServer_value[v.(string)])\n\n\t\t\t}\n\n\t\t\t// DNS_server\n\n\t\t\tif v, ok := cs[\"dns_server\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tdnsServer := &ves_io_schema_network_interface.NetworkInterfaceDNS{}\n\t\t\t\tinterfaceChoiceInt.LegacyInterface.DNSServer = 
dnsServer\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tdnsServerMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\t// dns_mode\n\n\t\t\t\t\tif v, ok := dnsServerMapStrToI[\"dns_mode\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tdnsServer.DnsMode = ves_io_schema_network_interface.NetworkInterfaceDNSMode(ves_io_schema_network_interface.NetworkInterfaceDNSMode_value[v.(string)])\n\n\t\t\t\t\t}\n\n\t\t\t\t\t// dns_server\n\n\t\t\t\t\tif v, ok := dnsServerMapStrToI[\"dns_server\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsl := v.([]interface{})\n\t\t\t\t\t\tdnsServerIpv4s := make([]*ves_io_schema.Ipv4AddressType, len(sl))\n\t\t\t\t\t\tdnsServer.DnsServer = dnsServerIpv4s\n\t\t\t\t\t\tfor i, set := range sl {\n\t\t\t\t\t\t\tdnsServerIpv4s[i] = &ves_io_schema.Ipv4AddressType{}\n\t\t\t\t\t\t\tdnsServerMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\t\t\t// addr\n\n\t\t\t\t\t\t\tif w, ok := dnsServerMapStrToI[\"addr\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\tdnsServerIpv4s[i].Addr = w.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t// address_allocator\n\n\t\t\tif v, ok := cs[\"address_allocator\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := v.([]interface{})\n\t\t\t\taddressAllocatorInt := make([]*ves_io_schema.ObjectRefType, len(sl))\n\t\t\t\tinterfaceChoiceInt.LegacyInterface.AddressAllocator = addressAllocatorInt\n\t\t\t\tfor i, ps := range sl {\n\n\t\t\t\t\taaMapToStrVal := ps.(map[string]interface{})\n\t\t\t\t\taddressAllocatorInt[i] = &ves_io_schema.ObjectRefType{}\n\n\t\t\t\t\taddressAllocatorInt[i].Kind = \"address_allocator\"\n\n\t\t\t\t\tif v, ok := aaMapToStrVal[\"name\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\taddressAllocatorInt[i].Name = v.(string)\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := aaMapToStrVal[\"namespace\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\taddressAllocatorInt[i].Namespace = v.(string)\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := aaMapToStrVal[\"tenant\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\taddressAllocatorInt[i].Tenant = v.(string)\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := aaMapToStrVal[\"uid\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\taddressAllocatorInt[i].Uid = v.(string)\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t// default_gateway\n\n\t\t\tif v, ok := cs[\"default_gateway\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tdefaultGateway := &ves_io_schema_network_interface.NetworkInterfaceDFGW{}\n\t\t\t\tinterfaceChoiceInt.LegacyInterface.DefaultGateway = defaultGateway\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tdefaultGatewayMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\t// default_gateway_address\n\n\t\t\t\t\tif v, ok := defaultGatewayMapStrToI[\"default_gateway_address\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tdefaultGatewayAddress := &ves_io_schema.Ipv4AddressType{}\n\t\t\t\t\t\tdefaultGateway.DefaultGatewayAddress = defaultGatewayAddress\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tdefaultGatewayAddressMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\t\t\t// addr\n\n\t\t\t\t\t\t\tif w, ok := defaultGatewayAddressMapStrToI[\"addr\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\tdefaultGatewayAddress.Addr = w.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\t// default_gateway_mode\n\n\t\t\t\t\tif v, ok := defaultGatewayMapStrToI[\"default_gateway_mode\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tdefaultGateway.DefaultGatewayMode = 
ves_io_schema_network_interface.NetworkInterfaceGatewayMode(ves_io_schema_network_interface.NetworkInterfaceGatewayMode_value[v.(string)])\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t// device_name\n\n\t\t\tif v, ok := cs[\"device_name\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tinterfaceChoiceInt.LegacyInterface.DeviceName = v.(string)\n\t\t\t}\n\n\t\t\t// dhcp_address\n\n\t\t\tif v, ok := cs[\"dhcp_address\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tinterfaceChoiceInt.LegacyInterface.DhcpAddress = ves_io_schema_network_interface.NetworkInterfaceDHCP(ves_io_schema_network_interface.NetworkInterfaceDHCP_value[v.(string)])\n\n\t\t\t}\n\n\t\t\t// monitoring_choice\n\n\t\t\tmonitoringChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"monitor\"]; ok && !isIntfNil(v) && !monitoringChoiceTypeFound {\n\n\t\t\t\tmonitoringChoiceTypeFound = true\n\t\t\t\t_ = v\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"monitor_disabled\"]; ok && !isIntfNil(v) && !monitoringChoiceTypeFound {\n\n\t\t\t\tmonitoringChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tmonitoringChoiceInt := &ves_io_schema_network_interface.LegacyInterfaceType_MonitorDisabled{}\n\t\t\t\t\tmonitoringChoiceInt.MonitorDisabled = &ves_io_schema.Empty{}\n\t\t\t\t\tinterfaceChoiceInt.LegacyInterface.MonitoringChoice = monitoringChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t// mtu\n\n\t\t\tif v, ok := cs[\"mtu\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tinterfaceChoiceInt.LegacyInterface.Mtu = uint32(v.(int))\n\t\t\t}\n\n\t\t\t// priority\n\n\t\t\tif v, ok := cs[\"priority\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tinterfaceChoiceInt.LegacyInterface.Priority = uint32(v.(int))\n\t\t\t}\n\n\t\t\t// static_addresses\n\n\t\t\tif v, ok := cs[\"static_addresses\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := v.([]interface{})\n\t\t\t\tstaticAddresses := make([]*ves_io_schema.Ipv4SubnetType, len(sl))\n\t\t\t\tinterfaceChoiceInt.LegacyInterface.StaticAddresses = staticAddresses\n\t\t\t\tfor i, set := range sl {\n\t\t\t\t\tstaticAddresses[i] = &ves_io_schema.Ipv4SubnetType{}\n\t\t\t\t\tstaticAddressesMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\t// plen\n\n\t\t\t\t\tif w, ok := staticAddressesMapStrToI[\"plen\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\tstaticAddresses[i].Plen = uint32(w.(int))\n\t\t\t\t\t}\n\n\t\t\t\t\t// prefix\n\n\t\t\t\t\tif w, ok := staticAddressesMapStrToI[\"prefix\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\tstaticAddresses[i].Prefix = w.(string)\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t// tunnel\n\n\t\t\tif v, ok := cs[\"tunnel\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\ttunnel := &ves_io_schema_network_interface.NetworkInterfaceTunnel{}\n\t\t\t\tinterfaceChoiceInt.LegacyInterface.Tunnel = tunnel\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\ttunnelMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\t// tunnel\n\n\t\t\t\t\tif v, ok := tunnelMapStrToI[\"tunnel\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsl := v.([]interface{})\n\t\t\t\t\t\ttunnelInt := make([]*ves_io_schema.ObjectRefType, len(sl))\n\t\t\t\t\t\ttunnel.Tunnel = tunnelInt\n\t\t\t\t\t\tfor i, ps := range sl {\n\n\t\t\t\t\t\t\ttMapToStrVal := ps.(map[string]interface{})\n\t\t\t\t\t\t\ttunnelInt[i] = &ves_io_schema.ObjectRefType{}\n\n\t\t\t\t\t\t\ttunnelInt[i].Kind = \"tunnel\"\n\n\t\t\t\t\t\t\tif v, ok := tMapToStrVal[\"name\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\ttunnelInt[i].Name = v.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := tMapToStrVal[\"namespace\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\ttunnelInt[i].Namespace = 
v.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := tMapToStrVal[\"tenant\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\ttunnelInt[i].Tenant = v.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := tMapToStrVal[\"uid\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\ttunnelInt[i].Uid = v.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t// type\n\n\t\t\tif v, ok := cs[\"type\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tinterfaceChoiceInt.LegacyInterface.Type = ves_io_schema_network_interface.NetworkInterfaceType(ves_io_schema_network_interface.NetworkInterfaceType_value[v.(string)])\n\n\t\t\t}\n\n\t\t\t// virtual_network\n\n\t\t\tif v, ok := cs[\"virtual_network\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := v.([]interface{})\n\t\t\t\tvirtualNetworkInt := make([]*ves_io_schema.ObjectRefType, len(sl))\n\t\t\t\tinterfaceChoiceInt.LegacyInterface.VirtualNetwork = virtualNetworkInt\n\t\t\t\tfor i, ps := range sl {\n\n\t\t\t\t\tvnMapToStrVal := ps.(map[string]interface{})\n\t\t\t\t\tvirtualNetworkInt[i] = &ves_io_schema.ObjectRefType{}\n\n\t\t\t\t\tvirtualNetworkInt[i].Kind = \"virtual_network\"\n\n\t\t\t\t\tif v, ok := vnMapToStrVal[\"name\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\tvirtualNetworkInt[i].Name = v.(string)\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := vnMapToStrVal[\"namespace\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\tvirtualNetworkInt[i].Namespace = v.(string)\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := vnMapToStrVal[\"tenant\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\tvirtualNetworkInt[i].Tenant = v.(string)\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := vnMapToStrVal[\"uid\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\tvirtualNetworkInt[i].Uid = v.(string)\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t// vlan_tag\n\n\t\t\tif v, ok := cs[\"vlan_tag\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tinterfaceChoiceInt.LegacyInterface.VlanTag = uint32(v.(int))\n\t\t\t}\n\n\t\t\t// vlan_tagging\n\n\t\t\tif v, ok := cs[\"vlan_tagging\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tinterfaceChoiceInt.LegacyInterface.VlanTagging = ves_io_schema_network_interface.NetworkInterfaceVLANTagging(ves_io_schema_network_interface.NetworkInterfaceVLANTagging_value[v.(string)])\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tif v, ok := d.GetOk(\"tunnel_interface\"); ok && !interfaceChoiceTypeFound {\n\n\t\tinterfaceChoiceTypeFound = true\n\t\tinterfaceChoiceInt := &ves_io_schema_network_interface.CreateSpecType_TunnelInterface{}\n\t\tinterfaceChoiceInt.TunnelInterface = &ves_io_schema_network_interface.TunnelInterfaceType{}\n\t\tcreateSpec.InterfaceChoice = interfaceChoiceInt\n\n\t\tsl := v.(*schema.Set).List()\n\t\tfor _, set := range sl {\n\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t// mtu\n\n\t\t\tif v, ok := cs[\"mtu\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tinterfaceChoiceInt.TunnelInterface.Mtu = uint32(v.(int))\n\t\t\t}\n\n\t\t\t// network_choice\n\n\t\t\tnetworkChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"inside_network\"]; ok && !isIntfNil(v) && !networkChoiceTypeFound {\n\n\t\t\t\tnetworkChoiceTypeFound = true\n\t\t\t\tnetworkChoiceInt := &ves_io_schema_network_interface.TunnelInterfaceType_InsideNetwork{}\n\t\t\t\tnetworkChoiceInt.InsideNetwork = &ves_io_schema_views.ObjectRefType{}\n\t\t\t\tinterfaceChoiceInt.TunnelInterface.NetworkChoice = networkChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t// name\n\n\t\t\t\t\tif v, ok := cs[\"name\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tnetworkChoiceInt.InsideNetwork.Name = 
v.(string)\n\t\t\t\t\t}\n\n\t\t\t\t\t// namespace\n\n\t\t\t\t\tif v, ok := cs[\"namespace\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tnetworkChoiceInt.InsideNetwork.Namespace = v.(string)\n\t\t\t\t\t}\n\n\t\t\t\t\t// tenant\n\n\t\t\t\t\tif v, ok := cs[\"tenant\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tnetworkChoiceInt.InsideNetwork.Tenant = v.(string)\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"site_local_inside_network\"]; ok && !isIntfNil(v) && !networkChoiceTypeFound {\n\n\t\t\t\tnetworkChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tnetworkChoiceInt := &ves_io_schema_network_interface.TunnelInterfaceType_SiteLocalInsideNetwork{}\n\t\t\t\t\tnetworkChoiceInt.SiteLocalInsideNetwork = &ves_io_schema.Empty{}\n\t\t\t\t\tinterfaceChoiceInt.TunnelInterface.NetworkChoice = networkChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"site_local_network\"]; ok && !isIntfNil(v) && !networkChoiceTypeFound {\n\n\t\t\t\tnetworkChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tnetworkChoiceInt := &ves_io_schema_network_interface.TunnelInterfaceType_SiteLocalNetwork{}\n\t\t\t\t\tnetworkChoiceInt.SiteLocalNetwork = &ves_io_schema.Empty{}\n\t\t\t\t\tinterfaceChoiceInt.TunnelInterface.NetworkChoice = networkChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t// node_choice\n\n\t\t\tnodeChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"cluster\"]; ok && !isIntfNil(v) && !nodeChoiceTypeFound {\n\n\t\t\t\tnodeChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tnodeChoiceInt := &ves_io_schema_network_interface.TunnelInterfaceType_Cluster{}\n\t\t\t\t\tnodeChoiceInt.Cluster = &ves_io_schema.Empty{}\n\t\t\t\t\tinterfaceChoiceInt.TunnelInterface.NodeChoice = nodeChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"node\"]; ok && !isIntfNil(v) && !nodeChoiceTypeFound {\n\n\t\t\t\tnodeChoiceTypeFound = true\n\t\t\t\tnodeChoiceInt := &ves_io_schema_network_interface.TunnelInterfaceType_Node{}\n\n\t\t\t\tinterfaceChoiceInt.TunnelInterface.NodeChoice = nodeChoiceInt\n\n\t\t\t\tnodeChoiceInt.Node = v.(string)\n\n\t\t\t}\n\n\t\t\t// priority\n\n\t\t\tif v, ok := cs[\"priority\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tinterfaceChoiceInt.TunnelInterface.Priority = uint32(v.(int))\n\t\t\t}\n\n\t\t\t// static_ip\n\n\t\t\tif v, ok := cs[\"static_ip\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tstaticIp := &ves_io_schema_network_interface.StaticIPParametersType{}\n\t\t\t\tinterfaceChoiceInt.TunnelInterface.StaticIp = staticIp\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tstaticIpMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\t// network_prefix_choice\n\n\t\t\t\t\tnetworkPrefixChoiceTypeFound := false\n\n\t\t\t\t\tif v, ok := staticIpMapStrToI[\"cluster_static_ip\"]; ok && !isIntfNil(v) && !networkPrefixChoiceTypeFound {\n\n\t\t\t\t\t\tnetworkPrefixChoiceTypeFound = true\n\t\t\t\t\t\tnetworkPrefixChoiceInt := &ves_io_schema_network_interface.StaticIPParametersType_ClusterStaticIp{}\n\t\t\t\t\t\tnetworkPrefixChoiceInt.ClusterStaticIp = &ves_io_schema_network_interface.StaticIpParametersClusterType{}\n\t\t\t\t\t\tstaticIp.NetworkPrefixChoice = networkPrefixChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\t// interface_ip_map\n\n\t\t\t\t\t\t\tif v, ok := cs[\"interface_ip_map\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\t\t\tinterfaceIpMap := 
make(map[string]*ves_io_schema_network_interface.StaticIpParametersNodeType)\n\t\t\t\t\t\t\t\tnetworkPrefixChoiceInt.ClusterStaticIp.InterfaceIpMap = interfaceIpMap\n\t\t\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\t\t\tinterfaceIpMapMapStrToI := set.(map[string]interface{})\n\t\t\t\t\t\t\t\t\tkey, ok := interfaceIpMapMapStrToI[\"name\"]\n\t\t\t\t\t\t\t\t\tif ok && !isIntfNil(key) {\n\t\t\t\t\t\t\t\t\t\tinterfaceIpMap[key.(string)] = &ves_io_schema_network_interface.StaticIpParametersNodeType{}\n\t\t\t\t\t\t\t\t\t\tval, _ := interfaceIpMapMapStrToI[\"value\"]\n\n\t\t\t\t\t\t\t\t\t\tinterfaceIpMapVals := val.(*schema.Set).List()\n\t\t\t\t\t\t\t\t\t\tfor _, intVal := range interfaceIpMapVals {\n\n\t\t\t\t\t\t\t\t\t\t\tinterfaceIpMapStaticMap := intVal.(map[string]interface{})\n\n\t\t\t\t\t\t\t\t\t\t\tif w, ok := interfaceIpMapStaticMap[\"default_gw\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\t\t\t\t\tinterfaceIpMap[key.(string)].DefaultGw = w.(string)\n\t\t\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\t\t\tif w, ok := interfaceIpMapStaticMap[\"dns_server\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\t\t\t\t\tinterfaceIpMap[key.(string)].DnsServer = w.(string)\n\t\t\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\t\t\tif w, ok := interfaceIpMapStaticMap[\"ip_address\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\t\t\t\t\tinterfaceIpMap[key.(string)].IpAddress = w.(string)\n\t\t\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\t\t\t// break after one loop\n\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := staticIpMapStrToI[\"fleet_static_ip\"]; ok && !isIntfNil(v) && !networkPrefixChoiceTypeFound {\n\n\t\t\t\t\t\tnetworkPrefixChoiceTypeFound = true\n\t\t\t\t\t\tnetworkPrefixChoiceInt := &ves_io_schema_network_interface.StaticIPParametersType_FleetStaticIp{}\n\t\t\t\t\t\tnetworkPrefixChoiceInt.FleetStaticIp = &ves_io_schema_network_interface.StaticIpParametersFleetType{}\n\t\t\t\t\t\tstaticIp.NetworkPrefixChoice = networkPrefixChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\t// default_gw\n\n\t\t\t\t\t\t\tif v, ok := cs[\"default_gw\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\tnetworkPrefixChoiceInt.FleetStaticIp.DefaultGw = v.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// dns_server\n\n\t\t\t\t\t\t\tif v, ok := cs[\"dns_server\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\tnetworkPrefixChoiceInt.FleetStaticIp.DnsServer = v.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// network_prefix_allocator\n\n\t\t\t\t\t\t\tif v, ok := cs[\"network_prefix_allocator\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\tnetworkPrefixAllocatorInt := &ves_io_schema_views.ObjectRefType{}\n\t\t\t\t\t\t\t\tnetworkPrefixChoiceInt.FleetStaticIp.NetworkPrefixAllocator = networkPrefixAllocatorInt\n\n\t\t\t\t\t\t\t\tnpaMapToStrVal := v.(map[string]interface{})\n\t\t\t\t\t\t\t\tif val, ok := npaMapToStrVal[\"name\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\t\tnetworkPrefixAllocatorInt.Name = val.(string)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tif val, ok := npaMapToStrVal[\"namespace\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\t\tnetworkPrefixAllocatorInt.Namespace = val.(string)\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\tif val, ok := npaMapToStrVal[\"tenant\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\t\tnetworkPrefixAllocatorInt.Tenant = val.(string)\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := 
staticIpMapStrToI[\"node_static_ip\"]; ok && !isIntfNil(v) && !networkPrefixChoiceTypeFound {\n\n\t\t\t\t\t\tnetworkPrefixChoiceTypeFound = true\n\t\t\t\t\t\tnetworkPrefixChoiceInt := &ves_io_schema_network_interface.StaticIPParametersType_NodeStaticIp{}\n\t\t\t\t\t\tnetworkPrefixChoiceInt.NodeStaticIp = &ves_io_schema_network_interface.StaticIpParametersNodeType{}\n\t\t\t\t\t\tstaticIp.NetworkPrefixChoice = networkPrefixChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\t// default_gw\n\n\t\t\t\t\t\t\tif v, ok := cs[\"default_gw\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\tnetworkPrefixChoiceInt.NodeStaticIp.DefaultGw = v.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// dns_server\n\n\t\t\t\t\t\t\tif v, ok := cs[\"dns_server\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\tnetworkPrefixChoiceInt.NodeStaticIp.DnsServer = v.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// ip_address\n\n\t\t\t\t\t\t\tif v, ok := cs[\"ip_address\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\tnetworkPrefixChoiceInt.NodeStaticIp.IpAddress = v.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t// tunnel\n\n\t\t\tif v, ok := cs[\"tunnel\"]; ok && !isIntfNil(v) {\n\n\t\t\t\ttunnelInt := &ves_io_schema_views.ObjectRefType{}\n\t\t\t\tinterfaceChoiceInt.TunnelInterface.Tunnel = tunnelInt\n\n\t\t\t\ttMapToStrVal := v.(map[string]interface{})\n\t\t\t\tif val, ok := tMapToStrVal[\"name\"]; ok && !isIntfNil(v) {\n\t\t\t\t\ttunnelInt.Name = val.(string)\n\t\t\t\t}\n\t\t\t\tif val, ok := tMapToStrVal[\"namespace\"]; ok && !isIntfNil(v) {\n\t\t\t\t\ttunnelInt.Namespace = val.(string)\n\t\t\t\t}\n\n\t\t\t\tif val, ok := tMapToStrVal[\"tenant\"]; ok && !isIntfNil(v) {\n\t\t\t\t\ttunnelInt.Tenant = val.(string)\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tlog.Printf(\"[DEBUG] Creating Volterra NetworkInterface object with struct: %+v\", createReq)\n\n\tcreateNetworkInterfaceResp, err := client.CreateObject(context.Background(), ves_io_schema_network_interface.ObjectType, createReq)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating NetworkInterface: %s\", err)\n\t}\n\td.SetId(createNetworkInterfaceResp.GetObjSystemMetadata().GetUid())\n\n\treturn resourceVolterraNetworkInterfaceRead(d, meta)\n}", "func NewWeaviateActionsPatchAccepted() *WeaviateActionsPatchAccepted {\n\n\treturn &WeaviateActionsPatchAccepted{}\n}", "func NewCreateCoreV1NamespacedPodBindingAccepted() *CreateCoreV1NamespacedPodBindingAccepted {\n\n\treturn &CreateCoreV1NamespacedPodBindingAccepted{}\n}", "func (c *SeaterController) Accepted(data interface{}) {\n\tc.Code(202)\n\tc.jsonResp(data)\n}", "func (o *CreateAuthenticationV1beta1TokenReviewAccepted) WithPayload(payload *models.IoK8sAPIAuthenticationV1beta1TokenReview) *CreateAuthenticationV1beta1TokenReviewAccepted {\n\to.Payload = payload\n\treturn o\n}", "func createOIDCIssuer(client *azureclients.AzureClientWrapper, name, region, oidcResourceGroupName, storageAccountName, blobContainerName, subscriptionID, tenantID, publicKeyPath, outputDir string, resourceTags map[string]string, dryRun bool) (string, error) {\n\t// Add CCO's \"owned\" tag to resource tags map\n\tresourceTags[fmt.Sprintf(\"%s_%s\", ownedAzureResourceTagKeyPrefix, name)] = ownedAzureResourceTagValue\n\n\tstorageAccountKey := \"\"\n\tif !dryRun {\n\t\t// Ensure that the public key file can be read at the publicKeyPath before continuing\n\t\t_, err := os.ReadFile(publicKeyPath)\n\t\tif err != nil 
{\n\t\t\treturn \"\", errors.Wrap(err, \"unable to read public key file\")\n\t\t}\n\n\t\t// Ensure the resource group exists\n\t\terr = ensureResourceGroup(client, oidcResourceGroupName, region, resourceTags)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"failed to ensure resource group\")\n\t\t}\n\n\t\t// Ensure storage account exists\n\t\terr = ensureStorageAccount(client, storageAccountName, oidcResourceGroupName, region, resourceTags)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"failed to ensure storage account\")\n\t\t}\n\n\t\tstorageAccountKey, err = getStorageAccountKey(client, storageAccountName, oidcResourceGroupName)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"failed to get storage account key\")\n\t\t}\n\n\t\t// Ensure blob container exists\n\t\terr = ensureBlobContainer(client, oidcResourceGroupName, storageAccountName, blobContainerName)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"failed to create blob container\")\n\t\t}\n\t}\n\n\t// Upload OIDC documents (openid-configuration, jwks.json) to the blob container\n\toutputDirAbsPath, err := filepath.Abs(outputDir)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tissuerURL, err := uploadOIDCDocuments(client, storageAccountName, storageAccountKey, publicKeyPath, blobContainerName, outputDirAbsPath, dryRun, resourceTags)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to upload OIDC documents\")\n\t}\n\n\t// Write cluster authentication object installer manifest cluster-authentication-02-config.yaml\n\t// for our issuerURL within outputDir/manifests\n\tif err = provisioning.CreateClusterAuthentication(issuerURL, outputDir); err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to create cluster authentication manifest\")\n\t}\n\n\t// Write Azure AD pod identity webhook config secret azure-ad-pod-identity-webhook-config.yaml\n\t// within outputDir/manifests\n\tif err = createPodIdentityWebhookConfigSecret(tenantID, outputDir); err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to create Azure AD pod identity webhook manifest\")\n\t}\n\n\treturn issuerURL, nil\n}", "func (client *ReplicationvCentersClient) createCreateRequest(ctx context.Context, fabricName string, vcenterName string, addVCenterRequest AddVCenterRequest, options *ReplicationvCentersClientBeginCreateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationvCenters/{vcenterName}\"\n\tif client.resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(client.resourceName))\n\tif client.resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(client.resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif fabricName == \"\" {\n\t\treturn nil, errors.New(\"parameter fabricName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{fabricName}\", url.PathEscape(fabricName))\n\tif vcenterName == \"\" {\n\t\treturn nil, errors.New(\"parameter vcenterName cannot be 
empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vcenterName}\", url.PathEscape(vcenterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, addVCenterRequest)\n}", "func (client StorageGatewayClient) createStorageGateway(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/storageGateways\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateStorageGatewayResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (client *StorageTargetsClient) restoreDefaultsCreateRequest(ctx context.Context, resourceGroupName string, cacheName string, storageTargetName string, options *StorageTargetsClientBeginRestoreDefaultsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}/restoreDefaults\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif cacheName == \"\" {\n\t\treturn nil, errors.New(\"parameter cacheName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{cacheName}\", url.PathEscape(cacheName))\n\tif storageTargetName == \"\" {\n\t\treturn nil, errors.New(\"parameter storageTargetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{storageTargetName}\", url.PathEscape(storageTargetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (s service) CreateInterest(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\tctx, span := trace.StartSpan(ctx, \"interestcal.service.CreateInterest\")\n\tdefer span.End()\n\treqlog.Dump(r, \"interestcal.CreateInterest\")\n\tvar cireq interestvalue.CreateInterestRequest\n\tif err := jsonfmt.Decode(r, &cireq); err != nil {\n\t\treturn errors.Wrap(err, \"decoding new interest calculation request with banks and deposits\")\n\t}\n\ts.log.Printf(\"\\nDecoded json is %+v\\n\", cireq)\n\ts.log.Println(\"Starting interest and 30day interest calculations(also called as delta)\")\n\tciresp, err := cireq.CalculateDelta()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"creating new interest calculations\")\n\t}\n\n\treturn jsonfmt.Respond(ctx, w, &ciresp, 
http.StatusCreated)\n}", "func (suite *TestHarnessAPISuite) TestNewDefaultBuilderNoAcceptHeader() {\n\treq := httptest.NewRequest(\"POST\", \"/build/DefaultMove\", nil)\n\tchiRouteCtx := chi.NewRouteContext()\n\tchiRouteCtx.URLParams.Add(\"action\", \"DefaultMove\")\n\treq = req.WithContext(context.WithValue(req.Context(), chi.RouteCtxKey, chiRouteCtx))\n\trr := httptest.NewRecorder()\n\thandler := NewDefaultBuilder(suite.HandlerConfig())\n\thandler.ServeHTTP(rr, req)\n\n\tsuite.Equal(http.StatusOK, rr.Code)\n\tsuite.Equal(\"application/json\", rr.Header().Get(\"Content-type\"))\n}", "func (client *ReplicationvCentersClient) listCreateRequest(ctx context.Context, options *ReplicationvCentersClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationvCenters\"\n\tif client.resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(client.resourceName))\n\tif client.resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(client.resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func Accepted(data Serializer, logging ...interface{}) Response {\n\tif data == nil {\n\t\tdata = String(\"202 Accepted\")\n\t}\n\treturn Response{Status: http.StatusAccepted, Data: data, Logging: logging}\n}", "func (m *_BACnetUnconfirmedServiceRequestUnconfirmedPrivateTransfer) InitializeParent(parent BACnetUnconfirmedServiceRequest) {\n}", "func (o *PostManagementKubernetesIoV1NodesAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(202)\n}", "func NewCreateAuthorizationV1SelfSubjectAccessReviewAccepted() *CreateAuthorizationV1SelfSubjectAccessReviewAccepted {\n\treturn &CreateAuthorizationV1SelfSubjectAccessReviewAccepted{}\n}", "func (rm *resourceManager) newCreateRequestPayload(\n\tctx context.Context,\n\tr *resource,\n) (*svcsdk.CreateVpnConnectionInput, error) {\n\tres := &svcsdk.CreateVpnConnectionInput{}\n\n\tif r.ko.Spec.CustomerGatewayID != nil {\n\t\tres.SetCustomerGatewayId(*r.ko.Spec.CustomerGatewayID)\n\t}\n\tif r.ko.Spec.DryRun != nil {\n\t\tres.SetDryRun(*r.ko.Spec.DryRun)\n\t}\n\tif r.ko.Spec.Options != nil {\n\t\tf2 := &svcsdk.VpnConnectionOptionsSpecification{}\n\t\tif r.ko.Spec.Options.EnableAcceleration != nil {\n\t\t\tf2.SetEnableAcceleration(*r.ko.Spec.Options.EnableAcceleration)\n\t\t}\n\t\tif r.ko.Spec.Options.LocalIPv4NetworkCIDR != nil {\n\t\t\tf2.SetLocalIpv4NetworkCidr(*r.ko.Spec.Options.LocalIPv4NetworkCIDR)\n\t\t}\n\t\tif r.ko.Spec.Options.LocalIPv6NetworkCIDR != nil 
{\n\t\t\tf2.SetLocalIpv6NetworkCidr(*r.ko.Spec.Options.LocalIPv6NetworkCIDR)\n\t\t}\n\t\tif r.ko.Spec.Options.RemoteIPv4NetworkCIDR != nil {\n\t\t\tf2.SetRemoteIpv4NetworkCidr(*r.ko.Spec.Options.RemoteIPv4NetworkCIDR)\n\t\t}\n\t\tif r.ko.Spec.Options.RemoteIPv6NetworkCIDR != nil {\n\t\t\tf2.SetRemoteIpv6NetworkCidr(*r.ko.Spec.Options.RemoteIPv6NetworkCIDR)\n\t\t}\n\t\tif r.ko.Spec.Options.StaticRoutesOnly != nil {\n\t\t\tf2.SetStaticRoutesOnly(*r.ko.Spec.Options.StaticRoutesOnly)\n\t\t}\n\t\tif r.ko.Spec.Options.TunnelInsideIPVersion != nil {\n\t\t\tf2.SetTunnelInsideIpVersion(*r.ko.Spec.Options.TunnelInsideIPVersion)\n\t\t}\n\t\tif r.ko.Spec.Options.TunnelOptions != nil {\n\t\t\tf2f7 := []*svcsdk.VpnTunnelOptionsSpecification{}\n\t\t\tfor _, f2f7iter := range r.ko.Spec.Options.TunnelOptions {\n\t\t\t\tf2f7elem := &svcsdk.VpnTunnelOptionsSpecification{}\n\t\t\t\tif f2f7iter.DPDTimeoutAction != nil {\n\t\t\t\t\tf2f7elem.SetDPDTimeoutAction(*f2f7iter.DPDTimeoutAction)\n\t\t\t\t}\n\t\t\t\tif f2f7iter.DPDTimeoutSeconds != nil {\n\t\t\t\t\tf2f7elem.SetDPDTimeoutSeconds(*f2f7iter.DPDTimeoutSeconds)\n\t\t\t\t}\n\t\t\t\tif f2f7iter.IKEVersions != nil {\n\t\t\t\t\tf2f7elemf2 := []*svcsdk.IKEVersionsRequestListValue{}\n\t\t\t\t\tfor _, f2f7elemf2iter := range f2f7iter.IKEVersions {\n\t\t\t\t\t\tf2f7elemf2elem := &svcsdk.IKEVersionsRequestListValue{}\n\t\t\t\t\t\tif f2f7elemf2iter.Value != nil {\n\t\t\t\t\t\t\tf2f7elemf2elem.SetValue(*f2f7elemf2iter.Value)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tf2f7elemf2 = append(f2f7elemf2, f2f7elemf2elem)\n\t\t\t\t\t}\n\t\t\t\t\tf2f7elem.SetIKEVersions(f2f7elemf2)\n\t\t\t\t}\n\t\t\t\tif f2f7iter.Phase1DHGroupNumbers != nil {\n\t\t\t\t\tf2f7elemf3 := []*svcsdk.Phase1DHGroupNumbersRequestListValue{}\n\t\t\t\t\tfor _, f2f7elemf3iter := range f2f7iter.Phase1DHGroupNumbers {\n\t\t\t\t\t\tf2f7elemf3elem := &svcsdk.Phase1DHGroupNumbersRequestListValue{}\n\t\t\t\t\t\tif f2f7elemf3iter.Value != nil {\n\t\t\t\t\t\t\tf2f7elemf3elem.SetValue(*f2f7elemf3iter.Value)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tf2f7elemf3 = append(f2f7elemf3, f2f7elemf3elem)\n\t\t\t\t\t}\n\t\t\t\t\tf2f7elem.SetPhase1DHGroupNumbers(f2f7elemf3)\n\t\t\t\t}\n\t\t\t\tif f2f7iter.Phase1EncryptionAlgorithms != nil {\n\t\t\t\t\tf2f7elemf4 := []*svcsdk.Phase1EncryptionAlgorithmsRequestListValue{}\n\t\t\t\t\tfor _, f2f7elemf4iter := range f2f7iter.Phase1EncryptionAlgorithms {\n\t\t\t\t\t\tf2f7elemf4elem := &svcsdk.Phase1EncryptionAlgorithmsRequestListValue{}\n\t\t\t\t\t\tif f2f7elemf4iter.Value != nil {\n\t\t\t\t\t\t\tf2f7elemf4elem.SetValue(*f2f7elemf4iter.Value)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tf2f7elemf4 = append(f2f7elemf4, f2f7elemf4elem)\n\t\t\t\t\t}\n\t\t\t\t\tf2f7elem.SetPhase1EncryptionAlgorithms(f2f7elemf4)\n\t\t\t\t}\n\t\t\t\tif f2f7iter.Phase1IntegrityAlgorithms != nil {\n\t\t\t\t\tf2f7elemf5 := []*svcsdk.Phase1IntegrityAlgorithmsRequestListValue{}\n\t\t\t\t\tfor _, f2f7elemf5iter := range f2f7iter.Phase1IntegrityAlgorithms {\n\t\t\t\t\t\tf2f7elemf5elem := &svcsdk.Phase1IntegrityAlgorithmsRequestListValue{}\n\t\t\t\t\t\tif f2f7elemf5iter.Value != nil {\n\t\t\t\t\t\t\tf2f7elemf5elem.SetValue(*f2f7elemf5iter.Value)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tf2f7elemf5 = append(f2f7elemf5, f2f7elemf5elem)\n\t\t\t\t\t}\n\t\t\t\t\tf2f7elem.SetPhase1IntegrityAlgorithms(f2f7elemf5)\n\t\t\t\t}\n\t\t\t\tif f2f7iter.Phase1LifetimeSeconds != nil {\n\t\t\t\t\tf2f7elem.SetPhase1LifetimeSeconds(*f2f7iter.Phase1LifetimeSeconds)\n\t\t\t\t}\n\t\t\t\tif f2f7iter.Phase2DHGroupNumbers != nil {\n\t\t\t\t\tf2f7elemf7 := 
[]*svcsdk.Phase2DHGroupNumbersRequestListValue{}\n\t\t\t\t\tfor _, f2f7elemf7iter := range f2f7iter.Phase2DHGroupNumbers {\n\t\t\t\t\t\tf2f7elemf7elem := &svcsdk.Phase2DHGroupNumbersRequestListValue{}\n\t\t\t\t\t\tif f2f7elemf7iter.Value != nil {\n\t\t\t\t\t\t\tf2f7elemf7elem.SetValue(*f2f7elemf7iter.Value)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tf2f7elemf7 = append(f2f7elemf7, f2f7elemf7elem)\n\t\t\t\t\t}\n\t\t\t\t\tf2f7elem.SetPhase2DHGroupNumbers(f2f7elemf7)\n\t\t\t\t}\n\t\t\t\tif f2f7iter.Phase2EncryptionAlgorithms != nil {\n\t\t\t\t\tf2f7elemf8 := []*svcsdk.Phase2EncryptionAlgorithmsRequestListValue{}\n\t\t\t\t\tfor _, f2f7elemf8iter := range f2f7iter.Phase2EncryptionAlgorithms {\n\t\t\t\t\t\tf2f7elemf8elem := &svcsdk.Phase2EncryptionAlgorithmsRequestListValue{}\n\t\t\t\t\t\tif f2f7elemf8iter.Value != nil {\n\t\t\t\t\t\t\tf2f7elemf8elem.SetValue(*f2f7elemf8iter.Value)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tf2f7elemf8 = append(f2f7elemf8, f2f7elemf8elem)\n\t\t\t\t\t}\n\t\t\t\t\tf2f7elem.SetPhase2EncryptionAlgorithms(f2f7elemf8)\n\t\t\t\t}\n\t\t\t\tif f2f7iter.Phase2IntegrityAlgorithms != nil {\n\t\t\t\t\tf2f7elemf9 := []*svcsdk.Phase2IntegrityAlgorithmsRequestListValue{}\n\t\t\t\t\tfor _, f2f7elemf9iter := range f2f7iter.Phase2IntegrityAlgorithms {\n\t\t\t\t\t\tf2f7elemf9elem := &svcsdk.Phase2IntegrityAlgorithmsRequestListValue{}\n\t\t\t\t\t\tif f2f7elemf9iter.Value != nil {\n\t\t\t\t\t\t\tf2f7elemf9elem.SetValue(*f2f7elemf9iter.Value)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tf2f7elemf9 = append(f2f7elemf9, f2f7elemf9elem)\n\t\t\t\t\t}\n\t\t\t\t\tf2f7elem.SetPhase2IntegrityAlgorithms(f2f7elemf9)\n\t\t\t\t}\n\t\t\t\tif f2f7iter.Phase2LifetimeSeconds != nil {\n\t\t\t\t\tf2f7elem.SetPhase2LifetimeSeconds(*f2f7iter.Phase2LifetimeSeconds)\n\t\t\t\t}\n\t\t\t\tif f2f7iter.PreSharedKey != nil {\n\t\t\t\t\tf2f7elem.SetPreSharedKey(*f2f7iter.PreSharedKey)\n\t\t\t\t}\n\t\t\t\tif f2f7iter.RekeyFuzzPercentage != nil {\n\t\t\t\t\tf2f7elem.SetRekeyFuzzPercentage(*f2f7iter.RekeyFuzzPercentage)\n\t\t\t\t}\n\t\t\t\tif f2f7iter.RekeyMarginTimeSeconds != nil {\n\t\t\t\t\tf2f7elem.SetRekeyMarginTimeSeconds(*f2f7iter.RekeyMarginTimeSeconds)\n\t\t\t\t}\n\t\t\t\tif f2f7iter.ReplayWindowSize != nil {\n\t\t\t\t\tf2f7elem.SetReplayWindowSize(*f2f7iter.ReplayWindowSize)\n\t\t\t\t}\n\t\t\t\tif f2f7iter.StartupAction != nil {\n\t\t\t\t\tf2f7elem.SetStartupAction(*f2f7iter.StartupAction)\n\t\t\t\t}\n\t\t\t\tif f2f7iter.TunnelInsideCIDR != nil {\n\t\t\t\t\tf2f7elem.SetTunnelInsideCidr(*f2f7iter.TunnelInsideCIDR)\n\t\t\t\t}\n\t\t\t\tif f2f7iter.TunnelInsideIPv6CIDR != nil {\n\t\t\t\t\tf2f7elem.SetTunnelInsideIpv6Cidr(*f2f7iter.TunnelInsideIPv6CIDR)\n\t\t\t\t}\n\t\t\t\tf2f7 = append(f2f7, f2f7elem)\n\t\t\t}\n\t\t\tf2.SetTunnelOptions(f2f7)\n\t\t}\n\t\tres.SetOptions(f2)\n\t}\n\tif r.ko.Spec.TagSpecifications != nil {\n\t\tf3 := []*svcsdk.TagSpecification{}\n\t\tfor _, f3iter := range r.ko.Spec.TagSpecifications {\n\t\t\tf3elem := &svcsdk.TagSpecification{}\n\t\t\tif f3iter.ResourceType != nil {\n\t\t\t\tf3elem.SetResourceType(*f3iter.ResourceType)\n\t\t\t}\n\t\t\tif f3iter.Tags != nil {\n\t\t\t\tf3elemf1 := []*svcsdk.Tag{}\n\t\t\t\tfor _, f3elemf1iter := range f3iter.Tags {\n\t\t\t\t\tf3elemf1elem := &svcsdk.Tag{}\n\t\t\t\t\tif f3elemf1iter.Key != nil {\n\t\t\t\t\t\tf3elemf1elem.SetKey(*f3elemf1iter.Key)\n\t\t\t\t\t}\n\t\t\t\t\tif f3elemf1iter.Value != nil {\n\t\t\t\t\t\tf3elemf1elem.SetValue(*f3elemf1iter.Value)\n\t\t\t\t\t}\n\t\t\t\t\tf3elemf1 = append(f3elemf1, f3elemf1elem)\n\t\t\t\t}\n\t\t\t\tf3elem.SetTags(f3elemf1)\n\t\t\t}\n\t\t\tf3 = append(f3, 
f3elem)\n\t\t}\n\t\tres.SetTagSpecifications(f3)\n\t}\n\tif r.ko.Spec.TransitGatewayID != nil {\n\t\tres.SetTransitGatewayId(*r.ko.Spec.TransitGatewayID)\n\t}\n\tif r.ko.Spec.Type != nil {\n\t\tres.SetType(*r.ko.Spec.Type)\n\t}\n\tif r.ko.Spec.VPNGatewayID != nil {\n\t\tres.SetVpnGatewayId(*r.ko.Spec.VPNGatewayID)\n\t}\n\n\treturn res, nil\n}", "func (o *CreateAuthenticationV1beta1TokenReviewAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(202)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func TestCreateTransferNotMine(t *testing.T) {\n\ttesttrans := []byte(`{\"To\": \"[email protected]\",\"Status\": \"pending\"}`)\n\n\treq, _ := http.NewRequest(\"POST\", \"/certificates/c001/transfers/create\", bytes.NewBuffer(testtrans))\n\treq.SetBasicAuth(\"vvg01\", \"vwh39043f\")\n\tresponse := executeRequest(req)\n\n\tcheckResponseCode(t, http.StatusUnauthorized, response.Code)\n\n}", "func NewCreateApiregistrationV1beta1APIServiceAccepted() *CreateApiregistrationV1beta1APIServiceAccepted {\n\treturn &CreateApiregistrationV1beta1APIServiceAccepted{}\n}", "func CreateDescribeGatewayFileSharesRequest() (request *DescribeGatewayFileSharesRequest) {\n\trequest = &DescribeGatewayFileSharesRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"sgw\", \"2018-05-11\", \"DescribeGatewayFileShares\", \"hcs_sgw\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func NewRegisterUserAccepted() *RegisterUserAccepted {\n\treturn &RegisterUserAccepted{}\n}", "func resourceVolterraVirtualNetworkCreate(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*APIClient)\n\n\tcreateMeta := &ves_io_schema.ObjectCreateMetaType{}\n\tcreateSpec := &ves_io_schema_virtual_network.CreateSpecType{}\n\tcreateReq := &ves_io_schema_virtual_network.CreateRequest{\n\t\tMetadata: createMeta,\n\t\tSpec: createSpec,\n\t}\n\n\tif v, ok := d.GetOk(\"annotations\"); ok && !isIntfNil(v) {\n\n\t\tms := map[string]string{}\n\n\t\tfor k, v := range v.(map[string]interface{}) {\n\t\t\tval := v.(string)\n\t\t\tms[k] = val\n\t\t}\n\t\tcreateMeta.Annotations = ms\n\t}\n\n\tif v, ok := d.GetOk(\"description\"); ok && !isIntfNil(v) {\n\t\tcreateMeta.Description =\n\t\t\tv.(string)\n\t}\n\n\tif v, ok := d.GetOk(\"disable\"); ok && !isIntfNil(v) {\n\t\tcreateMeta.Disable =\n\t\t\tv.(bool)\n\t}\n\n\tif v, ok := d.GetOk(\"labels\"); ok && !isIntfNil(v) {\n\n\t\tms := map[string]string{}\n\n\t\tfor k, v := range v.(map[string]interface{}) {\n\t\t\tval := v.(string)\n\t\t\tms[k] = val\n\t\t}\n\t\tcreateMeta.Labels = ms\n\t}\n\n\tif v, ok := d.GetOk(\"name\"); ok && !isIntfNil(v) {\n\t\tcreateMeta.Name =\n\t\t\tv.(string)\n\t}\n\n\tif v, ok := d.GetOk(\"namespace\"); ok && !isIntfNil(v) {\n\t\tcreateMeta.Namespace =\n\t\t\tv.(string)\n\t}\n\n\t//network_choice\n\n\tnetworkChoiceTypeFound := false\n\n\tif v, ok := d.GetOk(\"global_network\"); ok && !networkChoiceTypeFound {\n\n\t\tnetworkChoiceTypeFound = true\n\n\t\tif v.(bool) {\n\t\t\tnetworkChoiceInt := &ves_io_schema_virtual_network.CreateSpecType_GlobalNetwork{}\n\t\t\tnetworkChoiceInt.GlobalNetwork = &ves_io_schema.Empty{}\n\t\t\tcreateSpec.NetworkChoice = networkChoiceInt\n\t\t}\n\n\t}\n\n\tif v, ok := d.GetOk(\"legacy_type\"); ok && !networkChoiceTypeFound {\n\n\t\tnetworkChoiceTypeFound = true\n\t\tnetworkChoiceInt := 
&ves_io_schema_virtual_network.CreateSpecType_LegacyType{}\n\n\t\tcreateSpec.NetworkChoice = networkChoiceInt\n\n\t\tnetworkChoiceInt.LegacyType = ves_io_schema.VirtualNetworkType(ves_io_schema.VirtualNetworkType_value[v.(string)])\n\n\t}\n\n\tif v, ok := d.GetOk(\"site_local_inside_network\"); ok && !networkChoiceTypeFound {\n\n\t\tnetworkChoiceTypeFound = true\n\n\t\tif v.(bool) {\n\t\t\tnetworkChoiceInt := &ves_io_schema_virtual_network.CreateSpecType_SiteLocalInsideNetwork{}\n\t\t\tnetworkChoiceInt.SiteLocalInsideNetwork = &ves_io_schema.Empty{}\n\t\t\tcreateSpec.NetworkChoice = networkChoiceInt\n\t\t}\n\n\t}\n\n\tif v, ok := d.GetOk(\"site_local_network\"); ok && !networkChoiceTypeFound {\n\n\t\tnetworkChoiceTypeFound = true\n\n\t\tif v.(bool) {\n\t\t\tnetworkChoiceInt := &ves_io_schema_virtual_network.CreateSpecType_SiteLocalNetwork{}\n\t\t\tnetworkChoiceInt.SiteLocalNetwork = &ves_io_schema.Empty{}\n\t\t\tcreateSpec.NetworkChoice = networkChoiceInt\n\t\t}\n\n\t}\n\n\tif v, ok := d.GetOk(\"srv6_network\"); ok && !networkChoiceTypeFound {\n\n\t\tnetworkChoiceTypeFound = true\n\t\tnetworkChoiceInt := &ves_io_schema_virtual_network.CreateSpecType_Srv6Network{}\n\t\tnetworkChoiceInt.Srv6Network = &ves_io_schema_virtual_network.PerSiteSrv6NetworkType{}\n\t\tcreateSpec.NetworkChoice = networkChoiceInt\n\n\t\tsl := v.(*schema.Set).List()\n\t\tfor _, set := range sl {\n\t\t\tcs := set.(map[string]interface{})\n\n\t\t\tif v, ok := cs[\"access_network_rtargets\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := v.([]interface{})\n\t\t\t\taccessNetworkRtargets := make([]*ves_io_schema.RouteTarget, len(sl))\n\t\t\t\tnetworkChoiceInt.Srv6Network.AccessNetworkRtargets = accessNetworkRtargets\n\t\t\t\tfor i, set := range sl {\n\t\t\t\t\taccessNetworkRtargets[i] = &ves_io_schema.RouteTarget{}\n\t\t\t\t\taccessNetworkRtargetsMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\trtargetChoiceTypeFound := false\n\n\t\t\t\t\tif v, ok := accessNetworkRtargetsMapStrToI[\"asn2byte_rtarget\"]; ok && !isIntfNil(v) && !rtargetChoiceTypeFound {\n\n\t\t\t\t\t\trtargetChoiceTypeFound = true\n\t\t\t\t\t\trtargetChoiceInt := &ves_io_schema.RouteTarget_Asn2ByteRtarget{}\n\t\t\t\t\t\trtargetChoiceInt.Asn2ByteRtarget = &ves_io_schema.RouteTarget2ByteAsn{}\n\t\t\t\t\t\taccessNetworkRtargets[i].RtargetChoice = rtargetChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif v, ok := cs[\"as_number\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn2ByteRtarget.AsNumber = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := cs[\"value\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn2ByteRtarget.Value = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := accessNetworkRtargetsMapStrToI[\"asn4byte_rtarget\"]; ok && !isIntfNil(v) && !rtargetChoiceTypeFound {\n\n\t\t\t\t\t\trtargetChoiceTypeFound = true\n\t\t\t\t\t\trtargetChoiceInt := &ves_io_schema.RouteTarget_Asn4ByteRtarget{}\n\t\t\t\t\t\trtargetChoiceInt.Asn4ByteRtarget = &ves_io_schema.RouteTarget4ByteAsn{}\n\t\t\t\t\t\taccessNetworkRtargets[i].RtargetChoice = rtargetChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif v, ok := cs[\"as_number\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn4ByteRtarget.AsNumber = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, 
ok := cs[\"value\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn4ByteRtarget.Value = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := accessNetworkRtargetsMapStrToI[\"ipv4_addr_rtarget\"]; ok && !isIntfNil(v) && !rtargetChoiceTypeFound {\n\n\t\t\t\t\t\trtargetChoiceTypeFound = true\n\t\t\t\t\t\trtargetChoiceInt := &ves_io_schema.RouteTarget_Ipv4AddrRtarget{}\n\t\t\t\t\t\trtargetChoiceInt.Ipv4AddrRtarget = &ves_io_schema.RouteTargetIPv4Addr{}\n\t\t\t\t\t\taccessNetworkRtargets[i].RtargetChoice = rtargetChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif v, ok := cs[\"address\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Ipv4AddrRtarget.Address = v.(string)\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := cs[\"value\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Ipv4AddrRtarget.Value = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tdefaultVipChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"anycast_vip\"]; ok && !isIntfNil(v) && !defaultVipChoiceTypeFound {\n\n\t\t\t\tdefaultVipChoiceTypeFound = true\n\t\t\t\tdefaultVipChoiceInt := &ves_io_schema_virtual_network.PerSiteSrv6NetworkType_AnycastVip{}\n\n\t\t\t\tnetworkChoiceInt.Srv6Network.DefaultVipChoice = defaultVipChoiceInt\n\n\t\t\t\tdefaultVipChoiceInt.AnycastVip = v.(string)\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"fleet_vip\"]; ok && !isIntfNil(v) && !defaultVipChoiceTypeFound {\n\n\t\t\t\tdefaultVipChoiceTypeFound = true\n\t\t\t\tdefaultVipChoiceInt := &ves_io_schema_virtual_network.PerSiteSrv6NetworkType_FleetVip{}\n\t\t\t\tdefaultVipChoiceInt.FleetVip = &ves_io_schema_virtual_network.AnyCastVIPFleetType{}\n\t\t\t\tnetworkChoiceInt.Srv6Network.DefaultVipChoice = defaultVipChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"vip_allocator\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tvipAllocatorInt := &ves_io_schema_views.ObjectRefType{}\n\t\t\t\t\t\tdefaultVipChoiceInt.FleetVip.VipAllocator = vipAllocatorInt\n\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tvaMapToStrVal := set.(map[string]interface{})\n\t\t\t\t\t\t\tif val, ok := vaMapToStrVal[\"name\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\tvipAllocatorInt.Name = val.(string)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif val, ok := vaMapToStrVal[\"namespace\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\tvipAllocatorInt.Namespace = val.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif val, ok := vaMapToStrVal[\"tenant\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\tvipAllocatorInt.Tenant = val.(string)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"interface_ip_vip\"]; ok && !isIntfNil(v) && !defaultVipChoiceTypeFound {\n\n\t\t\t\tdefaultVipChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tdefaultVipChoiceInt := &ves_io_schema_virtual_network.PerSiteSrv6NetworkType_InterfaceIpVip{}\n\t\t\t\t\tdefaultVipChoiceInt.InterfaceIpVip = &ves_io_schema.Empty{}\n\t\t\t\t\tnetworkChoiceInt.Srv6Network.DefaultVipChoice = defaultVipChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"enterprise_network_rtargets\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := v.([]interface{})\n\t\t\t\tenterpriseNetworkRtargets := make([]*ves_io_schema.RouteTarget, 
len(sl))\n\t\t\t\tnetworkChoiceInt.Srv6Network.EnterpriseNetworkRtargets = enterpriseNetworkRtargets\n\t\t\t\tfor i, set := range sl {\n\t\t\t\t\tenterpriseNetworkRtargets[i] = &ves_io_schema.RouteTarget{}\n\t\t\t\t\tenterpriseNetworkRtargetsMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\trtargetChoiceTypeFound := false\n\n\t\t\t\t\tif v, ok := enterpriseNetworkRtargetsMapStrToI[\"asn2byte_rtarget\"]; ok && !isIntfNil(v) && !rtargetChoiceTypeFound {\n\n\t\t\t\t\t\trtargetChoiceTypeFound = true\n\t\t\t\t\t\trtargetChoiceInt := &ves_io_schema.RouteTarget_Asn2ByteRtarget{}\n\t\t\t\t\t\trtargetChoiceInt.Asn2ByteRtarget = &ves_io_schema.RouteTarget2ByteAsn{}\n\t\t\t\t\t\tenterpriseNetworkRtargets[i].RtargetChoice = rtargetChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif v, ok := cs[\"as_number\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn2ByteRtarget.AsNumber = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := cs[\"value\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn2ByteRtarget.Value = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := enterpriseNetworkRtargetsMapStrToI[\"asn4byte_rtarget\"]; ok && !isIntfNil(v) && !rtargetChoiceTypeFound {\n\n\t\t\t\t\t\trtargetChoiceTypeFound = true\n\t\t\t\t\t\trtargetChoiceInt := &ves_io_schema.RouteTarget_Asn4ByteRtarget{}\n\t\t\t\t\t\trtargetChoiceInt.Asn4ByteRtarget = &ves_io_schema.RouteTarget4ByteAsn{}\n\t\t\t\t\t\tenterpriseNetworkRtargets[i].RtargetChoice = rtargetChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif v, ok := cs[\"as_number\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn4ByteRtarget.AsNumber = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := cs[\"value\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn4ByteRtarget.Value = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := enterpriseNetworkRtargetsMapStrToI[\"ipv4_addr_rtarget\"]; ok && !isIntfNil(v) && !rtargetChoiceTypeFound {\n\n\t\t\t\t\t\trtargetChoiceTypeFound = true\n\t\t\t\t\t\trtargetChoiceInt := &ves_io_schema.RouteTarget_Ipv4AddrRtarget{}\n\t\t\t\t\t\trtargetChoiceInt.Ipv4AddrRtarget = &ves_io_schema.RouteTargetIPv4Addr{}\n\t\t\t\t\t\tenterpriseNetworkRtargets[i].RtargetChoice = rtargetChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif v, ok := cs[\"address\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Ipv4AddrRtarget.Address = v.(string)\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := cs[\"value\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Ipv4AddrRtarget.Value = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"export_rtargets\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := v.([]interface{})\n\t\t\t\texportRtargets := make([]*ves_io_schema.RouteTarget, len(sl))\n\t\t\t\tnetworkChoiceInt.Srv6Network.ExportRtargets = exportRtargets\n\t\t\t\tfor i, set := range sl {\n\t\t\t\t\texportRtargets[i] = &ves_io_schema.RouteTarget{}\n\t\t\t\t\texportRtargetsMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\trtargetChoiceTypeFound := false\n\n\t\t\t\t\tif v, ok := 
exportRtargetsMapStrToI[\"asn2byte_rtarget\"]; ok && !isIntfNil(v) && !rtargetChoiceTypeFound {\n\n\t\t\t\t\t\trtargetChoiceTypeFound = true\n\t\t\t\t\t\trtargetChoiceInt := &ves_io_schema.RouteTarget_Asn2ByteRtarget{}\n\t\t\t\t\t\trtargetChoiceInt.Asn2ByteRtarget = &ves_io_schema.RouteTarget2ByteAsn{}\n\t\t\t\t\t\texportRtargets[i].RtargetChoice = rtargetChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif v, ok := cs[\"as_number\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn2ByteRtarget.AsNumber = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := cs[\"value\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn2ByteRtarget.Value = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := exportRtargetsMapStrToI[\"asn4byte_rtarget\"]; ok && !isIntfNil(v) && !rtargetChoiceTypeFound {\n\n\t\t\t\t\t\trtargetChoiceTypeFound = true\n\t\t\t\t\t\trtargetChoiceInt := &ves_io_schema.RouteTarget_Asn4ByteRtarget{}\n\t\t\t\t\t\trtargetChoiceInt.Asn4ByteRtarget = &ves_io_schema.RouteTarget4ByteAsn{}\n\t\t\t\t\t\texportRtargets[i].RtargetChoice = rtargetChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif v, ok := cs[\"as_number\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn4ByteRtarget.AsNumber = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := cs[\"value\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn4ByteRtarget.Value = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := exportRtargetsMapStrToI[\"ipv4_addr_rtarget\"]; ok && !isIntfNil(v) && !rtargetChoiceTypeFound {\n\n\t\t\t\t\t\trtargetChoiceTypeFound = true\n\t\t\t\t\t\trtargetChoiceInt := &ves_io_schema.RouteTarget_Ipv4AddrRtarget{}\n\t\t\t\t\t\trtargetChoiceInt.Ipv4AddrRtarget = &ves_io_schema.RouteTargetIPv4Addr{}\n\t\t\t\t\t\texportRtargets[i].RtargetChoice = rtargetChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif v, ok := cs[\"address\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Ipv4AddrRtarget.Address = v.(string)\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := cs[\"value\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Ipv4AddrRtarget.Value = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"fleets\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := v.([]interface{})\n\t\t\t\tfleetsInt := make([]*ves_io_schema_views.ObjectRefType, len(sl))\n\t\t\t\tnetworkChoiceInt.Srv6Network.Fleets = fleetsInt\n\t\t\t\tfor i, ps := range sl {\n\n\t\t\t\t\tfMapToStrVal := ps.(map[string]interface{})\n\t\t\t\t\tfleetsInt[i] = &ves_io_schema_views.ObjectRefType{}\n\n\t\t\t\t\tif v, ok := fMapToStrVal[\"name\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\tfleetsInt[i].Name = v.(string)\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := fMapToStrVal[\"namespace\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\tfleetsInt[i].Namespace = v.(string)\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := fMapToStrVal[\"tenant\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\tfleetsInt[i].Tenant = v.(string)\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"internet_rtargets\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := 
v.([]interface{})\n\t\t\t\tinternetRtargets := make([]*ves_io_schema.RouteTarget, len(sl))\n\t\t\t\tnetworkChoiceInt.Srv6Network.InternetRtargets = internetRtargets\n\t\t\t\tfor i, set := range sl {\n\t\t\t\t\tinternetRtargets[i] = &ves_io_schema.RouteTarget{}\n\t\t\t\t\tinternetRtargetsMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\trtargetChoiceTypeFound := false\n\n\t\t\t\t\tif v, ok := internetRtargetsMapStrToI[\"asn2byte_rtarget\"]; ok && !isIntfNil(v) && !rtargetChoiceTypeFound {\n\n\t\t\t\t\t\trtargetChoiceTypeFound = true\n\t\t\t\t\t\trtargetChoiceInt := &ves_io_schema.RouteTarget_Asn2ByteRtarget{}\n\t\t\t\t\t\trtargetChoiceInt.Asn2ByteRtarget = &ves_io_schema.RouteTarget2ByteAsn{}\n\t\t\t\t\t\tinternetRtargets[i].RtargetChoice = rtargetChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif v, ok := cs[\"as_number\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn2ByteRtarget.AsNumber = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := cs[\"value\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn2ByteRtarget.Value = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := internetRtargetsMapStrToI[\"asn4byte_rtarget\"]; ok && !isIntfNil(v) && !rtargetChoiceTypeFound {\n\n\t\t\t\t\t\trtargetChoiceTypeFound = true\n\t\t\t\t\t\trtargetChoiceInt := &ves_io_schema.RouteTarget_Asn4ByteRtarget{}\n\t\t\t\t\t\trtargetChoiceInt.Asn4ByteRtarget = &ves_io_schema.RouteTarget4ByteAsn{}\n\t\t\t\t\t\tinternetRtargets[i].RtargetChoice = rtargetChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif v, ok := cs[\"as_number\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn4ByteRtarget.AsNumber = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := cs[\"value\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn4ByteRtarget.Value = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := internetRtargetsMapStrToI[\"ipv4_addr_rtarget\"]; ok && !isIntfNil(v) && !rtargetChoiceTypeFound {\n\n\t\t\t\t\t\trtargetChoiceTypeFound = true\n\t\t\t\t\t\trtargetChoiceInt := &ves_io_schema.RouteTarget_Ipv4AddrRtarget{}\n\t\t\t\t\t\trtargetChoiceInt.Ipv4AddrRtarget = &ves_io_schema.RouteTargetIPv4Addr{}\n\t\t\t\t\t\tinternetRtargets[i].RtargetChoice = rtargetChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif v, ok := cs[\"address\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Ipv4AddrRtarget.Address = v.(string)\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := cs[\"value\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Ipv4AddrRtarget.Value = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tnamespaceChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"no_namespace_network\"]; ok && !isIntfNil(v) && !namespaceChoiceTypeFound {\n\n\t\t\t\tnamespaceChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tnamespaceChoiceInt := &ves_io_schema_virtual_network.PerSiteSrv6NetworkType_NoNamespaceNetwork{}\n\t\t\t\t\tnamespaceChoiceInt.NoNamespaceNetwork = &ves_io_schema.Empty{}\n\t\t\t\t\tnetworkChoiceInt.Srv6Network.NamespaceChoice = 
namespaceChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"srv6_network_ns_params\"]; ok && !isIntfNil(v) && !namespaceChoiceTypeFound {\n\n\t\t\t\tnamespaceChoiceTypeFound = true\n\t\t\t\tnamespaceChoiceInt := &ves_io_schema_virtual_network.PerSiteSrv6NetworkType_Srv6NetworkNsParams{}\n\t\t\t\tnamespaceChoiceInt.Srv6NetworkNsParams = &ves_io_schema_virtual_network.Srv6NetworkNsParametersType{}\n\t\t\t\tnetworkChoiceInt.Srv6Network.NamespaceChoice = namespaceChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"namespace\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tnamespaceChoiceInt.Srv6NetworkNsParams.Namespace = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"remote_sid_stats_plen\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tnetworkChoiceInt.Srv6Network.RemoteSidStatsPlen = uint32(v.(int))\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"slice\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tsliceInt := &ves_io_schema_views.ObjectRefType{}\n\t\t\t\tnetworkChoiceInt.Srv6Network.Slice = sliceInt\n\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tsMapToStrVal := set.(map[string]interface{})\n\t\t\t\t\tif val, ok := sMapToStrVal[\"name\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\tsliceInt.Name = val.(string)\n\t\t\t\t\t}\n\t\t\t\t\tif val, ok := sMapToStrVal[\"namespace\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\tsliceInt.Namespace = val.(string)\n\t\t\t\t\t}\n\n\t\t\t\t\tif val, ok := sMapToStrVal[\"tenant\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\tsliceInt.Tenant = val.(string)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tsnatPoolChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"fleet_snat_pool\"]; ok && !isIntfNil(v) && !snatPoolChoiceTypeFound {\n\n\t\t\t\tsnatPoolChoiceTypeFound = true\n\t\t\t\tsnatPoolChoiceInt := &ves_io_schema_virtual_network.PerSiteSrv6NetworkType_FleetSnatPool{}\n\t\t\t\tsnatPoolChoiceInt.FleetSnatPool = &ves_io_schema_virtual_network.SNATPoolFleetType{}\n\t\t\t\tnetworkChoiceInt.Srv6Network.SnatPoolChoice = snatPoolChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"snat_pool_allocator\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tsnatPoolAllocatorInt := &ves_io_schema_views.ObjectRefType{}\n\t\t\t\t\t\tsnatPoolChoiceInt.FleetSnatPool.SnatPoolAllocator = snatPoolAllocatorInt\n\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tspaMapToStrVal := set.(map[string]interface{})\n\t\t\t\t\t\t\tif val, ok := spaMapToStrVal[\"name\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\tsnatPoolAllocatorInt.Name = val.(string)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif val, ok := spaMapToStrVal[\"namespace\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\tsnatPoolAllocatorInt.Namespace = val.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif val, ok := spaMapToStrVal[\"tenant\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\tsnatPoolAllocatorInt.Tenant = val.(string)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"interface_ip_snat_pool\"]; ok && !isIntfNil(v) && !snatPoolChoiceTypeFound {\n\n\t\t\t\tsnatPoolChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tsnatPoolChoiceInt := &ves_io_schema_virtual_network.PerSiteSrv6NetworkType_InterfaceIpSnatPool{}\n\t\t\t\t\tsnatPoolChoiceInt.InterfaceIpSnatPool = &ves_io_schema.Empty{}\n\t\t\t\t\tnetworkChoiceInt.Srv6Network.SnatPoolChoice = 
snatPoolChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"site_snat_pool\"]; ok && !isIntfNil(v) && !snatPoolChoiceTypeFound {\n\n\t\t\t\tsnatPoolChoiceTypeFound = true\n\t\t\t\tsnatPoolChoiceInt := &ves_io_schema_virtual_network.PerSiteSrv6NetworkType_SiteSnatPool{}\n\t\t\t\tsnatPoolChoiceInt.SiteSnatPool = &ves_io_schema_virtual_network.SNATPoolSiteType{}\n\t\t\t\tnetworkChoiceInt.Srv6Network.SnatPoolChoice = snatPoolChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"node_snat_pool\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tnodeSnatPool := make(map[string]*ves_io_schema_virtual_network.SNATPoolType)\n\t\t\t\t\t\tsnatPoolChoiceInt.SiteSnatPool.NodeSnatPool = nodeSnatPool\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tnodeSnatPoolMapStrToI := set.(map[string]interface{})\n\t\t\t\t\t\t\tkey, ok := nodeSnatPoolMapStrToI[\"name\"]\n\t\t\t\t\t\t\tif ok && !isIntfNil(key) {\n\t\t\t\t\t\t\t\tnodeSnatPool[key.(string)] = &ves_io_schema_virtual_network.SNATPoolType{}\n\t\t\t\t\t\t\t\tval, _ := nodeSnatPoolMapStrToI[\"value\"]\n\n\t\t\t\t\t\t\t\tnodeSnatPoolVals := val.(*schema.Set).List()\n\t\t\t\t\t\t\t\tfor _, intVal := range nodeSnatPoolVals {\n\n\t\t\t\t\t\t\t\t\tnodeSnatPoolStaticMap := intVal.(map[string]interface{})\n\n\t\t\t\t\t\t\t\t\tif w, ok := nodeSnatPoolStaticMap[\"ipv4_prefixes\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\t\t\tnodeSnatPool[key.(string)].Ipv4Prefixes = nil\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\tif w, ok := nodeSnatPoolStaticMap[\"ipv6_prefixes\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\t\t\tnodeSnatPool[key.(string)].Ipv6Prefixes = nil\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\t// break after one loop\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\t//static_routes\n\tif v, ok := d.GetOk(\"static_routes\"); ok && !isIntfNil(v) {\n\n\t\tsl := v.([]interface{})\n\t\tstaticRoutes := make([]*ves_io_schema_virtual_network.StaticRouteViewType, len(sl))\n\t\tcreateSpec.StaticRoutes = staticRoutes\n\t\tfor i, set := range sl {\n\t\t\tstaticRoutes[i] = &ves_io_schema_virtual_network.StaticRouteViewType{}\n\t\t\tstaticRoutesMapStrToI := set.(map[string]interface{})\n\n\t\t\tif v, ok := staticRoutesMapStrToI[\"attrs\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tattrsList := []ves_io_schema.RouteAttrType{}\n\t\t\t\tfor _, j := range v.([]interface{}) {\n\t\t\t\t\tattrsList = append(attrsList, ves_io_schema.RouteAttrType(ves_io_schema.RouteAttrType_value[j.(string)]))\n\t\t\t\t}\n\t\t\t\tstaticRoutes[i].Attrs = attrsList\n\n\t\t\t}\n\n\t\t\tif w, ok := staticRoutesMapStrToI[\"ip_prefixes\"]; ok && !isIntfNil(w) {\n\t\t\t\tls := make([]string, len(w.([]interface{})))\n\t\t\t\tfor i, v := range w.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tstaticRoutes[i].IpPrefixes = ls\n\t\t\t}\n\n\t\t\tnextHopChoiceTypeFound := false\n\n\t\t\tif v, ok := staticRoutesMapStrToI[\"default_gateway\"]; ok && !isIntfNil(v) && !nextHopChoiceTypeFound {\n\n\t\t\t\tnextHopChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tnextHopChoiceInt := &ves_io_schema_virtual_network.StaticRouteViewType_DefaultGateway{}\n\t\t\t\t\tnextHopChoiceInt.DefaultGateway = &ves_io_schema.Empty{}\n\t\t\t\t\tstaticRoutes[i].NextHopChoice = nextHopChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := staticRoutesMapStrToI[\"interface\"]; ok && !isIntfNil(v) && 
!nextHopChoiceTypeFound {\n\n\t\t\t\tnextHopChoiceTypeFound = true\n\t\t\t\tnextHopChoiceInt := &ves_io_schema_virtual_network.StaticRouteViewType_Interface{}\n\t\t\t\tnextHopChoiceInt.Interface = &ves_io_schema_views.ObjectRefType{}\n\t\t\t\tstaticRoutes[i].NextHopChoice = nextHopChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"name\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tnextHopChoiceInt.Interface.Name = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"namespace\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tnextHopChoiceInt.Interface.Namespace = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"tenant\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tnextHopChoiceInt.Interface.Tenant = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := staticRoutesMapStrToI[\"ip_address\"]; ok && !isIntfNil(v) && !nextHopChoiceTypeFound {\n\n\t\t\t\tnextHopChoiceTypeFound = true\n\t\t\t\tnextHopChoiceInt := &ves_io_schema_virtual_network.StaticRouteViewType_IpAddress{}\n\n\t\t\t\tstaticRoutes[i].NextHopChoice = nextHopChoiceInt\n\n\t\t\t\tnextHopChoiceInt.IpAddress = v.(string)\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\t//static_v6_routes\n\tif v, ok := d.GetOk(\"static_v6_routes\"); ok && !isIntfNil(v) {\n\n\t\tsl := v.([]interface{})\n\t\tstaticV6Routes := make([]*ves_io_schema_virtual_network.StaticV6RouteViewType, len(sl))\n\t\tcreateSpec.StaticV6Routes = staticV6Routes\n\t\tfor i, set := range sl {\n\t\t\tstaticV6Routes[i] = &ves_io_schema_virtual_network.StaticV6RouteViewType{}\n\t\t\tstaticV6RoutesMapStrToI := set.(map[string]interface{})\n\n\t\t\tif v, ok := staticV6RoutesMapStrToI[\"attrs\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tattrsList := []ves_io_schema.RouteAttrType{}\n\t\t\t\tfor _, j := range v.([]interface{}) {\n\t\t\t\t\tattrsList = append(attrsList, ves_io_schema.RouteAttrType(ves_io_schema.RouteAttrType_value[j.(string)]))\n\t\t\t\t}\n\t\t\t\tstaticV6Routes[i].Attrs = attrsList\n\n\t\t\t}\n\n\t\t\tif w, ok := staticV6RoutesMapStrToI[\"ip_prefixes\"]; ok && !isIntfNil(w) {\n\t\t\t\tls := make([]string, len(w.([]interface{})))\n\t\t\t\tfor i, v := range w.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tstaticV6Routes[i].IpPrefixes = ls\n\t\t\t}\n\n\t\t\tnextHopChoiceTypeFound := false\n\n\t\t\tif v, ok := staticV6RoutesMapStrToI[\"default_gateway\"]; ok && !isIntfNil(v) && !nextHopChoiceTypeFound {\n\n\t\t\t\tnextHopChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tnextHopChoiceInt := &ves_io_schema_virtual_network.StaticV6RouteViewType_DefaultGateway{}\n\t\t\t\t\tnextHopChoiceInt.DefaultGateway = &ves_io_schema.Empty{}\n\t\t\t\t\tstaticV6Routes[i].NextHopChoice = nextHopChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := staticV6RoutesMapStrToI[\"interface\"]; ok && !isIntfNil(v) && !nextHopChoiceTypeFound {\n\n\t\t\t\tnextHopChoiceTypeFound = true\n\t\t\t\tnextHopChoiceInt := &ves_io_schema_virtual_network.StaticV6RouteViewType_Interface{}\n\t\t\t\tnextHopChoiceInt.Interface = &ves_io_schema_views.ObjectRefType{}\n\t\t\t\tstaticV6Routes[i].NextHopChoice = nextHopChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"name\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tnextHopChoiceInt.Interface.Name = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"namespace\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tnextHopChoiceInt.Interface.Namespace = 
v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"tenant\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tnextHopChoiceInt.Interface.Tenant = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := staticV6RoutesMapStrToI[\"ip_address\"]; ok && !isIntfNil(v) && !nextHopChoiceTypeFound {\n\n\t\t\t\tnextHopChoiceTypeFound = true\n\t\t\t\tnextHopChoiceInt := &ves_io_schema_virtual_network.StaticV6RouteViewType_IpAddress{}\n\n\t\t\t\tstaticV6Routes[i].NextHopChoice = nextHopChoiceInt\n\n\t\t\t\tnextHopChoiceInt.IpAddress = v.(string)\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tlog.Printf(\"[DEBUG] Creating Volterra VirtualNetwork object with struct: %+v\", createReq)\n\n\tcreateVirtualNetworkResp, err := client.CreateObject(context.Background(), ves_io_schema_virtual_network.ObjectType, createReq)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating VirtualNetwork: %s\", err)\n\t}\n\td.SetId(createVirtualNetworkResp.GetObjSystemMetadata().GetUid())\n\n\treturn resourceVolterraVirtualNetworkRead(d, meta)\n}", "func (t CreateAclsRequest) Encode(e *Encoder, version int16) {\n\t// Creations\n\tlen0 := len(t.Creations)\n\te.PutArrayLength(len0)\n\tfor i := 0; i < len0; i++ {\n\t\tt.Creations[i].Encode(e, version)\n\t}\n}", "func (client IdentityClient) CreateTagDefault(ctx context.Context, request CreateTagDefaultRequest) (response CreateTagDefaultResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\n\tif !(request.OpcRetryToken != nil && *request.OpcRetryToken != \"\") {\n\t\trequest.OpcRetryToken = common.String(common.RetryToken())\n\t}\n\n\tociResponse, err = common.Retry(ctx, request, client.createTagDefault, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = CreateTagDefaultResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = CreateTagDefaultResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(CreateTagDefaultResponse); ok {\n\t\tcommon.EcContext.UpdateEndOfWindow(time.Duration(240 * time.Second))\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into CreateTagDefaultResponse\")\n\t}\n\treturn\n}", "func InitiateContractOnNewBlock(\n\tcreator crypto.PublicKey,\n\tnonce string,\n\tcontractCode []byte,\n\tgas uint64,\n\tprepaid uint64,\n\tstorageLimit uint64,\n) (addr string, remainingGas uint64, err error) {\n\taddress := getAddress(creator, nonce, contractCode)\n\tif _, exists := newBlockState.contractStates[address]; exists {\n\t\treturn \"\", gas, fmt.Errorf(\"contract already exists on designated address\")\n\t}\n\ttexp, newstate, remainingGas, err := initiateContract(contractCode, address, gas, prepaid, storageLimit, newBlockState)\n\tif err != nil {\n\t\treturn \"\", remainingGas, err\n\t} else {\n\t\tnewBlockContracts[address] = contract{string(contractCode), texp, newstate.slot}\n\t\tnewBlockState = newstate\n\t\treturn address, remainingGas, nil\n\t}\n}", "func CreateGetWsCustomizedChEcomContentRequest() (request *GetWsCustomizedChEcomContentRequest) {\n\trequest = &GetWsCustomizedChEcomContentRequest{\n\t\tRpcRequest: 
&requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetWsCustomizedChEcomContent\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *KeyVaultClient) setStorageAccountCreateRequest(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountCreateParameters, options *KeyVaultClientSetStorageAccountOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/storage/{storage-account-name}\"\n\tif storageAccountName == \"\" {\n\t\treturn nil, errors.New(\"parameter storageAccountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{storage-account-name}\", url.PathEscape(storageAccountName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *WebAppsClient) createOrUpdateVnetConnectionSlotCreateRequest(ctx context.Context, resourceGroupName string, name string, vnetName string, slot string, connectionEnvelope VnetInfoResource, options *WebAppsCreateOrUpdateVnetConnectionSlotOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/virtualNetworkConnections/{vnetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif vnetName == \"\" {\n\t\treturn nil, errors.New(\"parameter vnetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vnetName}\", url.PathEscape(vnetName))\n\tif slot == \"\" {\n\t\treturn nil, errors.New(\"parameter slot cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{slot}\", url.PathEscape(slot))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, connectionEnvelope)\n}", "func (o *CreateNetworkingV1beta1NamespacedIngressAccepted) WithPayload(payload *models.IoK8sAPINetworkingV1beta1Ingress) *CreateNetworkingV1beta1NamespacedIngressAccepted {\n\to.Payload = payload\n\treturn o\n}", "func (*CreateContractRequest) Descriptor() ([]byte, []int) {\n\treturn file_contract_proto_rawDescGZIP(), []int{0}\n}", "func (o CreateChannelBody) MarshalJSON() ([]byte, error) {\n\t_parts := make([][]byte, 0, 2)\n\n\tvar dataCreateChannelParamsBodyAO0 struct {\n\t\tConfigMeta *CreateChannelParamsBodyCreateChannelParamsBodyAO0ConfigMeta 
`json:\"config_meta,omitempty\"`\n\n\t\tExternalID string `json:\"external_id,omitempty\"`\n\n\t\tIsListableFromUI bool `json:\"is_listable_from_ui,omitempty\"`\n\n\t\tIsVisible bool `json:\"is_visible,omitempty\"`\n\n\t\tName *string `json:\"name\"`\n\n\t\tStatus string `json:\"status,omitempty\"`\n\t}\n\n\tdataCreateChannelParamsBodyAO0.ConfigMeta = o.ConfigMeta\n\n\tdataCreateChannelParamsBodyAO0.ExternalID = o.ExternalID\n\n\tdataCreateChannelParamsBodyAO0.IsListableFromUI = o.IsListableFromUI\n\n\tdataCreateChannelParamsBodyAO0.IsVisible = o.IsVisible\n\n\tdataCreateChannelParamsBodyAO0.Name = o.Name\n\n\tdataCreateChannelParamsBodyAO0.Status = o.Status\n\n\tjsonDataCreateChannelParamsBodyAO0, errCreateChannelParamsBodyAO0 := swag.WriteJSON(dataCreateChannelParamsBodyAO0)\n\tif errCreateChannelParamsBodyAO0 != nil {\n\t\treturn nil, errCreateChannelParamsBodyAO0\n\t}\n\t_parts = append(_parts, jsonDataCreateChannelParamsBodyAO0)\n\tvar dataCreateChannelParamsBodyAO1 struct {\n\t\tPlatform *string `json:\"platform\"`\n\n\t\tType *string `json:\"type\"`\n\t}\n\n\tdataCreateChannelParamsBodyAO1.Platform = o.Platform\n\n\tdataCreateChannelParamsBodyAO1.Type = o.Type\n\n\tjsonDataCreateChannelParamsBodyAO1, errCreateChannelParamsBodyAO1 := swag.WriteJSON(dataCreateChannelParamsBodyAO1)\n\tif errCreateChannelParamsBodyAO1 != nil {\n\t\treturn nil, errCreateChannelParamsBodyAO1\n\t}\n\t_parts = append(_parts, jsonDataCreateChannelParamsBodyAO1)\n\treturn swag.ConcatJSON(_parts...), nil\n}", "func Create(c *golangsdk.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {\n\tb, err := opts.ToIcAgentIstallMap()\n\tif err != nil {\n\t\tr.Err = err\n\t\treturn\n\t}\n\treqOpt := &golangsdk.RequestOpts{OkCodes: []int{200}}\n\t_, r.Err = c.Post(rootURL(c), b, &r.Body, reqOpt)\n\treturn\n}", "func NewAcceptedProposal(data []byte) (*AcceptedProposal, error) {\n\tvar ap AcceptedProposal\n\tdec := gob.NewDecoder(bytes.NewBuffer(data))\n\tif err := dec.Decode(&ap); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ap, nil\n}", "func AddBackupSelection(nbmaster string, httpClient *http.Client, jwt string) {\r\n fmt.Printf(\"\\nSending a PUT request to add backupselection to policy %s...\\n\", testPolicyName)\r\n\r\n bkupSelection := map[string]interface{}{\r\n \"data\": map[string]interface{}{\r\n \"type\": \"backupSelection\",\r\n \"attributes\": map[string]interface{}{\r\n \"selections\": []string{\"vmware:/?filter=Displayname Contains 'rsv' OR Displayname Contains 'mtv'\"}}}}\r\n\r\n bkupSelectionRequest, _ := json.Marshal(bkupSelection)\r\n\r\n uri := \"https://\" + nbmaster + \":\" + port + \"/netbackup/\" + policiesUri + testPolicyName + \"/backupselections\"\r\n\r\n request, _ := http.NewRequest(http.MethodPut, uri, bytes.NewBuffer(bkupSelectionRequest))\r\n request.Header.Add(\"Content-Type\", contentTypeV2);\r\n request.Header.Add(\"Authorization\", jwt);\r\n\r\n response, err := httpClient.Do(request)\r\n\r\n if err != nil {\r\n fmt.Printf(\"The HTTP request failed with error: %s\\n\", err)\r\n panic(\"Unable to add backupselection to policy.\\n\")\r\n } else {\r\n if response.StatusCode != 204 {\r\n printErrorResponse(response)\r\n } else {\r\n fmt.Printf(\"backupselection added to %s successfully.\\n\", testPolicyName);\r\n }\r\n }\r\n}", "func (ctx *UploadOpmlContext) Created() error {\n\tctx.ResponseData.WriteHeader(201)\n\treturn nil\n}", "func (client *WebAppsClient) createOrUpdateVnetConnectionGatewayCreateRequest(ctx context.Context, resourceGroupName string, name string, vnetName 
string, gatewayName string, connectionEnvelope VnetGateway, options *WebAppsCreateOrUpdateVnetConnectionGatewayOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/virtualNetworkConnections/{vnetName}/gateways/{gatewayName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif vnetName == \"\" {\n\t\treturn nil, errors.New(\"parameter vnetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vnetName}\", url.PathEscape(vnetName))\n\tif gatewayName == \"\" {\n\t\treturn nil, errors.New(\"parameter gatewayName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{gatewayName}\", url.PathEscape(gatewayName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, connectionEnvelope)\n}", "func (c *Curator) create(repl int, hint core.StorageHint, expires time.Time) (core.BlobID, core.Error) {\n\tif repl <= 0 || repl > c.config.MaxReplFactor {\n\t\treturn core.BlobID(0), core.ErrInvalidArgument\n\t}\n\tif _, ok := core.EnumNamesStorageHint[hint]; !ok {\n\t\treturn core.BlobID(0), core.ErrInvalidArgument\n\t}\n\n\t// Have Raft figure out the Blob's ID and commit the creation.\n\tvar exp int64\n\tif !expires.IsZero() {\n\t\texp = expires.UnixNano()\n\t}\n\treturn c.stateHandler.CreateBlob(repl, time.Now().UnixNano(), exp, hint, c.stateHandler.GetTerm())\n}", "func (c *OutputService14ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func Create(client *gophercloud.ServiceClient, opts volumes.CreateOptsBuilder, bearer map[string]string) (r volumes.CreateResult) {\n\tb, err := opts.ToVolumeCreateMap()\n\tif err != nil {\n\t\tr.Err = err\n\t\treturn\n\t}\n\t_, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{\n\t\tOkCodes: []int{202},\n\t\tMoreHeaders: bearer,\n\t})\n\treturn\n}", "func (client *FileServicesClient) listCreateRequest(ctx context.Context, resourceGroupName string, accountName string, options *FileServicesListOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), 
urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2019-06-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func NewCreateCoordinationV1NamespacedLeaseAccepted() *CreateCoordinationV1NamespacedLeaseAccepted {\n\n\treturn &CreateCoordinationV1NamespacedLeaseAccepted{}\n}", "func TestAcceptTransfer(t *testing.T) {\n\ttesttrans := []byte(`{\"To\": \"[email protected]\",\"Status\": \"pending\"}`)\n\n\treq, _ := http.NewRequest(\"POST\", \"/certificates/c001/transfers/create\", bytes.NewBuffer(testtrans))\n\treq.SetBasicAuth(\"rr01\", \"rrejh3294\")\n\tresponse := executeRequest(req)\n\n\treq, _ = http.NewRequest(\"PUT\", \"/certificates/c001/transfers/accept\", nil)\n\treq.SetBasicAuth(\"vvg01\", \"vwh39043f\")\n\tresponse = executeRequest(req)\n\n\tcheckResponseCode(t, http.StatusOK, response.Code)\n\n\tif certs[0].OwnerID != \"vvg01\" {\n\t\tt.Errorf(\"Expected certificate owner to be changed. Got '%v'\", certs[0].OwnerID)\n\t}\n\n}" ]
[ "0.6550843", "0.55447596", "0.51314604", "0.51048374", "0.5060699", "0.49805027", "0.49471223", "0.48731118", "0.48619592", "0.48555326", "0.48495626", "0.46945184", "0.46536338", "0.46142438", "0.46005222", "0.45983022", "0.45870212", "0.45816574", "0.45782784", "0.4557088", "0.45565373", "0.4537466", "0.45333484", "0.4518067", "0.44756597", "0.4421847", "0.44195127", "0.44152677", "0.4410969", "0.44052148", "0.43898407", "0.4364009", "0.43531936", "0.4309178", "0.43070608", "0.43011597", "0.42931184", "0.42882106", "0.42828554", "0.42813164", "0.4268126", "0.4260322", "0.42583507", "0.42542532", "0.4249962", "0.42497858", "0.4244242", "0.42241946", "0.42205492", "0.42178443", "0.4209063", "0.41972786", "0.4193719", "0.41902304", "0.4188843", "0.41817528", "0.41660956", "0.41628158", "0.415459", "0.4151306", "0.4148758", "0.41487557", "0.41424286", "0.4126619", "0.41207546", "0.41169116", "0.41145554", "0.41122374", "0.41118705", "0.4109538", "0.4107182", "0.4106233", "0.40995333", "0.4081216", "0.40747797", "0.40743986", "0.40733442", "0.40659043", "0.40585274", "0.4052948", "0.40527862", "0.40508482", "0.40432096", "0.40422466", "0.40369454", "0.40355137", "0.40315706", "0.40303925", "0.40266028", "0.40175134", "0.40165186", "0.40144372", "0.40124148", "0.40119117", "0.39976555", "0.39950922", "0.3992683", "0.39925086", "0.39872402", "0.39817914" ]
0.7195267
0
WithPayload adds the payload to the create storage v1 c s i node accepted response
func (o *CreateStorageV1CSINodeAccepted) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *CreateStorageV1CSINodeAccepted { o.Payload = payload return o }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *CreateStorageV1CSINodeOK) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (o *CreateStorageV1CSINodeAccepted) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (o *CreateStorageV1CSINodeCreated) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (o *ReplaceStorageV1CSINodeCreated) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedServiceAccountTokenAccepted) SetPayload(payload *models.IoK8sAPIAuthenticationV1TokenRequest) {\n\to.Payload = payload\n}", "func (o *ReplaceStorageV1CSINodeOK) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedServiceAccountTokenCreated) SetPayload(payload *models.IoK8sAPIAuthenticationV1TokenRequest) {\n\to.Payload = payload\n}", "func (o *CreateClusterCreated) SetPayload(payload *models.Kluster) {\n\to.Payload = payload\n}", "func (o *CreateStorageV1CSINodeOK) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *CreateStorageV1CSINodeOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateHPCResourceCreated) SetPayload(payload *models.CreatedResponse) {\n\to.Payload = payload\n}", "func (o *CreateStorageV1CSINodeCreated) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *CreateStorageV1CSINodeCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *ClientPermissionCreateInternalServerError) SetPayload(payload *ClientPermissionCreateInternalServerErrorBody) {\n\to.Payload = payload\n}", "func (o *ReplaceStorageV1CSINodeCreated) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *ReplaceStorageV1CSINodeCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *WeaviateThingTemplatesCreateAccepted) SetPayload(payload *models.ThingTemplateGetResponse) {\n\to.Payload = payload\n}", "func (o *DeleteStorageByIDOK) SetPayload(payload *models.Storage) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedServiceAccountTokenOK) SetPayload(payload *models.IoK8sAPIAuthenticationV1TokenRequest) {\n\to.Payload = payload\n}", "func (o *CreateExtensionsV1beta1NamespacedIngressAccepted) WithPayload(payload *models.IoK8sAPIExtensionsV1beta1Ingress) *CreateExtensionsV1beta1NamespacedIngressAccepted {\n\to.Payload = payload\n\treturn o\n}", "func (o *ClientPermissionCreateOK) SetPayload(payload *ClientPermissionCreateOKBody) {\n\to.Payload = payload\n}", "func (o *CreateTaskAccepted) SetPayload(payload strfmt.UUID) {\n\to.Payload = payload\n}", "func (o *ReplaceApiextensionsV1beta1CustomResourceDefinitionCreated) SetPayload(payload *models.IoK8sApiextensionsApiserverPkgApisApiextensionsV1beta1CustomResourceDefinition) {\n\to.Payload = payload\n}", "func (o *CreateExtensionsV1beta1NamespacedIngressAccepted) SetPayload(payload *models.IoK8sAPIExtensionsV1beta1Ingress) {\n\to.Payload = payload\n}", "func (o *PutProjectProjectNameStageStageNameServiceServiceNameResourceCreated) SetPayload(payload *models.Version) {\n\to.Payload = payload\n}", "func (o *CreateExtensionsV1beta1NamespacedIngressCreated) SetPayload(payload *models.IoK8sAPIExtensionsV1beta1Ingress) {\n\to.Payload = payload\n}", "func (r CreateRequest) Payload() *model.Payload {\n\tbuf, _ := json.Marshal(r)\n\treturn model.NewPostPayload(buf)\n}", "func (o *CreateStorageSSLCertificateCreated) SetPayload(payload *models.SslCertificate) {\n\to.Payload = payload\n}", "func (o *CreateACLAccepted) SetPayload(payload *models.ACL) {\n\to.Payload = 
payload\n}", "func (o *CreateSpoeCreated) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *GetVSphereComputeResourcesOK) SetPayload(payload []*models.VSphereManagementObject) {\n\to.Payload = payload\n}", "func (o *CreateStorageSSLCertificateDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *ReplaceExtensionsV1beta1NamespacedIngressCreated) SetPayload(payload *models.IoK8sAPIExtensionsV1beta1Ingress) {\n\to.Payload = payload\n}", "func (o *ReadStorageV1beta1CSIDriverOK) SetPayload(payload *models.IoK8sAPIStorageV1beta1CSIDriver) {\n\to.Payload = payload\n}", "func (o *GetServicesHaproxyRuntimeAclsIDOK) SetPayload(payload *models.ACLFile) {\n\to.Payload = payload\n}", "func (o *CreateCurrentAPISessionCertificateOK) SetPayload(payload *rest_model.CreateCurrentAPISessionCertificateEnvelope) {\n\to.Payload = payload\n}", "func (o *SetResourceCreated) SetPayload(payload *models.Resource) {\n\to.Payload = payload\n}", "func (o *ReplaceCertificatesV1CertificateSigningRequestCreated) SetPayload(payload *models.IoK8sAPICertificatesV1CertificateSigningRequest) {\n\to.Payload = payload\n}", "func (o *DeleteCoreV1NamespacedPodAccepted) SetPayload(payload *models.IoK8sAPICoreV1Pod) {\n\to.Payload = payload\n}", "func (o *GetPresignedForClusterFilesInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateCoordinationV1NamespacedLeaseAccepted) SetPayload(payload *models.IoK8sAPICoordinationV1Lease) {\n\to.Payload = payload\n}", "func (o *DeleteApiextensionsV1CollectionCustomResourceDefinitionOK) SetPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) {\n\to.Payload = payload\n}", "func (o *DeleteStorageByIDNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (nf *NetworkPayload) SetPayload(newpayload []byte) {\n}", "func (o *PutSlideSuperlikeCreated) SetPayload(payload models.Success) {\n\to.Payload = payload\n}", "func (o *ReplaceApiextensionsV1beta1CustomResourceDefinitionOK) SetPayload(payload *models.IoK8sApiextensionsApiserverPkgApisApiextensionsV1beta1CustomResourceDefinition) {\n\to.Payload = payload\n}", "func (o *CreateAuthenticationV1beta1TokenReviewAccepted) SetPayload(payload *models.IoK8sAPIAuthenticationV1beta1TokenReview) {\n\to.Payload = payload\n}", "func (o *CreateZoneCreated) SetPayload(payload *models.CreateZoneResponse) {\n\to.Payload = payload\n}", "func (o *V1CreateHelloOK) SetPayload(payload *models.CreateHelloResponse) {\n\to.Payload = payload\n}", "func (o *CreateAuthenticationV1beta1TokenReviewCreated) SetPayload(payload *models.IoK8sAPIAuthenticationV1beta1TokenReview) {\n\to.Payload = payload\n}", "func (o *CreateHPCResourceForbidden) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *CreateNetworkingV1beta1NamespacedIngressAccepted) SetPayload(payload *models.IoK8sAPINetworkingV1beta1Ingress) {\n\to.Payload = payload\n}", "func (o *CreateClusterDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateUserGardenCreated) SetPayload(payload *models.Garden) {\n\to.Payload = payload\n}", "func (o *GetPresignedForClusterFilesForbidden) SetPayload(payload *models.InfraError) {\n\to.Payload = payload\n}", "func (o *GetPresignedForClusterFilesOK) SetPayload(payload *models.Presigned) {\n\to.Payload = payload\n}", "func (o *ReplaceExtensionsV1beta1NamespacedIngressCreated) WithPayload(payload *models.IoK8sAPIExtensionsV1beta1Ingress) *ReplaceExtensionsV1beta1NamespacedIngressCreated 
{\n\to.Payload = payload\n\treturn o\n}", "func (o *DeleteRuntimeContainerInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *ReplaceNodeV1alpha1RuntimeClassCreated) SetPayload(payload *models.IoK8sAPINodeV1alpha1RuntimeClass) {\n\to.Payload = payload\n}", "func (o *CreateOK) SetPayload(payload *models.Event) {\n\to.Payload = payload\n}", "func (o *CreateACLCreated) SetPayload(payload *models.ACL) {\n\to.Payload = payload\n}", "func (o *ReplicateCreated) SetPayload(payload *models.SteeringRequestID) {\n\to.Payload = payload\n}", "func (o *PutReposOwnerRepoContentsPathOK) SetPayload(payload *models.CreateFile) {\n\to.Payload = payload\n}", "func (o *ServiceAddCreated) SetPayload(payload *models.Service) {\n\to.Payload = payload\n}", "func (o *CreateNetworkingV1beta1NamespacedIngressCreated) SetPayload(payload *models.IoK8sAPINetworkingV1beta1Ingress) {\n\to.Payload = payload\n}", "func (o *GetProjectProjectNameServiceServiceNameResourceOK) SetPayload(payload *models.Resources) {\n\to.Payload = payload\n}", "func (o *CreateExtensionsV1beta1NamespacedIngressOK) WithPayload(payload *models.IoK8sAPIExtensionsV1beta1Ingress) *CreateExtensionsV1beta1NamespacedIngressOK {\n\to.Payload = payload\n\treturn o\n}", "func (c *DeviceController) HandlePayload(w http.ResponseWriter, r *http.Request) {\n\tif r.Body == nil {\n\t\thttp.Error(w, \"Please send a request body\", 400)\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\tbodyBytes, err := ioutil.ReadAll(r.Body)\n\tbodyString := string(bodyBytes)\n\tfmt.Println(bodyString)\n\treq := payloadReq{}\n\terr = json.Unmarshal([]byte(bodyString), &req)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 400)\n\t\treturn\n\t}\n\tfmt.Println(req)\n\tsize := req.Size\n\twaitDur := req.Wait\n\tresp := \"\"\n\twait.WaitDurationFixed(waitDur)\n\tfor i := int64(0); i < size; i++ {\n\t\tresp += \"A\"\n\t}\n\tc.SendJSON(\n\t\tw,\n\t\tr,\n\t\tresp,\n\t\thttp.StatusOK,\n\t)\n}", "func (o *CreateHPCResourceInternalServerError) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *SemverGenerateCreated) SetPayload(payload *models.SemverTagSet) {\n\to.Payload = payload\n}", "func (o *ClientPermissionCreateNotFound) SetPayload(payload *ClientPermissionCreateNotFoundBody) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedServiceAccountTokenAccepted) WithPayload(payload *models.IoK8sAPIAuthenticationV1TokenRequest) *CreateCoreV1NamespacedServiceAccountTokenAccepted {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateNetworkingV1beta1NamespacedIngressAccepted) WithPayload(payload *models.IoK8sAPINetworkingV1beta1Ingress) *CreateNetworkingV1beta1NamespacedIngressAccepted {\n\to.Payload = payload\n\treturn o\n}", "func (o *AddConsumptionInternalServerError) WithPayload(payload *models.ErrorResponse) *AddConsumptionInternalServerError {\n\to.Payload = payload\n\treturn o\n}", "func (o *WeaviateThingTemplatesCreateAccepted) WithPayload(payload *models.ThingTemplateGetResponse) *WeaviateThingTemplatesCreateAccepted {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetClusterInstallConfigOK) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *GetPresignedForClusterFilesBadRequest) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateUserGardenDefault) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *RegisterInfraEnvCreated) SetPayload(payload *models.InfraEnv) {\n\to.Payload = payload\n}", "func (o 
*ReplaceStorageV1beta1VolumeAttachmentCreated) SetPayload(payload *models.IoK8sAPIStorageV1beta1VolumeAttachment) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedPodBindingAccepted) SetPayload(payload *models.IoK8sAPICoreV1Binding) {\n\to.Payload = payload\n}", "func (o *CreateCoordinationV1NamespacedLeaseCreated) SetPayload(payload *models.IoK8sAPICoordinationV1Lease) {\n\to.Payload = payload\n}", "func (o *DeleteCoreV1NamespacedConfigMapAccepted) SetPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) {\n\to.Payload = payload\n}", "func (o *WeaviateActionsPatchAccepted) WithPayload(payload *models.ActionGetResponse) *WeaviateActionsPatchAccepted {\n\to.Payload = payload\n\treturn o\n}", "func (o *PutSlideLikeCreated) SetPayload(payload models.Success) {\n\to.Payload = payload\n}", "func (o *PatchApiextensionsV1beta1CustomResourceDefinitionStatusOK) SetPayload(payload *models.IoK8sApiextensionsApiserverPkgApisApiextensionsV1beta1CustomResourceDefinition) {\n\to.Payload = payload\n}", "func (o *DeleteStorageByIDUnauthorized) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetVSphereComputeResourcesInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateExtensionsV1beta1NamespacedIngressCreated) WithPayload(payload *models.IoK8sAPIExtensionsV1beta1Ingress) *CreateExtensionsV1beta1NamespacedIngressCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateOK) WithPayload(payload *models.Event) *CreateOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreatePeerAccepted) SetPayload(payload *models.PeerSection) {\n\to.Payload = payload\n}", "func (o *PostOrderCreated) SetPayload(payload *models.OrderCreateResponse) {\n\to.Payload = payload\n}", "func (o *ReplaceStorageV1CSINodeOK) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *ReplaceStorageV1CSINodeOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *ReplaceAppsV1NamespacedReplicaSetScaleCreated) SetPayload(payload *models.IoK8sAPIAutoscalingV1Scale) {\n\to.Payload = payload\n}", "func (o *ClientPermissionCreateMethodNotAllowed) SetPayload(payload *ClientPermissionCreateMethodNotAllowedBody) {\n\to.Payload = payload\n}", "func (o *CreateFileOK) SetPayload(payload *models.FileInfo) {\n\to.Payload = payload\n}", "func (o *ReplicateCreated) WithPayload(payload *models.SteeringRequestID) *ReplicateCreated {\n\to.Payload = payload\n\treturn o\n}", "func (rm *resourceManager) newCreateRequestPayload(\n\tr *resource,\n) (*svcsdk.CreateStageInput, error) {\n\tres := &svcsdk.CreateStageInput{}\n\n\tif r.ko.Spec.AccessLogSettings != nil {\n\t\tf0 := &svcsdk.AccessLogSettings{}\n\t\tif r.ko.Spec.AccessLogSettings.DestinationARN != nil {\n\t\t\tf0.SetDestinationArn(*r.ko.Spec.AccessLogSettings.DestinationARN)\n\t\t}\n\t\tif r.ko.Spec.AccessLogSettings.Format != nil {\n\t\t\tf0.SetFormat(*r.ko.Spec.AccessLogSettings.Format)\n\t\t}\n\t\tres.SetAccessLogSettings(f0)\n\t}\n\tif r.ko.Spec.APIID != nil {\n\t\tres.SetApiId(*r.ko.Spec.APIID)\n\t}\n\tif r.ko.Spec.AutoDeploy != nil {\n\t\tres.SetAutoDeploy(*r.ko.Spec.AutoDeploy)\n\t}\n\tif r.ko.Spec.ClientCertificateID != nil {\n\t\tres.SetClientCertificateId(*r.ko.Spec.ClientCertificateID)\n\t}\n\tif r.ko.Spec.DefaultRouteSettings != nil {\n\t\tf4 := &svcsdk.RouteSettings{}\n\t\tif r.ko.Spec.DefaultRouteSettings.DataTraceEnabled != nil {\n\t\t\tf4.SetDataTraceEnabled(*r.ko.Spec.DefaultRouteSettings.DataTraceEnabled)\n\t\t}\n\t\tif r.ko.Spec.DefaultRouteSettings.DetailedMetricsEnabled != nil 
{\n\t\t\tf4.SetDetailedMetricsEnabled(*r.ko.Spec.DefaultRouteSettings.DetailedMetricsEnabled)\n\t\t}\n\t\tif r.ko.Spec.DefaultRouteSettings.LoggingLevel != nil {\n\t\t\tf4.SetLoggingLevel(*r.ko.Spec.DefaultRouteSettings.LoggingLevel)\n\t\t}\n\t\tif r.ko.Spec.DefaultRouteSettings.ThrottlingBurstLimit != nil {\n\t\t\tf4.SetThrottlingBurstLimit(*r.ko.Spec.DefaultRouteSettings.ThrottlingBurstLimit)\n\t\t}\n\t\tif r.ko.Spec.DefaultRouteSettings.ThrottlingRateLimit != nil {\n\t\t\tf4.SetThrottlingRateLimit(*r.ko.Spec.DefaultRouteSettings.ThrottlingRateLimit)\n\t\t}\n\t\tres.SetDefaultRouteSettings(f4)\n\t}\n\tif r.ko.Spec.DeploymentID != nil {\n\t\tres.SetDeploymentId(*r.ko.Spec.DeploymentID)\n\t}\n\tif r.ko.Spec.Description != nil {\n\t\tres.SetDescription(*r.ko.Spec.Description)\n\t}\n\tif r.ko.Spec.RouteSettings != nil {\n\t\tf7 := map[string]*svcsdk.RouteSettings{}\n\t\tfor f7key, f7valiter := range r.ko.Spec.RouteSettings {\n\t\t\tf7val := &svcsdk.RouteSettings{}\n\t\t\tif f7valiter.DataTraceEnabled != nil {\n\t\t\t\tf7val.SetDataTraceEnabled(*f7valiter.DataTraceEnabled)\n\t\t\t}\n\t\t\tif f7valiter.DetailedMetricsEnabled != nil {\n\t\t\t\tf7val.SetDetailedMetricsEnabled(*f7valiter.DetailedMetricsEnabled)\n\t\t\t}\n\t\t\tif f7valiter.LoggingLevel != nil {\n\t\t\t\tf7val.SetLoggingLevel(*f7valiter.LoggingLevel)\n\t\t\t}\n\t\t\tif f7valiter.ThrottlingBurstLimit != nil {\n\t\t\t\tf7val.SetThrottlingBurstLimit(*f7valiter.ThrottlingBurstLimit)\n\t\t\t}\n\t\t\tif f7valiter.ThrottlingRateLimit != nil {\n\t\t\t\tf7val.SetThrottlingRateLimit(*f7valiter.ThrottlingRateLimit)\n\t\t\t}\n\t\t\tf7[f7key] = f7val\n\t\t}\n\t\tres.SetRouteSettings(f7)\n\t}\n\tif r.ko.Spec.StageName != nil {\n\t\tres.SetStageName(*r.ko.Spec.StageName)\n\t}\n\tif r.ko.Spec.StageVariables != nil {\n\t\tf9 := map[string]*string{}\n\t\tfor f9key, f9valiter := range r.ko.Spec.StageVariables {\n\t\t\tvar f9val string\n\t\t\tf9val = *f9valiter\n\t\t\tf9[f9key] = &f9val\n\t\t}\n\t\tres.SetStageVariables(f9)\n\t}\n\tif r.ko.Spec.Tags != nil {\n\t\tf10 := map[string]*string{}\n\t\tfor f10key, f10valiter := range r.ko.Spec.Tags {\n\t\t\tvar f10val string\n\t\t\tf10val = *f10valiter\n\t\t\tf10[f10key] = &f10val\n\t\t}\n\t\tres.SetTags(f10)\n\t}\n\n\treturn res, nil\n}", "func (o *UpdateCatalogInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *GetPresignedForClusterFilesNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetProviderRegionByIDInternalServerError) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *CreateDiscoveryV1beta1NamespacedEndpointSliceCreated) SetPayload(payload *models.IoK8sAPIDiscoveryV1beta1EndpointSlice) {\n\to.Payload = payload\n}", "func (o *DeleteOfferingByIDInternalServerError) SetPayload(payload *models.ErrorModel) {\n\to.Payload = payload\n}" ]
[ "0.627085", "0.6215604", "0.61715865", "0.59497446", "0.57963955", "0.56648123", "0.56416273", "0.5595874", "0.5580302", "0.5535439", "0.5465944", "0.5458563", "0.54470605", "0.541537", "0.5410048", "0.54049134", "0.53622574", "0.53558326", "0.5351057", "0.5330016", "0.5311847", "0.52435964", "0.5241551", "0.52400243", "0.52272236", "0.5198815", "0.51987237", "0.5198716", "0.5196931", "0.51938653", "0.51833075", "0.5178437", "0.51714027", "0.51599896", "0.51586604", "0.5148002", "0.51434153", "0.5137301", "0.5128888", "0.511072", "0.5109625", "0.51002026", "0.50987244", "0.509533", "0.5094169", "0.5092589", "0.5084495", "0.507789", "0.50728524", "0.50715154", "0.50684106", "0.5066228", "0.5062783", "0.50601244", "0.5056478", "0.5055124", "0.5047146", "0.50442195", "0.5031833", "0.50272816", "0.5026952", "0.5019062", "0.5013473", "0.50095624", "0.5008624", "0.5004186", "0.49972716", "0.4992441", "0.49913752", "0.4988013", "0.49861655", "0.4985762", "0.49850565", "0.4981461", "0.49805778", "0.49796292", "0.49733436", "0.49694803", "0.4967837", "0.4965654", "0.49642745", "0.49626908", "0.4962588", "0.49623072", "0.4950374", "0.49478337", "0.49461392", "0.49407044", "0.4937064", "0.49364182", "0.49347118", "0.49273086", "0.49267897", "0.49232078", "0.49214095", "0.49200428", "0.49196658", "0.49160442", "0.491521", "0.49137637" ]
0.5685385
5
SetPayload sets the payload to the create storage v1 c s i node accepted response
func (o *CreateStorageV1CSINodeAccepted) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) { o.Payload = payload }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *CreateStorageV1CSINodeOK) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (o *CreateStorageV1CSINodeCreated) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (o *ReplaceStorageV1CSINodeCreated) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (o *ReplaceStorageV1CSINodeOK) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}", "func (nf *NetworkPayload) SetPayload(newpayload []byte) {\n}", "func (o *CreateClusterCreated) SetPayload(payload *models.Kluster) {\n\to.Payload = payload\n}", "func (o *CreateHPCResourceCreated) SetPayload(payload *models.CreatedResponse) {\n\to.Payload = payload\n}", "func (o *WeaviateThingTemplatesCreateAccepted) SetPayload(payload *models.ThingTemplateGetResponse) {\n\to.Payload = payload\n}", "func (o *ClientPermissionCreateInternalServerError) SetPayload(payload *ClientPermissionCreateInternalServerErrorBody) {\n\to.Payload = payload\n}", "func (o *GetServicesHaproxyRuntimeAclsIDOK) SetPayload(payload *models.ACLFile) {\n\to.Payload = payload\n}", "func (o *DeleteStorageByIDOK) SetPayload(payload *models.Storage) {\n\to.Payload = payload\n}", "func (o *GetVSphereComputeResourcesOK) SetPayload(payload []*models.VSphereManagementObject) {\n\to.Payload = payload\n}", "func (o *GetPresignedForClusterFilesInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *PutSlideSuperlikeCreated) SetPayload(payload models.Success) {\n\to.Payload = payload\n}", "func (o *GetVSphereComputeResourcesInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateHPCResourceInternalServerError) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *CreateStorageSSLCertificateDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateStorageSSLCertificateCreated) SetPayload(payload *models.SslCertificate) {\n\to.Payload = payload\n}", "func (o *ReplaceNodeV1alpha1RuntimeClassCreated) SetPayload(payload *models.IoK8sAPINodeV1alpha1RuntimeClass) {\n\to.Payload = payload\n}", "func (tx *Transaction) SetPayload() {\n\tsize := make([]byte, 300)\n\ttx.data.Payload = size\n}", "func (o *DeleteRuntimeContainerInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *UpdateClusterInternalServerError) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *CreateTaskInternalServerError) SetPayload(payload interface{}) {\n\to.Payload = payload\n}", "func (o *GetDataContextTopologyUUIDNodeNodeUUIDOK) SetPayload(payload *models.TapiTopologyTopologyNode) {\n\to.Payload = payload\n}", "func (o *SetResourceCreated) SetPayload(payload *models.Resource) {\n\to.Payload = payload\n}", "func (o *PutSlideLikeCreated) SetPayload(payload models.Success) {\n\to.Payload = payload\n}", "func (o *CreateSpoeCreated) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *GetBackupRuntimeEnvironmentsInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *UpdateCatalogInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *ReplicateCreated) SetPayload(payload *models.SteeringRequestID) {\n\to.Payload = payload\n}", "func (o *GetPresignedForClusterFilesOK) SetPayload(payload *models.Presigned) {\n\to.Payload = payload\n}", "func (o *CreateACLAccepted) SetPayload(payload *models.ACL) {\n\to.Payload 
= payload\n}", "func (o *CreateClusterDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetNFTContractTokenOK) SetPayload(payload *models.NFTTokenRow) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedServiceAccountTokenAccepted) SetPayload(payload *models.IoK8sAPIAuthenticationV1TokenRequest) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedServiceAccountTokenCreated) SetPayload(payload *models.IoK8sAPIAuthenticationV1TokenRequest) {\n\to.Payload = payload\n}", "func (o *CreateOK) SetPayload(payload *models.Event) {\n\to.Payload = payload\n}", "func (o *CreateZoneInternalServerError) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *RegisterInfraEnvCreated) SetPayload(payload *models.InfraEnv) {\n\to.Payload = payload\n}", "func (o *CreateZoneCreated) SetPayload(payload *models.CreateZoneResponse) {\n\to.Payload = payload\n}", "func (o *PutWorkpaceByIDInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *ReadStorageV1beta1CSIDriverOK) SetPayload(payload *models.IoK8sAPIStorageV1beta1CSIDriver) {\n\to.Payload = payload\n}", "func (o *CreateStorageSSLCertificateConflict) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetVSphereDatastoresInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateHPCResourceForbidden) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *GetProviderRegionByIDInternalServerError) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *GetHealthzInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *DeleteStorageByIDNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateTaskAccepted) SetPayload(payload strfmt.UUID) {\n\to.Payload = payload\n}", "func (o *GetPresignedForClusterFilesBadRequest) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *AddNewMaterialsForPostInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetClusterInstallConfigInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *SemverGenerateCreated) SetPayload(payload *models.SemverTagSet) {\n\to.Payload = payload\n}", "func (o *AddRegionAZInternalServerError) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *GetPresignedForClusterFilesForbidden) SetPayload(payload *models.InfraError) {\n\to.Payload = payload\n}", "func (o *CreateStorageSSLCertificateBadRequest) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *AddKeypairInternalServerError) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *DeletePostbyIDInternalServerError) SetPayload(payload *models.Response) {\n\to.Payload = payload\n}", "func (o *DeleteStorageByIDUnauthorized) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedServiceAccountTokenOK) SetPayload(payload *models.IoK8sAPIAuthenticationV1TokenRequest) {\n\to.Payload = payload\n}", "func (o *ReplaceAppsV1NamespacedReplicaSetScaleCreated) SetPayload(payload *models.IoK8sAPIAutoscalingV1Scale) {\n\to.Payload = payload\n}", "func (o *GetPresignedForClusterFilesUnauthorized) SetPayload(payload *models.InfraError) {\n\to.Payload = payload\n}", "func (o *PutReposOwnerRepoContentsPathOK) SetPayload(payload *models.CreateFile) {\n\to.Payload = 
payload\n}", "func (o *GetClusterInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *ReplaceCertificatesV1CertificateSigningRequestCreated) SetPayload(payload *models.IoK8sAPICertificatesV1CertificateSigningRequest) {\n\to.Payload = payload\n}", "func (o *CreateCurrentAPISessionCertificateOK) SetPayload(payload *rest_model.CreateCurrentAPISessionCertificateEnvelope) {\n\to.Payload = payload\n}", "func (o *AddConsumptionInternalServerError) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *ReplaceNodeV1alpha1RuntimeClassOK) SetPayload(payload *models.IoK8sAPINodeV1alpha1RuntimeClass) {\n\to.Payload = payload\n}", "func (o *GetClusterInstallConfigOK) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *CreateACLCreated) SetPayload(payload *models.ACL) {\n\to.Payload = payload\n}", "func (o *DeleteOrganizationInternalServerError) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *CreateCoordinationV1NamespacedLeaseOK) SetPayload(payload *models.IoK8sAPICoordinationV1Lease) {\n\to.Payload = payload\n}", "func (o *UpdateClusterUnauthorized) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *GetS3BackupOK) SetPayload(payload *models.Response) {\n\to.Payload = payload\n}", "func (o *GetV1RdssInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetPresignedForClusterFilesNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetPresignedForClusterFilesConflict) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *AddRegionAZCreated) SetPayload(payload models.ULID) {\n\to.Payload = payload\n}", "func (o *GetClusterInstallConfigMethodNotAllowed) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateCoordinationV1NamespacedLeaseAccepted) SetPayload(payload *models.IoK8sAPICoordinationV1Lease) {\n\to.Payload = payload\n}", "func (o *CreateACLDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateTCPCheckConflict) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateTCPCheckDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetReadyInternalServerError) SetPayload(payload *models.ReturnCode) {\n\to.Payload = payload\n}", "func (o *DeleteOfferingByIDInternalServerError) SetPayload(payload *models.ErrorModel) {\n\to.Payload = payload\n}", "func (o *UpdateMoveTaskOrderPostCounselingInformationInternalServerError) SetPayload(payload interface{}) {\n\to.Payload = payload\n}", "func (o *CreateUserGardenDefault) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *GetPresignedForClusterFilesMethodNotAllowed) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *ConnectCoreV1OptionsNodeProxyOK) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *CreateCoordinationV1NamespacedLeaseCreated) SetPayload(payload *models.IoK8sAPICoordinationV1Lease) {\n\to.Payload = payload\n}", "func (o *PostRegisterDetailsInternalServerError) SetPayload(payload *models.GeneralResponse) {\n\to.Payload = payload\n}", "func (o *AddOrgMembersV1InternalServerError) SetPayload(payload *model.StandardError) {\n\to.Payload = payload\n}", "func (o *ReplaceApiextensionsV1beta1CustomResourceDefinitionCreated) SetPayload(payload *models.IoK8sApiextensionsApiserverPkgApisApiextensionsV1beta1CustomResourceDefinition) 
{\n\to.Payload = payload\n}", "func (o *GetClusterInstallConfigForbidden) SetPayload(payload *models.InfraError) {\n\to.Payload = payload\n}", "func (o *GetClusterForbidden) SetPayload(payload *models.InfraError) {\n\to.Payload = payload\n}", "func (o *ThingsDeleteInternalServerError) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *AddKeypairCreated) SetPayload(payload models.ULID) {\n\to.Payload = payload\n}", "func (o *CreatePeerCreated) SetPayload(payload *models.PeerSection) {\n\to.Payload = payload\n}", "func (o *GetVSphereComputeResourcesBadRequest) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *UpdateHostIgnitionInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}" ]
[ "0.7258962", "0.7145782", "0.70588064", "0.6841116", "0.68142986", "0.6806926", "0.6750216", "0.6683009", "0.6675377", "0.6666445", "0.666559", "0.6665555", "0.66296804", "0.6620157", "0.65994805", "0.65844", "0.658369", "0.6582246", "0.65736467", "0.65595233", "0.6543955", "0.65332866", "0.65327287", "0.6531723", "0.6522737", "0.65148264", "0.6513288", "0.6505356", "0.65001667", "0.649401", "0.6484719", "0.64758986", "0.6473565", "0.64704376", "0.64671063", "0.6465474", "0.6464345", "0.64613897", "0.6459157", "0.64557666", "0.6449278", "0.6447274", "0.6443252", "0.64422786", "0.6442167", "0.6440295", "0.6434729", "0.6431621", "0.6430345", "0.64279014", "0.6427337", "0.64173716", "0.64140946", "0.64121854", "0.6408473", "0.6396868", "0.6388961", "0.6385555", "0.6385066", "0.6379565", "0.63785625", "0.63764673", "0.6371483", "0.6368161", "0.6365231", "0.6362571", "0.63608074", "0.6358023", "0.6356401", "0.6352045", "0.63465583", "0.63443416", "0.63415474", "0.6337702", "0.633696", "0.6331495", "0.63303936", "0.63298595", "0.6329382", "0.6328929", "0.6322645", "0.6317969", "0.63167477", "0.63109624", "0.6310115", "0.6308478", "0.6308418", "0.6305372", "0.62996215", "0.62984866", "0.62982076", "0.62970716", "0.6295962", "0.6293673", "0.6292773", "0.6291157", "0.6287352", "0.62860316", "0.6284921", "0.6284387" ]
0.6972805
3
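The two records above and the one that follows all describe the same go-swagger responder pattern: one struct per HTTP status code, whose payload is set either fluently (WithPayload) or in place (SetPayload) and finally serialized by WriteResponse. Below is a minimal, self-contained sketch of that pattern for orientation; the Accepted type name and the 202 status are illustrative stand-ins for the generated CreateStorageV1CSINodeAccepted responder, and github.com/go-openapi/runtime is assumed as the producer interface, matching the imports implied by the document strings. It is a sketch of the convention, not the generated code itself.

package responders

import (
	"net/http"

	"github.com/go-openapi/runtime"
)

// Accepted mirrors the generated responder shape seen in the records:
// a status-code-specific struct holding an optional payload.
// Generated code uses a concrete model type instead of interface{}.
type Accepted struct {
	Payload interface{}
}

// WithPayload sets the payload and returns the responder for chaining.
func (o *Accepted) WithPayload(payload interface{}) *Accepted {
	o.Payload = payload
	return o
}

// SetPayload sets the payload without returning the responder.
func (o *Accepted) SetPayload(payload interface{}) {
	o.Payload = payload
}

// WriteResponse writes the fixed status code, then serializes the payload
// (if any) through the content-negotiated producer.
func (o *Accepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
	rw.WriteHeader(http.StatusAccepted) // 202, as in the Accepted responders above
	if o.Payload != nil {
		if err := producer.Produce(rw, o.Payload); err != nil {
			panic(err) // generated code defers to recovery middleware
		}
	}
}

In a generated handler this would typically be returned as a middleware.Responder, e.g. return NewAccepted().WithPayload(&body), and the framework then calls WriteResponse with the negotiated producer.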
WriteResponse to the client
func (o *CreateStorageV1CSINodeAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { rw.WriteHeader(202) if o.Payload != nil { payload := o.Payload if err := producer.Produce(rw, payload); err != nil { panic(err) // let the recovery middleware deal with this } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *Response) Write(w io.Writer) error", "func (c *Operation) writeResponse(rw http.ResponseWriter, status int, data []byte) { // nolint: unparam\n\trw.WriteHeader(status)\n\n\tif _, err := rw.Write(data); err != nil {\n\t\tlogger.Errorf(\"Unable to send error message, %s\", err)\n\t}\n}", "func WriteResponse(w http.ResponseWriter, object interface{}, rerr *irma.RemoteError) {\n\tstatus, bts := JsonResponse(object, rerr)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\t_, err := w.Write(bts)\n\tif err != nil {\n\t\tLogWarning(errors.WrapPrefix(err, \"failed to write response\", 0))\n\t}\n}", "func WriteResponse(w http.ResponseWriter, mensaje string, code int) {\n\tmessage := myTypes.Respuesta{\n\t\tMessage: mensaje,\n\t}\n\tresponse, _ := json.Marshal(message)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(response)\n}", "func (o *PingOK) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(200)\n}", "func WriteResponse(w http.ResponseWriter, v interface{}, statusCode int) {\n\tresBody, err := json.Marshal(v)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(statusCode)\n\t_, _ = w.Write(resBody)\n}", "func WriteResponse(w http.ResponseWriter, code int, object interface{}) {\n\tdata, err := json.Marshal(object)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(data)\n}", "func (o *GetPingOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func writeResponse(body []byte, w *http.ResponseWriter) {\n\t(*w).Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\t_, err := (*w).Write(body)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\t(*w).WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func WriteResponse(w http.ResponseWriter, code int, resp interface{}) error {\n\tj, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn err\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\n\t_, err = w.Write(j)\n\treturn err\n}", "func writeResponse(w *http.ResponseWriter, res responseData, status int) {\n\tresJSON, err := json.Marshal(res)\n\tif err != nil {\n\t\thttp.Error(*w, \"Failed to parse struct `responseData` into JSON object\", http.StatusInternalServerError)\n\t}\n\n\t(*w).Header().Set(\"Content-Type\", \"application/json\")\n\t(*w).WriteHeader(status)\n\t(*w).Write(resJSON)\n}", "func WriteResponse(w http.ResponseWriter, d string) {\n\tw.WriteHeader(200)\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.Write([]byte(d))\n\treturn\n}", "func (o *CreateFacilityUsersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func writeResponse(w http.ResponseWriter, response Response) {\n\tjson, err := json.Marshal(&response)\n\n\tif err != nil {\n\t\tfmt.Fprint(w, \"There was an error processing the request.\")\n\t}\n\n\tcommon.Log(fmt.Sprintf(\"Returning response %s\", json))\n\tfmt.Fprintf(w, \"%s\", json)\n}", "func (o *CreateProgramOK) WriteResponse(rw http.ResponseWriter, producer 
runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *DepositNewFileOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateMedicineOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *CreateTaskCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Location\n\n\tlocation := o.Location.String()\n\tif location != \"\" {\n\t\trw.Header().Set(\"Location\", location)\n\t}\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func writeResponse(r *http.Request, w http.ResponseWriter, code int, resp interface{}) {\n\n\t// Deal with CORS\n\tif origin := r.Header.Get(\"Origin\"); origin != \"\" {\n\t\tw.Header().Set(\"Access-Control-Allow-Origin\", origin)\n\t\tw.Header().Set(\"Access-Control-Allow-Methods\", \"DELETE, GET, HEAD, OPTIONS, POST, PUT\")\n\t\tw.Header().Set(\"Access-Control-Allow-Credentials\", \"true\")\n\t\t// Allow any headers\n\t\tif wantedHeaders := r.Header.Get(\"Access-Control-Request-Headers\"); wantedHeaders != \"\" {\n\t\t\tw.Header().Set(\"Access-Control-Allow-Headers\", wantedHeaders)\n\t\t}\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\n\tb, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintln(w, `{\"error\":\"failed to marshal json\"}`)\n\t\treturn\n\t}\n\n\tw.WriteHeader(code)\n\tfmt.Fprintln(w, string(b))\n}", "func (o *VerifyAccountCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func writeResponse(w http.ResponseWriter, h int, p interface{}) {\n\t// I set the content type...\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t// ... I write the specified status code...\n\tw.WriteHeader(h)\n\t// ... 
and I write the response\n\tb, _ := json.Marshal(p)\n\tw.Write(b)\n}", "func (o *UpdateCatalogOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func (c *SwitchVersion) WriteResponse(rw http.ResponseWriter, rp runtime.Producer) {\n\tswitch c.Request.Method {\n\tcase http.MethodPost:\n\t\tc.postSwitchVersion(rw, rp)\n\tdefault:\n\t\tc.notSupported(rw, rp)\n\t}\n}", "func (o *PutRecordingsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *BofaChkUpdateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *VerifyHealthCredentialOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func WriteResponse(w http.ResponseWriter, code int, err error, data interface{}, t0 time.Time) {\n\tw.WriteHeader(code)\n\tresp := &Response{Data: data, Dur: fmt.Sprint(time.Since(t0)), OK: false}\n\tif code < 300 {\n\t\tresp.OK = true\n\t}\n\tif err != nil {\n\t\tresp.Err = err.Error()\n\t}\n\terr = json.NewEncoder(w).Encode(resp)\n\tif err != nil {\n\t\tlog.Infof(\"failed to json encode response: %v\", err)\n\t\tif _, err = w.Write([]byte(spew.Sdump(resp))); err != nil {\n\t\t\tlog.Infof(\"failed to write dump of response: %v\", err)\n\t\t}\n\t}\n}", "func (o *NewDiscoveryOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func writeResponse(data []byte, size int64, ctype string, w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", ctype)\n\tw.Header().Set(\"Content-Length\", fmt.Sprintf(\"%d\", size))\n\tw.Header().Set(\"Cache-Control\", \"no-transform,public,max-age=86400,s-maxage=2592000\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(data)\n}", "func writeResponse(w http.ResponseWriter, code int, object interface{}) {\n\tfmt.Println(\"writing response:\", code, object)\n\tdata, err := json.Marshal(object)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tw.Header().Set(\"content-type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(data)\n}", "func writeResponse(w http.ResponseWriter, authZRes *authorization.Response) {\n\n\tdata, err := json.Marshal(authZRes)\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to marshel authz response %q\", err.Error())\n\t} else {\n\t\tw.Write(data)\n\t}\n\n\tif authZRes == nil || authZRes.Err != \"\" {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n}", "func (o *GetCharactersCharacterIDOpportunitiesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make(models.GetCharactersCharacterIDOpportunitiesOKBody, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) 
// let the recovery middleware deal with this\n\t}\n\n}", "func (o *WeaviateThingsGetNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n}", "func (c *UpdateSwitch) WriteResponse(rw http.ResponseWriter, rp runtime.Producer) {\n\tswitch c.Request.Method {\n\tcase http.MethodPost:\n\t\tc.postUpdateSwitch(rw, rp)\n\tdefault:\n\t\tc.notSupported(rw, rp)\n\t}\n}", "func (c *UpdateSwitch) WriteResponse(rw http.ResponseWriter, rp runtime.Producer) {\n\tswitch c.Request.Method {\n\tcase http.MethodPost:\n\t\tc.postUpdateSwitch(rw, rp)\n\tdefault:\n\t\tc.notSupported(rw, rp)\n\t}\n}", "func (o *UpdateLinkInPostOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetChatroomsIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetEchoNameOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetUIContentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *ListVsphereResourceOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func ResponseWrite(w http.ResponseWriter, responseCode int, responseData interface{}) {\n\t// Write Response\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(responseCode)\n\n\t// Write JSON to Response\n\tjson.NewEncoder(w).Encode(responseData)\n}", "func writeHTTPResponseInWriter(httpRes http.ResponseWriter, httpReq *http.Request, nobelPrizeWinnersResponse []byte, err error) {\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\thttp.Error(httpRes, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tlog.Printf(\"Request %s Succesfully Completed\", httpReq.RequestURI)\n\thttpRes.Header().Set(\"Content-Type\", \"application/json\")\n\thttpRes.Write(nobelPrizeWinnersResponse)\n}", "func (o *PostKeysKeyOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func (o *Operation) writeResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func writeResponse(data interface{}, w http.ResponseWriter) error {\n\tvar (\n\t\tenc []byte\n\t\terr error\n\t)\n\tenc, err = json.Marshal(data)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Failure to marshal, err = %s\", err)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tn, err := w.Write(enc)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Failure to write, err = %s\", err)\n\t}\n\tif n != len(enc) {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn 
fmt.Errorf(\"Short write sent = %d, wrote = %d\", len(enc), n)\n\t}\n\treturn nil\n}", "func (o *CreateUserOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateMoveTaskOrderPostCounselingInformationOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func WriteResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\tif err != nil {\n\t\tlogger.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func (o *PutQuestionOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (r *response) Write(b []byte) (n int, err error) {\n\tif !r.headersSend {\n\t\tif r.status == 0 {\n\t\t\tr.status = http.StatusOK\n\t\t}\n\t\tr.WriteHeader(r.status)\n\t}\n\tn, err = r.ResponseWriter.Write(b)\n\tr.size += int64(n)\n\treturn\n}", "func (o *PostOperationsDeleteP2PPathCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *HealthGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *VerifyEmailTokenOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *WeaviateThingsPatchNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n}", "func (o *WeaviateThingsGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *DeleteServiceIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *Operation) writeResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\t// as of now, just log errors for writing response\n\tif err != nil {\n\t\tlogger.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func (o *PostOperationsGetNodeEdgePointDetailsCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *UserEditOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *WeaviatePeersAnnounceOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func (o *CertifyOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) 
{\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func writeResponse(writer http.ResponseWriter, response *http.Response) (int64, error) {\n\tdefer response.Body.Close()\n\twriteResponseHeaders(writer, response, false)\n\treturn io.Copy(writer, response.Body)\n}", "func (o *PutMeetupDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *FingerPathsPostCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *PostPlaybookOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateHostIgnitionCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *GetCharactersCharacterIDLocationOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetPingDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *PostManagementKubernetesIoV1NodesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *PutPerformancesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *StopAppAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(202)\n}", "func (o *GetFleetsFleetIDMembersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Content-Language\n\n\tcontentLanguage := o.ContentLanguage\n\tif contentLanguage != \"\" {\n\t\trw.Header().Set(\"Content-Language\", contentLanguage)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = 
make(models.GetFleetsFleetIDMembersOKBody, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *GetMeetupsDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *PostEventCreated) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(201)\n}", "func (o *GetTaskTaskIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateTCPCheckAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Reload-ID\n\n\treloadID := o.ReloadID\n\tif reloadID != \"\" {\n\t\trw.Header().Set(\"Reload-ID\", reloadID)\n\t}\n\n\trw.WriteHeader(202)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *PostOperationsGetNetworkElementListCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *ServiceInstanceLastOperationGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header RetryAfter\n\n\tretryAfter := o.RetryAfter\n\tif retryAfter != \"\" {\n\t\trw.Header().Set(\"RetryAfter\", retryAfter)\n\t}\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetTaskDetailsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetPiecesIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *UpdateClusterOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetDetailOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\trw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetServicesHaproxyRuntimeAclsIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o 
*LogoutOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (r *responseInfoRecorder) Write(b []byte) (int, error) {\n\tr.ContentLength += int64(len(b))\n\tif r.statusCode == 0 {\n\t\tr.statusCode = http.StatusOK\n\t}\n\treturn r.ResponseWriter.Write(b)\n}", "func (o *UploadFileOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func WriteResponse(w http.ResponseWriter, data interface{}) error {\n\tenv := map[string]interface{}{\n\t\t\"meta\": map[string]interface{}{\n\t\t\t\"code\": http.StatusOK,\n\t\t},\n\t\t\"data\": data,\n\t}\n\treturn jsonResponse(w, env)\n}", "func (o *WeaviateThingTemplatesCreateNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n}", "func (r *Responder) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\tfor k, v := range r.headers {\n\t\tfor _, val := range v {\n\t\t\trw.Header().Add(k, val)\n\t\t}\n\t}\n\n\trw.WriteHeader(r.code)\n\n\tif r.response != nil {\n\t\tif err := producer.Produce(rw, r.response); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}", "func (o *GetGateSourceByGateNameAndMntOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateSpoeCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tpayload := o.Payload\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (o *Output) writeResponse(response string) error {\r\n\t// write the response\r\n\tif _, err := o.writer.WriteString(response + \"\\n\"); err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\treturn nil\r\n}", "func (o *GetTransportByIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *TransferOK) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(200)\n\tif err := producer.Produce(rw, o.Payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *CreateUserCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *ViewOneOrderOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetVisiblePruebasFromQuestionTestInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(500)\n}", "func (o *GetWhaleTranfersOK) WriteResponse(rw 
http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\t// return empty array\n\t\tpayload = make([]*models.OperationsRow, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (o *SearchTournamentsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make([]*models.Tournament, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *CreateTCPCheckCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (s *Server) writeInfoResponse(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tmessage []byte,\n\tstatus int,\n\theaders map[string]string,\n) {\n\tfor k, v := range headers {\n\t\tw.Header().Add(k, v)\n\t}\n\n\tw.WriteHeader(status)\n\tw.Write(message)\n}" ]
[ "0.81291586", "0.78819287", "0.77723724", "0.7772298", "0.77532965", "0.7740895", "0.7667328", "0.76388013", "0.76095575", "0.75802743", "0.75792146", "0.7567954", "0.75612247", "0.7558208", "0.7545076", "0.75431097", "0.7542526", "0.7535154", "0.75308895", "0.75206727", "0.75192624", "0.7513445", "0.75115013", "0.7506245", "0.75036865", "0.74994856", "0.7488267", "0.7484068", "0.7476975", "0.74681216", "0.7467429", "0.74663514", "0.7464419", "0.74637115", "0.74637115", "0.74621916", "0.74607694", "0.74600816", "0.74461263", "0.7444002", "0.74358237", "0.7427366", "0.7425954", "0.7418714", "0.7413481", "0.74079764", "0.7406604", "0.74053806", "0.7399197", "0.73880255", "0.73864275", "0.7381308", "0.7361386", "0.73605716", "0.73553914", "0.735516", "0.7353125", "0.7348355", "0.734634", "0.7328798", "0.7326309", "0.7318161", "0.73170096", "0.73166984", "0.7316146", "0.7313389", "0.73119754", "0.73103034", "0.73090947", "0.7301638", "0.729702", "0.7292011", "0.7291873", "0.7289617", "0.72853845", "0.7284048", "0.7282259", "0.7280808", "0.72753084", "0.7275278", "0.7273494", "0.72732604", "0.7269464", "0.72693926", "0.7268149", "0.72664154", "0.72615176", "0.72536385", "0.7251536", "0.7249643", "0.72487813", "0.72475266", "0.72414196", "0.723942", "0.7237652", "0.7234592", "0.72287256", "0.72233856", "0.72163224", "0.7215305", "0.72126275" ]
0.0
-1
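The negatives in the record above are all variations of one go-swagger responder shape: a generated type whose WriteResponse optionally sets headers, writes the status code, and serializes any payload through the runtime.Producer. A minimal sketch of that shared shape follows; the type name ExampleOK is illustrative and not taken from the dataset:

package restapi

import (
	"net/http"

	"github.com/go-openapi/runtime"
)

// ExampleOK is a hypothetical responder mirroring the generated ones above.
type ExampleOK struct {
	Payload interface{}
}

// WriteResponse follows the pattern shared by the snippets in the negatives
// list: drop Content-Type on empty bodies, write the status code, then let
// the producer serialize any payload.
func (o *ExampleOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
	if o.Payload == nil {
		rw.Header().Del(runtime.HeaderContentType) // Remove Content-Type on empty responses
		rw.WriteHeader(200)
		return
	}
	rw.WriteHeader(200)
	if err := producer.Produce(rw, o.Payload); err != nil {
		panic(err) // let the recovery middleware deal with this
	}
}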
NewCreateStorageV1CSINodeUnauthorized creates CreateStorageV1CSINodeUnauthorized with default header values
func NewCreateStorageV1CSINodeUnauthorized() *CreateStorageV1CSINodeUnauthorized { return &CreateStorageV1CSINodeUnauthorized{} }
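As a usage sketch: in a go-swagger service this constructor is returned from a handler as a middleware.Responder. The params struct and the authorization check below are assumptions for illustration, not part of the record, though the matching OK constructor does appear among this record's negatives:

package restapi

import (
	"net/http"

	"github.com/go-openapi/runtime/middleware"
)

// CreateStorageV1CSINodeParams is a stand-in for the generated params type;
// only the raw request is used here.
type CreateStorageV1CSINodeParams struct {
	HTTPRequest *http.Request
}

// handleCreateCSINode returns the 401 responder when no credentials are
// presented; both responders satisfy middleware.Responder via WriteResponse.
func handleCreateCSINode(params CreateStorageV1CSINodeParams) middleware.Responder {
	if params.HTTPRequest.Header.Get("Authorization") == "" {
		return NewCreateStorageV1CSINodeUnauthorized()
	}
	return NewCreateStorageV1CSINodeOK()
}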
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewReplaceStorageV1CSINodeUnauthorized() *ReplaceStorageV1CSINodeUnauthorized {\n\n\treturn &ReplaceStorageV1CSINodeUnauthorized{}\n}", "func NewReplaceStorageV1CSINodeUnauthorized() *ReplaceStorageV1CSINodeUnauthorized {\n\treturn &ReplaceStorageV1CSINodeUnauthorized{}\n}", "func NewWatchStorageV1CSINodeListUnauthorized() *WatchStorageV1CSINodeListUnauthorized {\n\treturn &WatchStorageV1CSINodeListUnauthorized{}\n}", "func NewCreateStorageV1CSINodeOK() *CreateStorageV1CSINodeOK {\n\n\treturn &CreateStorageV1CSINodeOK{}\n}", "func NewUnauthorized(cause error) Unauthorized { return Unauthorized(cause.Error()) }", "func (o *CreateStorageV1CSINodeUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func NewAuthPrivilegeRequestWithoutParam() *AuthPrivilegeRequest {\n\n return &AuthPrivilegeRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/management:authPrivilege\",\n Method: \"POST\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}", "func CreateBucket(w http.ResponseWriter, r *http.Request) *appError {\n decoder := json.NewDecoder(r.Body)\n var ecsBucket ECSBucket\n err := decoder.Decode(&ecsBucket)\n if err != nil {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Can't decode JSON data\"}\n }\n headers := make(map[string][]string)\n if ecsBucket.ReplicationGroup != \"\" {\n headers[\"x-emc-vpool\"] = []string{ecsBucket.ReplicationGroup}\n }\n if ecsBucket.MetadataSearch != \"\" {\n headers[\"x-emc-metadata-search\"] = []string{ecsBucket.MetadataSearch}\n }\n if ecsBucket.EnableADO {\n headers[\"x-emc-is-stale-allowed\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-is-stale-allowed\"] = []string{\"false\"}\n }\n if ecsBucket.EnableFS {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableCompliance {\n headers[\"x-emc-compliance-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-compliance-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableEncryption {\n headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"false\"}\n }\n retentionEnabled := false\n headers[\"x-emc-retention-period\"] = []string{\"0\"}\n if ecsBucket.Retention != \"\" {\n days, err := strconv.ParseInt(ecsBucket.Retention, 10, 64)\n if err == nil {\n if days > 0 {\n seconds := days * 24 * 3600\n headers[\"x-emc-retention-period\"] = []string{int64toString(seconds)}\n retentionEnabled = true\n }\n }\n }\n var expirationCurrentVersions int64\n expirationCurrentVersions = 0\n if ecsBucket.ExpirationCurrentVersions != \"\" {\n days, err := strconv.ParseInt(ecsBucket.ExpirationCurrentVersions, 10, 64)\n if err == nil {\n expirationCurrentVersions = days\n }\n }\n var expirationNonCurrentVersions int64\n expirationNonCurrentVersions = 0\n if ecsBucket.ExpirationNonCurrentVersions != \"\" {\n days, err := strconv.ParseInt(ecsBucket.ExpirationNonCurrentVersions, 10, 64)\n if err == nil && ecsBucket.EnableVersioning {\n expirationNonCurrentVersions = days\n }\n }\n var bucketCreateResponse Response\n if ecsBucket.Api == \"s3\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = 
s3Request(s3, ecsBucket.Name, \"PUT\", \"/\", headers, \"\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: err.Error()}\n }\n versioningStatusOK := true\n lifecyclePolicyStatusOK := true\n // If the bucket has been created\n if bucketCreateResponse.Code == 200 {\n if !retentionEnabled && ecsBucket.EnableVersioning {\n // Enable versioning\n enableVersioningHeaders := map[string][]string{}\n enableVersioningHeaders[\"Content-Type\"] = []string{\"application/xml\"}\n versioningConfiguration := `\n <VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\n <Status>Enabled</Status>\n <MfaDelete>Disabled</MfaDelete>\n </VersioningConfiguration>\n `\n enableVersioningResponse, _ := s3Request(s3, ecsBucket.Name, \"PUT\", \"/?versioning\", enableVersioningHeaders, versioningConfiguration)\n if enableVersioningResponse.Code != 200 {\n versioningStatusOK = false\n }\n }\n if expirationCurrentVersions > 0 || expirationNonCurrentVersions > 0 {\n lifecyclePolicyHeaders := map[string][]string{}\n lifecyclePolicyHeaders[\"Content-Type\"] = []string{\"application/xml\"}\n lifecyclePolicyConfiguration := `\n <LifecycleConfiguration>\n <Rule>\n <ID>expiration</ID>\n <Prefix></Prefix>\n <Status>Enabled</Status>\n `\n if expirationCurrentVersions > 0 && expirationNonCurrentVersions > 0 {\n // Enable expiration for both current and non current versions\n lifecyclePolicyConfiguration += \"<Expiration><Days>\" + ecsBucket.ExpirationCurrentVersions + \"</Days></Expiration>\"\n lifecyclePolicyConfiguration += \"<NoncurrentVersionExpiration><NoncurrentDays>\" + ecsBucket.ExpirationNonCurrentVersions + \"</NoncurrentDays></NoncurrentVersionExpiration>\"\n } else {\n if expirationCurrentVersions > 0 {\n // Enable expiration for current versions only\n lifecyclePolicyConfiguration += \"<Expiration><Days>\" + ecsBucket.ExpirationCurrentVersions + \"</Days></Expiration>\"\n }\n if expirationNonCurrentVersions > 0 {\n // Enable expiration for non current versions only\n // To fix a bug in ECS 3.0 where an expiration for non current version can't be set if there's no expiration set for current versions\n lifecyclePolicyConfiguration += \"<Expiration><Days>1000000</Days></Expiration>\"\n lifecyclePolicyConfiguration += \"<NoncurrentVersionExpiration><NoncurrentDays>\" + ecsBucket.ExpirationNonCurrentVersions + \"</NoncurrentDays></NoncurrentVersionExpiration>\"\n }\n }\n lifecyclePolicyConfiguration += `\n </Rule>\n </LifecycleConfiguration>\n `\n lifecyclePolicyResponse, _ := s3Request(s3, ecsBucket.Name, \"PUT\", \"/?lifecycle\", lifecyclePolicyHeaders, lifecyclePolicyConfiguration)\n if lifecyclePolicyResponse.Code != 200 {\n lifecyclePolicyStatusOK = false\n }\n }\n if versioningStatusOK && lifecyclePolicyStatusOK {\n rendering.JSON(w, http.StatusOK, \"\")\n } else {\n message := \"\"\n if !versioningStatusOK {\n message += \" Versioning can't be enabled.\"\n }\n if !lifecyclePolicyStatusOK {\n message += \" Expiration can't be set.\"\n }\n rendering.JSON(w, http.StatusOK, message)\n }\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"swift\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = swiftRequest(ecsBucket.Endpoint, s3.AccessKey, ecsBucket.Password, ecsBucket.Name, \"PUT\", \"/\", headers, \"\")\n if err 
!= nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: err.Error()}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 {\n rendering.JSON(w, http.StatusOK, ecsBucket.Name)\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"atmos\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = atmosRequest(ecsBucket.Endpoint, s3.AccessKey, s3.SecretKey, \"\", \"PUT\", \"/rest/subtenant\", headers, \"\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: err.Error()}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 {\n rendering.JSON(w, http.StatusOK, bucketCreateResponse.ResponseHeaders[\"Subtenantid\"][0])\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n }\n\n return nil\n}", "func (r ApiCreateHyperflexExtIscsiStoragePolicyRequest) IfNoneMatch(ifNoneMatch string) ApiCreateHyperflexExtIscsiStoragePolicyRequest {\n\tr.ifNoneMatch = &ifNoneMatch\n\treturn r\n}", "func (o *CreateStorageV1CSINodeCreated) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *CreateStorageV1CSINodeCreated {\n\to.Payload = payload\n\treturn o\n}", "func NewGetNodeUnauthorized() *GetNodeUnauthorized {\n\treturn &GetNodeUnauthorized{}\n}", "func NewWatchStorageV1StorageClassUnauthorized() *WatchStorageV1StorageClassUnauthorized {\n\treturn &WatchStorageV1StorageClassUnauthorized{}\n}", "func NewReadStorageV1beta1CSIDriverUnauthorized() *ReadStorageV1beta1CSIDriverUnauthorized {\n\n\treturn &ReadStorageV1beta1CSIDriverUnauthorized{}\n}", "func NewUnauthorized(err error, msg ...string) *Errs {\n\tif err == nil {\n\t\terr = ErrUnauthorized\n\t}\n\treturn &Errs{\n\t\tcodeHTTP: http.StatusUnauthorized,\n\t\terr: err,\n\t\tkind: trace(2),\n\t\tmessage: msg,\n\t}\n}", "func NewCreateStorageV1CSINodeCreated() *CreateStorageV1CSINodeCreated {\n\n\treturn &CreateStorageV1CSINodeCreated{}\n}", "func (o *ReplaceStorageV1CSINodeUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (a *HyperflexApiService) CreateHyperflexExtIscsiStoragePolicy(ctx context.Context) ApiCreateHyperflexExtIscsiStoragePolicyRequest {\n\treturn ApiCreateHyperflexExtIscsiStoragePolicyRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func NewUnauthorized(err error, msg string) error {\n\treturn &unauthorized{wrap(err, msg, \"\")}\n}", "func (o *CreateStorageV1CSINodeOK) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *CreateStorageV1CSINodeOK {\n\to.Payload = payload\n\treturn o\n}", "func CreateBucket(w http.ResponseWriter, r *http.Request) *appError {\n decoder := json.NewDecoder(r.Body)\n var ecsBucket ECSBucket\n err := decoder.Decode(&ecsBucket)\n if err != nil {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Can't decode JSON data\"}\n }\n headers := make(map[string][]string)\n if ecsBucket.ReplicationGroup != \"\" {\n headers[\"x-emc-vpool\"] = []string{ecsBucket.ReplicationGroup}\n }\n if ecsBucket.MetadataSearch != \"\" {\n headers[\"x-emc-metadata-search\"] = []string{ecsBucket.MetadataSearch}\n }\n if ecsBucket.EnableADO {\n 
headers[\"x-emc-is-stale-allowed\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-is-stale-allowed\"] = []string{\"false\"}\n }\n if ecsBucket.EnableFS {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableCompliance {\n headers[\"x-emc-compliance-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-compliance-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableEncryption {\n headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"false\"}\n }\n var bucketCreateResponse Response\n if ecsBucket.Api == \"s3\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = s3Request(s3, ecsBucket.Name, \"PUT\", \"/\", headers, \"\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n if bucketCreateResponse.Code == 200 {\n rendering.JSON(w, http.StatusOK, ecsBucket.Name)\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"swift\" {\n bucketCreateResponse, err = swiftRequest(ecsBucket.Endpoint, ecsBucket.User, ecsBucket.Password, ecsBucket.Name, \"PUT\", \"/\", headers, \"\")\n log.Print(bucketCreateResponse)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 {\n rendering.JSON(w, http.StatusOK, ecsBucket.Name)\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"atmos\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = atmosRequest(ecsBucket.Endpoint, s3.AccessKey, s3.SecretKey, \"\", \"PUT\", \"/rest/subtenant\", headers, \"\")\n if err != nil {\n log.Print(err)\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 {\n rendering.JSON(w, http.StatusOK, bucketCreateResponse.ResponseHeaders[\"Subtenantid\"][0])\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n }\n\n return nil\n}", "func NewReplaceStorageV1CSINodeCreated() *ReplaceStorageV1CSINodeCreated {\n\n\treturn &ReplaceStorageV1CSINodeCreated{}\n}", "func (suite *TenantTestSuite) TestCreateUnauthorized() {\n\trequest, _ := http.NewRequest(\"POST\", \"/api/v2/admin/tenants\", strings.NewReader(\"\"))\n\trequest.Header.Set(\"x-api-key\", \"FOO\")\n\trequest.Header.Set(\"Accept\", \"application/json\")\n\tresponse := httptest.NewRecorder()\n\n\tsuite.router.ServeHTTP(response, request)\n\n\tcode := response.Code\n\toutput := response.Body.String()\n\n\tsuite.Equal(401, code, \"Internal Server Error\")\n\tsuite.Equal(suite.respUnauthorized, output, \"Response body mismatch\")\n}", "func NewCreateNetworkingV1beta1NamespacedIngressUnauthorized() 
*CreateNetworkingV1beta1NamespacedIngressUnauthorized {\n\n\treturn &CreateNetworkingV1beta1NamespacedIngressUnauthorized{}\n}", "func CreateDescribeLogstoreStorageRequest() (request *DescribeLogstoreStorageRequest) {\n\trequest = &DescribeLogstoreStorageRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Sas\", \"2018-12-03\", \"DescribeLogstoreStorage\", \"sas\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func NewGetAllStorageUnauthorized() *GetAllStorageUnauthorized {\n\n\treturn &GetAllStorageUnauthorized{}\n}", "func (r ApiCreateHyperflexClusterStoragePolicyRequest) IfNoneMatch(ifNoneMatch string) ApiCreateHyperflexClusterStoragePolicyRequest {\n\tr.ifNoneMatch = &ifNoneMatch\n\treturn r\n}", "func NewCreateStorageV1CSINodeAccepted() *CreateStorageV1CSINodeAccepted {\n\n\treturn &CreateStorageV1CSINodeAccepted{}\n}", "func NewRequest(c *RESTClient) *Request {\n\tvar pathPrefix string\n\tif c.base != nil {\n\t\tpathPrefix = path.Join(\"/\", c.base.Path, c.versionedAPIPath)\n\t} else {\n\t\tpathPrefix = path.Join(\"/\", c.versionedAPIPath)\n\t}\n\n\tr := &Request{\n\t\tc: c,\n\t\tpathPrefix: pathPrefix,\n\t}\n\n\tauthMethod := 0\n\n\tfor _, fn := range []func() bool{c.content.HasBasicAuth, c.content.HasTokenAuth, c.content.HasKeyAuth} {\n\t\tif fn() {\n\t\t\tauthMethod++\n\t\t}\n\t}\n\n\tif authMethod > 1 {\n\t\tr.err = fmt.Errorf(\n\t\t\t\"username/password or bearer token or secretID/secretKey may be set, but should use only one of them\",\n\t\t)\n\n\t\treturn r\n\t}\n\n\tswitch {\n\tcase c.content.HasTokenAuth():\n\t\tr.SetHeader(\"Authorization\", fmt.Sprintf(\"Bearer %s\", c.content.BearerToken))\n\tcase c.content.HasKeyAuth():\n\t\ttokenString := auth.Sign(c.content.SecretID, c.content.SecretKey, \"marmotedu-sdk-go\", c.group+\".marmotedu.com\")\n\t\tr.SetHeader(\"Authorization\", fmt.Sprintf(\"Bearer %s\", tokenString))\n\tcase c.content.HasBasicAuth():\n\t\t// TODO: get token and set header\n\t\tr.SetHeader(\"Authorization\", \"Basic \"+basicAuth(c.content.Username, c.content.Password))\n\t}\n\n\t// set accept content\n\tswitch {\n\tcase len(c.content.AcceptContentTypes) > 0:\n\t\tr.SetHeader(\"Accept\", c.content.AcceptContentTypes)\n\tcase len(c.content.ContentType) > 0:\n\t\tr.SetHeader(\"Accept\", c.content.ContentType+\", */*\")\n\t}\n\n\treturn r\n}", "func NewCreateIOCDefault(code int) *CreateIOCDefault {\n\treturn &CreateIOCDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewCreateImageUnauthorized() *CreateImageUnauthorized {\n\treturn &CreateImageUnauthorized{}\n}", "func NewCreateImageFromSnapshotsRequestWithoutParam() *CreateImageFromSnapshotsRequest {\n\n return &CreateImageFromSnapshotsRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/regions/{regionId}/images:createImageFromSnapshots\",\n Method: \"POST\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}", "func (o *ReplaceStorageV1CSINodeCreated) WithPayload(payload *models.IoK8sAPIStorageV1CSINode) *ReplaceStorageV1CSINodeCreated {\n\to.Payload = payload\n\treturn o\n}", "func createOIDCIssuer(client *azureclients.AzureClientWrapper, name, region, oidcResourceGroupName, storageAccountName, blobContainerName, subscriptionID, tenantID, publicKeyPath, outputDir string, resourceTags map[string]string, dryRun bool) (string, error) {\n\t// Add CCO's \"owned\" tag to resource tags map\n\tresourceTags[fmt.Sprintf(\"%s_%s\", ownedAzureResourceTagKeyPrefix, name)] = ownedAzureResourceTagValue\n\n\tstorageAccountKey := \"\"\n\tif !dryRun {\n\t\t// Ensure that the 
public key file can be read at the publicKeyPath before continuing\n\t\t_, err := os.ReadFile(publicKeyPath)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"unable to read public key file\")\n\t\t}\n\n\t\t// Ensure the resource group exists\n\t\terr = ensureResourceGroup(client, oidcResourceGroupName, region, resourceTags)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"failed to ensure resource group\")\n\t\t}\n\n\t\t// Ensure storage account exists\n\t\terr = ensureStorageAccount(client, storageAccountName, oidcResourceGroupName, region, resourceTags)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"failed to ensure storage account\")\n\t\t}\n\n\t\tstorageAccountKey, err = getStorageAccountKey(client, storageAccountName, oidcResourceGroupName)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"failed to get storage account key\")\n\t\t}\n\n\t\t// Ensure blob container exists\n\t\terr = ensureBlobContainer(client, oidcResourceGroupName, storageAccountName, blobContainerName)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"failed to create blob container\")\n\t\t}\n\t}\n\n\t// Upload OIDC documents (openid-configuration, jwks.json) to the blob container\n\toutputDirAbsPath, err := filepath.Abs(outputDir)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tissuerURL, err := uploadOIDCDocuments(client, storageAccountName, storageAccountKey, publicKeyPath, blobContainerName, outputDirAbsPath, dryRun, resourceTags)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to upload OIDC documents\")\n\t}\n\n\t// Write cluster authentication object installer manifest cluster-authentication-02-config.yaml\n\t// for our issuerURL within outputDir/manifests\n\tif err = provisioning.CreateClusterAuthentication(issuerURL, outputDir); err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to create cluster authentication manifest\")\n\t}\n\n\t// Write Azure AD pod identity webhook config secret azure-ad-pod-identity-webhook-config.yaml\n\t// within outputDir/manifests\n\tif err = createPodIdentityWebhookConfigSecret(tenantID, outputDir); err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to create Azure AD pod identity webhook manifest\")\n\t}\n\n\treturn issuerURL, nil\n}", "func NewReplaceStorageV1CSINodeCreated() *ReplaceStorageV1CSINodeCreated {\n\treturn &ReplaceStorageV1CSINodeCreated{}\n}", "func NewCreateCoreV1NamespacedPodUnauthorized() *CreateCoreV1NamespacedPodUnauthorized {\n\treturn &CreateCoreV1NamespacedPodUnauthorized{}\n}", "func NewCreateClusterRequestWithoutParam() *CreateClusterRequest {\n\n return &CreateClusterRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/regions/{regionId}/clusters\",\n Method: \"POST\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}", "func newJWTBase(ctx context.Context, cfg Config) (string, error) {\n\tserviceAccount, project, tokenSource, err := getServiceAccountInfo(ctx, cfg)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"unable to get service account from environment\")\n\t}\n\n\tpayload, err := json.Marshal(map[string]interface{}{\n\t\t\"aud\": \"vault/\" + cfg.Role,\n\t\t\"sub\": serviceAccount,\n\t\t\"exp\": time.Now().UTC().Add(5 * time.Minute).Unix(),\n\t})\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"unable to encode JWT payload\")\n\t}\n\n\thc := getHTTPClient(ctx, cfg)\n\t// reuse base transport and timeout but sprinkle on the token source for IAM access\n\thcIAM := &http.Client{\n\t\tTimeout: hc.Timeout,\n\t\tTransport: 
&oauth2.Transport{\n\t\t\tSource: tokenSource,\n\t\t\tBase: hc.Transport,\n\t\t},\n\t}\n\tiamClient, err := iam.New(hcIAM)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"unable to init IAM client\")\n\t}\n\n\tif cfg.IAMAddress != \"\" {\n\t\tiamClient.BasePath = cfg.IAMAddress\n\t}\n\n\tresp, err := iamClient.Projects.ServiceAccounts.SignJwt(\n\t\tfmt.Sprintf(\"projects/%s/serviceAccounts/%s\",\n\t\t\tproject, serviceAccount),\n\t\t&iam.SignJwtRequest{Payload: string(payload)}).Context(ctx).Do()\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"unable to sign JWT\")\n\t}\n\treturn resp.SignedJwt, nil\n}", "func (s *StorageBase) New(ctx context.Context, ttl time.Duration) (id string, err error) {\n\treturn \"\", ErrorDisabled\n}", "func (client StorageGatewayClient) createFileSystem(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/storageGateways/{storageGatewayId}/fileSystems\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateFileSystemResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func NewCreateRepository8Unauthorized() *CreateRepository8Unauthorized {\n\treturn &CreateRepository8Unauthorized{}\n}", "func (a *AWSSigv4) CreateAuthorizationHeader(keyid, secret string) string {\n\t_, shs := a.createCanonicalHeaders()\n\n\tsigkey := a.createSignatureKey(secret)\n\tcs := a.createCredentialScope()\n\tcr := a.createCanonicalRequest()\n\tsigstr := a.createStringToSign(cs, cr)\n\n\tsig := a.createSignature(string(sigkey), sigstr)\n\n\tauthheader := a.Algorithm + \" \"\n\tauthheader += \"Credential=\" + keyid + \"/\"\n\tauthheader += a.createCredentialScope() + \", \"\n\tauthheader += \"SignedHeaders=\" + shs + \", \"\n\tauthheader += \"Signature=\" + sig\n\n\treturn authheader\n}", "func NewUpdateHostIgnitionUnauthorized() *UpdateHostIgnitionUnauthorized {\n\n\treturn &UpdateHostIgnitionUnauthorized{}\n}", "func NewCreateCoreV1NamespacedServiceAccountTokenUnauthorized() *CreateCoreV1NamespacedServiceAccountTokenUnauthorized {\n\n\treturn &CreateCoreV1NamespacedServiceAccountTokenUnauthorized{}\n}", "func NewRegisterInfraEnvUnauthorized() *RegisterInfraEnvUnauthorized {\n\n\treturn &RegisterInfraEnvUnauthorized{}\n}", "func CreateBucket(w http.ResponseWriter, r *http.Request) *appError {\n session, err := store.Get(r, \"session-name\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n s3 := S3{\n EndPointString: session.Values[\"Endpoint\"].(string),\n AccessKey: session.Values[\"AccessKey\"].(string),\n SecretKey: session.Values[\"SecretKey\"].(string),\n Namespace: session.Values[\"Namespace\"].(string),\n }\n\n decoder := json.NewDecoder(r.Body)\n var bucket NewBucket\n err = decoder.Decode(&bucket)\n if err != nil {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Can't decode JSON data\"}\n }\n\n // Add the necessary headers for Metadata Search and Access During Outage\n createBucketHeaders := map[string][]string{}\n createBucketHeaders[\"Content-Type\"] = []string{\"application/xml\"}\n createBucketHeaders[\"x-emc-is-stale-allowed\"] = []string{\"true\"}\n 
createBucketHeaders[\"x-emc-metadata-search\"] = []string{\"ObjectName,x-amz-meta-image-width;Integer,x-amz-meta-image-height;Integer,x-amz-meta-gps-latitude;Decimal,x-amz-meta-gps-longitude;Decimal\"}\n\n createBucketResponse, _ := s3Request(s3, bucket.Name, \"PUT\", \"/\", createBucketHeaders, \"\")\n\n // Enable CORS after the bucket creation to allow the web browser to send requests directly to ECS\n if createBucketResponse.Code == 200 {\n enableBucketCorsHeaders := map[string][]string{}\n enableBucketCorsHeaders[\"Content-Type\"] = []string{\"application/xml\"}\n corsConfiguration := `\n <CORSConfiguration>\n <CORSRule>\n <AllowedOrigin>*</AllowedOrigin>\n <AllowedHeader>*</AllowedHeader>\n <ExposeHeader>x-amz-meta-image-width</ExposeHeader>\n <ExposeHeader>x-amz-meta-image-height</ExposeHeader>\n <ExposeHeader>x-amz-meta-gps-latitude</ExposeHeader>\n <ExposeHeader>x-amz-meta-gps-longitude</ExposeHeader>\n <AllowedMethod>HEAD</AllowedMethod>\n <AllowedMethod>GET</AllowedMethod>\n <AllowedMethod>PUT</AllowedMethod>\n <AllowedMethod>POST</AllowedMethod>\n <AllowedMethod>DELETE</AllowedMethod>\n </CORSRule>\n </CORSConfiguration>\n `\n enableBucketCorsResponse, _ := s3Request(s3, bucket.Name, \"PUT\", \"/?cors\", enableBucketCorsHeaders, corsConfiguration)\n if enableBucketCorsResponse.Code == 200 {\n rendering.JSON(w, http.StatusOK, struct {\n CorsConfiguration string `json:\"cors_configuration\"`\n Bucket string `json:\"bucket\"`\n } {\n CorsConfiguration: corsConfiguration,\n Bucket: bucket.Name,\n })\n } else {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Bucket created, but CORS can't be enabled\"}\n }\n } else {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Bucket can't be created\"}\n }\n return nil\n}", "func (client *LROSADsClient) post202RetryInvalidHeaderCreateRequest(ctx context.Context, options *LROSADsClientBeginPost202RetryInvalidHeaderOptions) (*policy.Request, error) {\n\turlPath := \"/lro/error/post/202/retry/invalidheader\"\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif options != nil && options.Product != nil {\n\t\tif err := runtime.MarshalAsJSON(req, *options.Product); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn req, nil\n\t}\n\treturn req, nil\n}", "func NewSesseion(w http.ResponseWriter, r *http.Request) {\n\tcreds := &structs.Credentials{}\n\terr := json.NewDecoder(r.Body).Decode(creds)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\terro := !checkCredentials(*creds)\n\n\tif erro {\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\treturn\n\t}\n}", "func NewObjectsCreateUnauthorized() *ObjectsCreateUnauthorized {\n\treturn &ObjectsCreateUnauthorized{}\n}", "func NewCreateIOCForbidden() *CreateIOCForbidden {\n\treturn &CreateIOCForbidden{}\n}", "func TestInitToken_Ensure_NoExpectedToken_NotExisting(t *testing.T) {\n\tfv := NewFakeVault(t)\n\tdefer fv.Finish()\n\n\tfv.ExpectWrite()\n\n\ti := &InitToken{\n\t\tRole: \"etcd\",\n\t\tPolicies: []string{\"etcd\"},\n\t\tkubernetes: fv.Kubernetes(),\n\t\tExpectedToken: \"\",\n\t}\n\n\t// expects a read and vault says secret is not existing\n\tinitTokenPath := \"test-cluster-inside/secrets/init_token_etcd\"\n\tfv.fakeLogical.EXPECT().Read(initTokenPath).Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\t// expect a create new 
orphan\n\tfv.fakeToken.EXPECT().CreateOrphan(&tokenCreateRequestMatcher{}).Return(&vault.Secret{\n\t\tAuth: &vault.SecretAuth{\n\t\t\tClientToken: \"my-new-random-token\",\n\t\t},\n\t}, nil)\n\n\t// expect a write of the new token\n\tfv.fakeLogical.EXPECT().Write(initTokenPath, map[string]interface{}{\"init_token\": \"my-new-random-token\"}).Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tfv.fakeToken.EXPECT().Lookup(\"my-new-random-token\").Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tfv.fakeToken.EXPECT().Renew(\"my-new-random-token\", 0).Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tInitTokenEnsure_EXPECTs(fv)\n\n\terr := i.Ensure()\n\tif err != nil {\n\t\tt.Error(\"unexpected error: \", err)\n\t}\n\n\ttoken, err := i.InitToken()\n\tif err != nil {\n\t\tt.Error(\"unexpected error: \", err)\n\t}\n\n\tif exp, act := \"my-new-random-token\", token; exp != act {\n\t\tt.Errorf(\"unexpected token: act=%s exp=%s\", act, exp)\n\t}\n\n\treturn\n}", "func createNewAuthToken(w http.ResponseWriter, r *http.Request, u *chatable.User) (*chatable.PublicToken, chatable.CompoundError) {\n\t// create a new token for the user\n\t// client_id is on the header\n\tclientID := r.Header.Get(\"ClientID\")\n\tcid, err := strconv.Atoi(clientID)\n\tif err != nil {\n\t\tcid = -1\n\t}\n\tat := chatable.NewAuthToken(u.ID, cid, chatable.StringSlice{\"all\"})\n\tif err = store.AuthTokenStore.Create(at); err != nil {\n\t\treturn nil, chatable.NewServerError(err.Error())\n\t}\n\treturn at.ToPublicToken(), nil\n}", "func NewUnauthorized(res calcsvc.Unauthorized) Unauthorized {\n\tbody := Unauthorized(res)\n\treturn body\n}", "func NewWatchStorageV1CSINodeListOK() *WatchStorageV1CSINodeListOK {\n\treturn &WatchStorageV1CSINodeListOK{}\n}", "func NewCreateCoreV1NamespacedServiceAccountTokenUnauthorized() *CreateCoreV1NamespacedServiceAccountTokenUnauthorized {\n\treturn &CreateCoreV1NamespacedServiceAccountTokenUnauthorized{}\n}", "func NewGetAWSNodeTypesUnauthorized() *GetAWSNodeTypesUnauthorized {\n\treturn &GetAWSNodeTypesUnauthorized{}\n}", "func NewAddTemplateRequestWithoutParam() *AddTemplateRequest {\n\n return &AddTemplateRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/regions/{regionId}/addTemplate\",\n Method: \"POST\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}", "func NewCreateHPCResourceUnauthorized() *CreateHPCResourceUnauthorized {\n\n\treturn &CreateHPCResourceUnauthorized{}\n}", "func NewWeaviateKeyCreateUnauthorized() *WeaviateKeyCreateUnauthorized {\n\treturn &WeaviateKeyCreateUnauthorized{}\n}", "func (idx *Unique) Init() error {\n\ttokenManager, err := jwt.New(map[string]interface{}{\n\t\t\"secret\": idx.cs3conf.JWTSecret,\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tidx.tokenManager = tokenManager\n\n\tclient, err := pool.GetStorageProviderServiceClient(idx.cs3conf.ProviderAddr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tidx.storageProvider = client\n\n\tctx := context.Background()\n\ttk, err := idx.authenticate(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tctx = metadata.AppendToOutgoingContext(ctx, revactx.TokenHeader, tk)\n\n\tif err := idx.makeDirIfNotExists(ctx, idx.indexBaseDir); err != nil {\n\t\treturn err\n\t}\n\n\tif err := idx.makeDirIfNotExists(ctx, idx.indexRootDir); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func NewCreateUnauthorized() *CreateUnauthorized {\n\treturn &CreateUnauthorized{}\n}", "func NewInvokeCommandRequestWithoutParam() *InvokeCommandRequest {\n\n return &InvokeCommandRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: 
\"/regions/{regionId}/invokeCommand\",\n Method: \"POST\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}", "func NewReadFromMicrostorageUnauthorized() *ReadFromMicrostorageUnauthorized {\n\treturn &ReadFromMicrostorageUnauthorized{}\n}", "func NewDeleteStorageByIDUnauthorized() *DeleteStorageByIDUnauthorized {\n\n\treturn &DeleteStorageByIDUnauthorized{}\n}", "func NewCreateCompanyUnauthorized(body *CreateCompanyUnauthorizedResponseBody) *goa.ServiceError {\n\tv := &goa.ServiceError{\n\t\tName: *body.Name,\n\t\tID: *body.ID,\n\t\tMessage: *body.Message,\n\t\tTemporary: *body.Temporary,\n\t\tTimeout: *body.Timeout,\n\t\tFault: *body.Fault,\n\t}\n\n\treturn v\n}", "func (a *HyperflexApiService) CreateHyperflexExtIscsiStoragePolicyExecute(r ApiCreateHyperflexExtIscsiStoragePolicyRequest) (*HyperflexExtIscsiStoragePolicy, *http.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = http.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tformFiles []formFile\n\t\tlocalVarReturnValue *HyperflexExtIscsiStoragePolicy\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"HyperflexApiService.CreateHyperflexExtIscsiStoragePolicy\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/api/v1/hyperflex/ExtIscsiStoragePolicies\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\tif r.hyperflexExtIscsiStoragePolicy == nil {\n\t\treturn localVarReturnValue, nil, reportError(\"hyperflexExtIscsiStoragePolicy is required and must be specified\")\n\t}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.ifMatch != nil {\n\t\tlocalVarHeaderParams[\"If-Match\"] = parameterToString(*r.ifMatch, \"\")\n\t}\n\tif r.ifNoneMatch != nil {\n\t\tlocalVarHeaderParams[\"If-None-Match\"] = parameterToString(*r.ifNoneMatch, \"\")\n\t}\n\t// body params\n\tlocalVarPostBody = r.hyperflexExtIscsiStoragePolicy\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := &GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, 
localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tvar v Error\n\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\tif err != nil {\n\t\t\tnewErr.error = err.Error()\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tnewErr.model = v\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := &GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func NewGetNodeV1beta1APIResourcesUnauthorized() *GetNodeV1beta1APIResourcesUnauthorized {\n\treturn &GetNodeV1beta1APIResourcesUnauthorized{}\n}", "func (client *LROSADsClient) delete202RetryInvalidHeaderCreateRequest(ctx context.Context, options *LROSADsClientBeginDelete202RetryInvalidHeaderOptions) (*policy.Request, error) {\n\turlPath := \"/lro/error/delete/202/retry/invalidheader\"\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func NewInternalV1StorageRegionsThresholdsPutUnauthorized() *InternalV1StorageRegionsThresholdsPutUnauthorized {\n\treturn &InternalV1StorageRegionsThresholdsPutUnauthorized{}\n}", "func NewAuthorizationHeader(clientID, secret string) string {\n\treturn \"Basic \" + base64.StdEncoding.EncodeToString([]byte(clientID+\":\"+secret))\n}", "func TestInitToken_Ensure_NoExpectedToken_AlreadyExisting(t *testing.T) {\n\tfv := NewFakeVault(t)\n\tdefer fv.Finish()\n\n\tfv.ExpectWrite()\n\n\ti := &InitToken{\n\t\tRole: \"etcd\",\n\t\tPolicies: []string{\"etcd\"},\n\t\tkubernetes: fv.Kubernetes(),\n\t\tExpectedToken: \"\",\n\t}\n\n\t// expect a read and vault says secret is existing\n\tinitTokenPath := 
\"test-cluster-inside/secrets/init_token_etcd\"\n\tfv.fakeLogical.EXPECT().Read(initTokenPath).Return(\n\t\t&vault.Secret{\n\t\t\tData: map[string]interface{}{\"init_token\": \"existing-token\"},\n\t\t},\n\t\tnil,\n\t)\n\n\tfv.fakeToken.EXPECT().Lookup(\"existing-token\").Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tfv.fakeToken.EXPECT().Renew(\"existing-token\", 0).Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tInitTokenEnsure_EXPECTs(fv)\n\n\terr := i.Ensure()\n\tif err != nil {\n\t\tt.Error(\"unexpected error: \", err)\n\t}\n\n\ttoken, err := i.InitToken()\n\tif err != nil {\n\t\tt.Error(\"unexpected error: \", err)\n\t}\n\n\tif exp, act := \"existing-token\", token; exp != act {\n\t\tt.Errorf(\"unexpected token: act=%s exp=%s\", act, exp)\n\t}\n\n\treturn\n}", "func newAuthenticatedRequest(t testing.TB, method, path string, body io.Reader, user, pass string) *http.Request {\n\treq := newRequest(t, method, path, body)\n\treq.SetBasicAuth(user, pass)\n\treq.Header.Add(\"Accept\", resticAPIV2)\n\treturn req\n}", "func NewGetTagUnauthorized() *GetTagUnauthorized {\n\treturn &GetTagUnauthorized{}\n}", "func (r ApiCreateHyperflexExtFcStoragePolicyRequest) IfNoneMatch(ifNoneMatch string) ApiCreateHyperflexExtFcStoragePolicyRequest {\n\tr.ifNoneMatch = &ifNoneMatch\n\treturn r\n}", "func (o *CreateStorageV1CSINodeCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func NewHeader(alg Alg) *Header {\n\treturn &Header{\n\t\tType: \"JWT\",\n\t\tAlg: alg.Name(),\n\t}\n}", "func CreateCsr(commonName string, country string, state string, city string,\n organization string, organizationalUnit string,\n emailAddress string) ([]byte, []byte, error) {\n\n priv, err := rsa.GenerateKey(rand.Reader, 2048)\n if err != nil {\n return nil, nil, err\n }\n\n template := x509.CertificateRequest{\n Subject: pkix.Name{\n CommonName: commonName,\n Country: []string{country},\n Province: []string{state},\n Locality: []string{city},\n Organization: []string{organization},\n OrganizationalUnit: []string{organizationalUnit},\n },\n SignatureAlgorithm: x509.SHA256WithRSA,\n EmailAddresses: []string{emailAddress},\n }\n\n random := rand.Reader\n csrBytes, err := x509.CreateCertificateRequest(random, &template, priv)\n if err != nil {\n return nil, nil, err\n }\n\n block := pem.Block{\n Type: \"CERTIFICATE REQUEST\",\n Bytes: csrBytes,\n }\n certPem := pem.EncodeToMemory(&block)\n\n block = pem.Block{\n Type: \"RSA PRIVATE KEY\",\n Bytes: x509.MarshalPKCS1PrivateKey(priv),\n }\n privPem := pem.EncodeToMemory(&block)\n\n return privPem, certPem, nil\n}", "func (sdk *SDK) NewNode(prefer *cloudsvr.PreferAttrs) (*cloudsvr.CloudNode, *cloudsvr.PreferAttrs, error) {\n\n\tvar (\n\t\tpassword, _ = utils.GenPassword(24)\n\t\treq = &CreateInstanceRequest{\n\t\t\tImageID: OsImage,\n\t\t\tPassword: password,\n\t\t\tInstanceName: NodeName,\n\t\t\tInstanceChargeType: \"PostPaid\", // require RMB 100+\n\t\t\tSecurityGroupID: \"whatever\", // will be automatic rewrite\n\t\t\tInternetChargeType: \"PayByTraffic\", // traffic payment\n\t\t\tInternetMaxBandwidthOut: \"100\", // 100M\n\t\t\tLabels: NodeLabels,\n\t\t}\n\t)\n\n\t// if prefered attributes set, use prefer region & instance-type\n\tif prefer != nil && prefer.Valid() == nil {\n\t\tvar (\n\t\t\treg = prefer.RegionOrZone\n\t\t\ttyp = 
prefer.InstanceType\n\t\t)\n\t\tlog.Printf(\"create aliyun ecs by using prefered region %s, instance type %s ...\", reg, typ)\n\n\t\treq.RegionID = reg // cn-beijing\n\t\treq.InstanceType = typ // ecs.n4.large\n\n\t\tcreated, err := sdk.createNode(req)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\tlog.Printf(\"created prefered aliyun ecs succeed: %s\", created.ID)\n\t\treturn created, prefer, nil\n\t}\n\n\tlog.Infoln(\"creating aliyun ecs by trying all regions & types ...\")\n\n\t// if prefered created failed, or without prefer region & instance-type\n\t// try best on all region & instance-types to create the new aliyun ecs\n\tvar (\n\t\tregions []RegionType // all of aliyun regions\n\t\ttypes []InstanceTypeItemType // all of instance types within given range of mems & cpus\n\t\terr error\n\t\tcreated *cloudsvr.CloudNode\n\t)\n\n\t// list all regions\n\tregions, err = sdk.ListRegions()\n\tif err != nil {\n\t\tlog.Errorf(\"sdk.NewNode.ListRegions() error: %v\", err)\n\t\treturn nil, nil, err\n\t}\n\n\t// list specified range of instance types\n\ttypes, err = sdk.ListInstanceTypes(2, 4, 2, 8) // TODO range of given cpus/mems ranges\n\tif err != nil {\n\t\tlog.Errorf(\"sdk.NewNode.ListInstanceTypes() error: %v\", err)\n\t\treturn nil, nil, err\n\t}\n\n\tvar (\n\t\tuseRegionID, useInsType string\n\t)\n\t// range all regions & types to try to create ecs instance\n\tfor _, reg := range regions {\n\t\tfor _, typ := range types {\n\t\t\treq.RegionID = reg.RegionID // cn-beijing\n\t\t\treq.InstanceType = typ.InstanceTypeID // ecs.n4.large\n\n\t\t\t// if created succeed, directly return\n\t\t\tcreated, err = sdk.createNode(req)\n\t\t\tif err == nil {\n\t\t\t\tuseRegionID, useInsType = reg.RegionID, typ.InstanceTypeID\n\t\t\t\tgoto END\n\t\t\t}\n\n\t\t\tif sdk.isFatalError(err) {\n\t\t\t\tlog.Errorf(\"create aliyun ecs got fatal error, stop retry: %v\", err)\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\n\t\t\tlog.Warnf(\"create aliyun ecs failed: %v, will retry another region or type\", err)\n\t\t}\n\t}\n\nEND:\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tlog.Printf(\"created aliyun ecs %s at %s and type is %s\", created.ID, useRegionID, useInsType)\n\treturn created, &cloudsvr.PreferAttrs{RegionOrZone: useRegionID, InstanceType: useInsType}, nil\n}", "func NewQueryForbiddenInfoListRequestWithoutParam() *QueryForbiddenInfoListRequest {\n\n return &QueryForbiddenInfoListRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/forbiddenInfo:query\",\n Method: \"POST\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}", "func (client IdentityClient) createTagDefault(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/tagDefaults\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateTagDefaultResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func NewCreateFileDefault(code int) *CreateFileDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &CreateFileDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewHeaders()(*Headers) {\n m := &Headers{\n }\n m.backingStore = 
ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}", "func TestInitToken_Ensure_ExpectedToken_NotExisting(t *testing.T) {\n\tfv := NewFakeVault(t)\n\tdefer fv.Finish()\n\n\tfv.ExpectWrite()\n\n\ti := &InitToken{\n\t\tRole: \"etcd\",\n\t\tPolicies: []string{\"etcd\"},\n\t\tkubernetes: fv.Kubernetes(),\n\t\tExpectedToken: \"expected-token\",\n\t}\n\n\t// expect a new token creation\n\tfv.fakeToken.EXPECT().CreateOrphan(&tokenCreateRequestMatcher{ID: \"expected-token\"}).Return(&vault.Secret{\n\t\tAuth: &vault.SecretAuth{\n\t\t\tClientToken: \"expected-token\",\n\t\t},\n\t}, nil)\n\n\t// expect a read and vault says secret is not existing, then after it is written to return token\n\tinitTokenPath := \"test-cluster-inside/secrets/init_token_etcd\"\n\tgomock.InOrder(\n\t\tfv.fakeLogical.EXPECT().Read(initTokenPath).Return(\n\t\t\tnil,\n\t\t\tnil,\n\t\t).MinTimes(1),\n\t\t// expect a write of the new token from user flag\n\t\tfv.fakeLogical.EXPECT().Write(initTokenPath, map[string]interface{}{\"init_token\": \"expected-token\"}).Return(\n\t\t\tnil,\n\t\t\tnil,\n\t\t),\n\t\t// allow read out of token from user\n\t\tfv.fakeLogical.EXPECT().Read(initTokenPath).AnyTimes().Return(\n\t\t\t&vault.Secret{\n\t\t\t\tData: map[string]interface{}{\"init_token\": \"expected-token\"},\n\t\t\t},\n\t\t\tnil,\n\t\t),\n\t)\n\n\tfv.fakeToken.EXPECT().Lookup(\"expected-token\").Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tfv.fakeToken.EXPECT().Renew(\"expected-token\", 0).Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tInitTokenEnsure_EXPECTs(fv)\n\n\terr := i.Ensure()\n\tif err != nil {\n\t\tt.Error(\"unexpected error: \", err)\n\t}\n\n\ttoken, err := i.InitToken()\n\tif err != nil {\n\t\tt.Error(\"unexpected error: \", err)\n\t}\n\n\tif exp, act := \"expected-token\", token; exp != act {\n\t\tt.Errorf(\"unexpected token: act=%s exp=%s\", act, exp)\n\t}\n\n\treturn\n}", "func NewHeader(m map[string]string) Header {\n\tfor k, v := range m {\n\t\tdelete(m, k)\n\t\tm[http.CanonicalHeaderKey(k)] = v\n\t}\n\treturn Header(m)\n}", "func NewUnauthorized() *AppError {\n\treturn NewUnauthorizedR(StatusText(Unauthenticated))\n}", "func New() gocsi.StoragePluginProvider {\n\tsvc := service.New()\n\treturn &gocsi.StoragePlugin{\n\t\tController: svc,\n\t\tIdentity: svc,\n\t\tNode: svc,\n\t\tBeforeServe: svc.BeforeServe,\n\t\tRegisterAdditionalServers: svc.RegisterAdditionalServers,\n\n\t\tEnvVars: []string{\n\t\t\t// Enable request validation\n\t\t\tgocsi.EnvVarSpecReqValidation + \"=true\",\n\n\t\t\t// Enable serial volume access\n\t\t\tgocsi.EnvVarSerialVolAccess + \"=true\",\n\t\t},\n\t}\n}", "func NewCreateIntegrationUnauthorized() *CreateIntegrationUnauthorized {\n\treturn &CreateIntegrationUnauthorized{}\n}", "func createStorageProfile(masterIp string, sshClientConfig *ssh.ClientConfig,\n\tstoragePolicyName string, clientIndex int) error {\n\tcreateStoragePolicy := govcLoginCmdForMultiVC(clientIndex) +\n\t\t\"govc storage.policy.create -category=shared-cat-todelete1 -tag=shared-tag-todelete1 \" + storagePolicyName\n\tframework.Logf(\"Create storage policy: %s \", createStoragePolicy)\n\tcreateStoragePolicytRes, err := sshExec(sshClientConfig, masterIp, createStoragePolicy)\n\tif err != nil && createStoragePolicytRes.Code != 0 {\n\t\tfssh.LogResult(createStoragePolicytRes)\n\t\treturn fmt.Errorf(\"couldn't execute command: %s on host: %v , error: %s\",\n\t\t\tcreateStoragePolicy, masterIp, err)\n\t}\n\treturn nil\n}", "func 
NewCreateExtensionsV1beta1NamespacedIngressUnauthorized() *CreateExtensionsV1beta1NamespacedIngressUnauthorized {\n\n\treturn &CreateExtensionsV1beta1NamespacedIngressUnauthorized{}\n}", "func NewCreateCoreV1PersistentVolumeUnauthorized() *CreateCoreV1PersistentVolumeUnauthorized {\n\treturn &CreateCoreV1PersistentVolumeUnauthorized{}\n}", "func (client *ContainerClient) createCreateRequest(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (*policy.Request, error) {\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"restype\", \"container\")\n\tif options != nil && options.Timeout != nil {\n\t\treqQP.Set(\"timeout\", strconv.FormatInt(int64(*options.Timeout), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.Metadata != nil {\n\t\tfor k, v := range options.Metadata {\n\t\t\tif v != nil {\n\t\t\t\treq.Raw().Header[\"x-ms-meta-\"+k] = []string{*v}\n\t\t\t}\n\t\t}\n\t}\n\tif options != nil && options.Access != nil {\n\t\treq.Raw().Header[\"x-ms-blob-public-access\"] = []string{string(*options.Access)}\n\t}\n\treq.Raw().Header[\"x-ms-version\"] = []string{\"2020-10-02\"}\n\tif options != nil && options.RequestID != nil {\n\t\treq.Raw().Header[\"x-ms-client-request-id\"] = []string{*options.RequestID}\n\t}\n\tif containerCPKScopeInfo != nil && containerCPKScopeInfo.DefaultEncryptionScope != nil {\n\t\treq.Raw().Header[\"x-ms-default-encryption-scope\"] = []string{*containerCPKScopeInfo.DefaultEncryptionScope}\n\t}\n\tif containerCPKScopeInfo != nil && containerCPKScopeInfo.PreventEncryptionScopeOverride != nil {\n\t\treq.Raw().Header[\"x-ms-deny-encryption-scope-override\"] = []string{strconv.FormatBool(*containerCPKScopeInfo.PreventEncryptionScopeOverride)}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/xml\"}\n\treturn req, nil\n}", "func (client StorageGatewayClient) createStorageGateway(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/storageGateways\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateStorageGatewayResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func TestCreateTransferBadCredentials(t *testing.T) {\n\ttesttrans := []byte(`{\"To\": \"[email protected]\",\"Status\": \"pending\"}`)\n\n\treq, _ := http.NewRequest(\"POST\", \"/certificates/c001/transfers/create\", bytes.NewBuffer(testtrans))\n\treq.SetBasicAuth(\"rr01\", \"rr\")\n\tresponse := executeRequest(req)\n\n\tcheckResponseCode(t, http.StatusUnauthorized, response.Code)\n\n}", "func (client *LROSADsClient) post202NoLocationCreateRequest(ctx context.Context, options *LROSADsClientBeginPost202NoLocationOptions) (*policy.Request, error) {\n\turlPath := \"/lro/error/post/202/nolocation\"\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif options != nil && options.Product != nil {\n\t\tif err := runtime.MarshalAsJSON(req, *options.Product); err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t\treturn req, nil\n\t}\n\treturn req, nil\n}", "func NewCreateVideoUploadTaskRequestWithoutParam() *CreateVideoUploadTaskRequest {\n\n return &CreateVideoUploadTaskRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/videoUploadTask\",\n Method: \"POST\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}", "func newTKey(key string) storage.TKey {\n\treturn storage.NewTKey(keyStandard, append([]byte(key), 0))\n}", "func NewReplaceStorageV1CSINodeOK() *ReplaceStorageV1CSINodeOK {\n\n\treturn &ReplaceStorageV1CSINodeOK{}\n}", "func newRequest(method, url string, body string) *http.Request {\n\treq, err := http.NewRequest(method, url, strings.NewReader(body))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treq.Header.Set(\"X-API-Token\", \"token1\")\n\treturn req\n}", "func NewReadCoreV1NamespacedEndpointsUnauthorized() *ReadCoreV1NamespacedEndpointsUnauthorized {\n\treturn &ReadCoreV1NamespacedEndpointsUnauthorized{}\n}", "func NewCreateInputPortUnauthorized() *CreateInputPortUnauthorized {\n\treturn &CreateInputPortUnauthorized{}\n}", "func TestCreateTransferNotMine(t *testing.T) {\n\ttesttrans := []byte(`{\"To\": \"[email protected]\",\"Status\": \"pending\"}`)\n\n\treq, _ := http.NewRequest(\"POST\", \"/certificates/c001/transfers/create\", bytes.NewBuffer(testtrans))\n\treq.SetBasicAuth(\"vvg01\", \"vwh39043f\")\n\tresponse := executeRequest(req)\n\n\tcheckResponseCode(t, http.StatusUnauthorized, response.Code)\n\n}" ]
[ "0.6248607", "0.6122658", "0.61036927", "0.55000645", "0.53274125", "0.52621156", "0.5260533", "0.51208663", "0.50959045", "0.504985", "0.5015619", "0.49926725", "0.49710575", "0.4961555", "0.4877436", "0.48435402", "0.48254943", "0.47950625", "0.4785675", "0.47426367", "0.47387502", "0.47321883", "0.4710622", "0.46985665", "0.46813908", "0.46621743", "0.46551815", "0.46376783", "0.46282953", "0.4582574", "0.4579618", "0.45712745", "0.45685133", "0.45582998", "0.45532185", "0.45527777", "0.45498875", "0.45486665", "0.45472834", "0.45442867", "0.45442268", "0.45328107", "0.45254603", "0.45229128", "0.45220092", "0.45031944", "0.44950706", "0.44840357", "0.4483641", "0.44834253", "0.44773844", "0.44550186", "0.44533262", "0.4443272", "0.44288155", "0.4424304", "0.44198155", "0.44001874", "0.43923566", "0.43873966", "0.4387201", "0.43713862", "0.4369949", "0.43539965", "0.43430024", "0.43353096", "0.43307403", "0.43258467", "0.4320376", "0.43129942", "0.430106", "0.42991835", "0.42843536", "0.42835128", "0.42796808", "0.42686772", "0.42672902", "0.42653424", "0.4264685", "0.42620435", "0.4243814", "0.42380902", "0.42281163", "0.42259455", "0.42023918", "0.4199626", "0.41976625", "0.4196505", "0.41884103", "0.41812706", "0.41743812", "0.41733456", "0.41721222", "0.41538182", "0.41514346", "0.41488266", "0.4147777", "0.41456902", "0.41372216", "0.4132096" ]
0.71874326
0
WriteResponse to the client
func (o *CreateStorageV1CSINodeUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {

	rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses

	rw.WriteHeader(401)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *Response) Write(w io.Writer) error", "func (c *Operation) writeResponse(rw http.ResponseWriter, status int, data []byte) { // nolint: unparam\n\trw.WriteHeader(status)\n\n\tif _, err := rw.Write(data); err != nil {\n\t\tlogger.Errorf(\"Unable to send error message, %s\", err)\n\t}\n}", "func WriteResponse(w http.ResponseWriter, mensaje string, code int) {\n\tmessage := myTypes.Respuesta{\n\t\tMessage: mensaje,\n\t}\n\tresponse, _ := json.Marshal(message)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(response)\n}", "func WriteResponse(w http.ResponseWriter, object interface{}, rerr *irma.RemoteError) {\n\tstatus, bts := JsonResponse(object, rerr)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\t_, err := w.Write(bts)\n\tif err != nil {\n\t\tLogWarning(errors.WrapPrefix(err, \"failed to write response\", 0))\n\t}\n}", "func (o *PingOK) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(200)\n}", "func WriteResponse(w http.ResponseWriter, v interface{}, statusCode int) {\n\tresBody, err := json.Marshal(v)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(statusCode)\n\t_, _ = w.Write(resBody)\n}", "func WriteResponse(w http.ResponseWriter, code int, object interface{}) {\n\tdata, err := json.Marshal(object)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(data)\n}", "func (o *GetPingOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func writeResponse(body []byte, w *http.ResponseWriter) {\n\t(*w).Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\t_, err := (*w).Write(body)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\t(*w).WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func WriteResponse(w http.ResponseWriter, code int, resp interface{}) error {\n\tj, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn err\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\n\t_, err = w.Write(j)\n\treturn err\n}", "func writeResponse(w *http.ResponseWriter, res responseData, status int) {\n\tresJSON, err := json.Marshal(res)\n\tif err != nil {\n\t\thttp.Error(*w, \"Failed to parse struct `responseData` into JSON object\", http.StatusInternalServerError)\n\t}\n\n\t(*w).Header().Set(\"Content-Type\", \"application/json\")\n\t(*w).WriteHeader(status)\n\t(*w).Write(resJSON)\n}", "func WriteResponse(w http.ResponseWriter, d string) {\n\tw.WriteHeader(200)\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.Write([]byte(d))\n\treturn\n}", "func (o *CreateFacilityUsersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func writeResponse(w http.ResponseWriter, response Response) {\n\tjson, err := json.Marshal(&response)\n\n\tif err != nil {\n\t\tfmt.Fprint(w, \"There was an error processing the request.\")\n\t}\n\n\tcommon.Log(fmt.Sprintf(\"Returning response %s\", json))\n\tfmt.Fprintf(w, \"%s\", json)\n}", "func (o *CreateProgramOK) WriteResponse(rw http.ResponseWriter, producer 
runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *DepositNewFileOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateMedicineOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *CreateTaskCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Location\n\n\tlocation := o.Location.String()\n\tif location != \"\" {\n\t\trw.Header().Set(\"Location\", location)\n\t}\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func writeResponse(r *http.Request, w http.ResponseWriter, code int, resp interface{}) {\n\n\t// Deal with CORS\n\tif origin := r.Header.Get(\"Origin\"); origin != \"\" {\n\t\tw.Header().Set(\"Access-Control-Allow-Origin\", origin)\n\t\tw.Header().Set(\"Access-Control-Allow-Methods\", \"DELETE, GET, HEAD, OPTIONS, POST, PUT\")\n\t\tw.Header().Set(\"Access-Control-Allow-Credentials\", \"true\")\n\t\t// Allow any headers\n\t\tif wantedHeaders := r.Header.Get(\"Access-Control-Request-Headers\"); wantedHeaders != \"\" {\n\t\t\tw.Header().Set(\"Access-Control-Allow-Headers\", wantedHeaders)\n\t\t}\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\n\tb, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintln(w, `{\"error\":\"failed to marshal json\"}`)\n\t\treturn\n\t}\n\n\tw.WriteHeader(code)\n\tfmt.Fprintln(w, string(b))\n}", "func (o *VerifyAccountCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func writeResponse(w http.ResponseWriter, h int, p interface{}) {\n\t// I set the content type...\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t// ... I write the specified status code...\n\tw.WriteHeader(h)\n\t// ... 
and I write the response\n\tb, _ := json.Marshal(p)\n\tw.Write(b)\n}", "func (o *UpdateCatalogOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func (c *SwitchVersion) WriteResponse(rw http.ResponseWriter, rp runtime.Producer) {\n\tswitch c.Request.Method {\n\tcase http.MethodPost:\n\t\tc.postSwitchVersion(rw, rp)\n\tdefault:\n\t\tc.notSupported(rw, rp)\n\t}\n}", "func (o *PutRecordingsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *BofaChkUpdateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *VerifyHealthCredentialOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func WriteResponse(w http.ResponseWriter, code int, err error, data interface{}, t0 time.Time) {\n\tw.WriteHeader(code)\n\tresp := &Response{Data: data, Dur: fmt.Sprint(time.Since(t0)), OK: false}\n\tif code < 300 {\n\t\tresp.OK = true\n\t}\n\tif err != nil {\n\t\tresp.Err = err.Error()\n\t}\n\terr = json.NewEncoder(w).Encode(resp)\n\tif err != nil {\n\t\tlog.Infof(\"failed to json encode response: %v\", err)\n\t\tif _, err = w.Write([]byte(spew.Sdump(resp))); err != nil {\n\t\t\tlog.Infof(\"failed to write dump of response: %v\", err)\n\t\t}\n\t}\n}", "func (o *NewDiscoveryOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func writeResponse(data []byte, size int64, ctype string, w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", ctype)\n\tw.Header().Set(\"Content-Length\", fmt.Sprintf(\"%d\", size))\n\tw.Header().Set(\"Cache-Control\", \"no-transform,public,max-age=86400,s-maxage=2592000\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(data)\n}", "func writeResponse(w http.ResponseWriter, code int, object interface{}) {\n\tfmt.Println(\"writing response:\", code, object)\n\tdata, err := json.Marshal(object)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tw.Header().Set(\"content-type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(data)\n}", "func writeResponse(w http.ResponseWriter, authZRes *authorization.Response) {\n\n\tdata, err := json.Marshal(authZRes)\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to marshel authz response %q\", err.Error())\n\t} else {\n\t\tw.Write(data)\n\t}\n\n\tif authZRes == nil || authZRes.Err != \"\" {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n}", "func (o *GetCharactersCharacterIDOpportunitiesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make(models.GetCharactersCharacterIDOpportunitiesOKBody, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) 
// let the recovery middleware deal with this\n\t}\n\n}", "func (o *WeaviateThingsGetNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n}", "func (c *UpdateSwitch) WriteResponse(rw http.ResponseWriter, rp runtime.Producer) {\n\tswitch c.Request.Method {\n\tcase http.MethodPost:\n\t\tc.postUpdateSwitch(rw, rp)\n\tdefault:\n\t\tc.notSupported(rw, rp)\n\t}\n}", "func (c *UpdateSwitch) WriteResponse(rw http.ResponseWriter, rp runtime.Producer) {\n\tswitch c.Request.Method {\n\tcase http.MethodPost:\n\t\tc.postUpdateSwitch(rw, rp)\n\tdefault:\n\t\tc.notSupported(rw, rp)\n\t}\n}", "func (o *UpdateLinkInPostOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetChatroomsIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetEchoNameOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetUIContentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *ListVsphereResourceOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func ResponseWrite(w http.ResponseWriter, responseCode int, responseData interface{}) {\n\t// Write Response\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(responseCode)\n\n\t// Write JSON to Response\n\tjson.NewEncoder(w).Encode(responseData)\n}", "func writeHTTPResponseInWriter(httpRes http.ResponseWriter, httpReq *http.Request, nobelPrizeWinnersResponse []byte, err error) {\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\thttp.Error(httpRes, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tlog.Printf(\"Request %s Succesfully Completed\", httpReq.RequestURI)\n\thttpRes.Header().Set(\"Content-Type\", \"application/json\")\n\thttpRes.Write(nobelPrizeWinnersResponse)\n}", "func (o *PostKeysKeyOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func (o *Operation) writeResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func writeResponse(data interface{}, w http.ResponseWriter) error {\n\tvar (\n\t\tenc []byte\n\t\terr error\n\t)\n\tenc, err = json.Marshal(data)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Failure to marshal, err = %s\", err)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tn, err := w.Write(enc)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Failure to write, err = %s\", err)\n\t}\n\tif n != len(enc) {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn 
fmt.Errorf(\"Short write sent = %d, wrote = %d\", len(enc), n)\n\t}\n\treturn nil\n}", "func (o *CreateUserOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateMoveTaskOrderPostCounselingInformationOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func WriteResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\tif err != nil {\n\t\tlogger.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func (o *PutQuestionOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (r *response) Write(b []byte) (n int, err error) {\n\tif !r.headersSend {\n\t\tif r.status == 0 {\n\t\t\tr.status = http.StatusOK\n\t\t}\n\t\tr.WriteHeader(r.status)\n\t}\n\tn, err = r.ResponseWriter.Write(b)\n\tr.size += int64(n)\n\treturn\n}", "func (o *PostOperationsDeleteP2PPathCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *HealthGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *WeaviateThingsPatchNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n}", "func (o *VerifyEmailTokenOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *DeleteServiceIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *WeaviateThingsGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *Operation) writeResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\t// as of now, just log errors for writing response\n\tif err != nil {\n\t\tlogger.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func (o *PostOperationsGetNodeEdgePointDetailsCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *UserEditOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *WeaviatePeersAnnounceOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func (o *CertifyOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) 
{\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func writeResponse(writer http.ResponseWriter, response *http.Response) (int64, error) {\n\tdefer response.Body.Close()\n\twriteResponseHeaders(writer, response, false)\n\treturn io.Copy(writer, response.Body)\n}", "func (o *PutMeetupDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *FingerPathsPostCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *PostPlaybookOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateHostIgnitionCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *GetCharactersCharacterIDLocationOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetPingDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *PostManagementKubernetesIoV1NodesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *PutPerformancesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *StopAppAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(202)\n}", "func (o *GetFleetsFleetIDMembersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Content-Language\n\n\tcontentLanguage := o.ContentLanguage\n\tif contentLanguage != \"\" {\n\t\trw.Header().Set(\"Content-Language\", contentLanguage)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = 
make(models.GetFleetsFleetIDMembersOKBody, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *GetMeetupsDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *PostEventCreated) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(201)\n}", "func (o *GetTaskTaskIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateTCPCheckAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Reload-ID\n\n\treloadID := o.ReloadID\n\tif reloadID != \"\" {\n\t\trw.Header().Set(\"Reload-ID\", reloadID)\n\t}\n\n\trw.WriteHeader(202)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *PostOperationsGetNetworkElementListCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *ServiceInstanceLastOperationGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header RetryAfter\n\n\tretryAfter := o.RetryAfter\n\tif retryAfter != \"\" {\n\t\trw.Header().Set(\"RetryAfter\", retryAfter)\n\t}\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetPiecesIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetTaskDetailsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *UpdateClusterOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetDetailOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\trw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetServicesHaproxyRuntimeAclsIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (r 
*responseInfoRecorder) Write(b []byte) (int, error) {\n\tr.ContentLength += int64(len(b))\n\tif r.statusCode == 0 {\n\t\tr.statusCode = http.StatusOK\n\t}\n\treturn r.ResponseWriter.Write(b)\n}", "func (o *LogoutOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UploadFileOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func WriteResponse(w http.ResponseWriter, data interface{}) error {\n\tenv := map[string]interface{}{\n\t\t\"meta\": map[string]interface{}{\n\t\t\t\"code\": http.StatusOK,\n\t\t},\n\t\t\"data\": data,\n\t}\n\treturn jsonResponse(w, env)\n}", "func (o *WeaviateThingTemplatesCreateNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n}", "func (r *Responder) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\tfor k, v := range r.headers {\n\t\tfor _, val := range v {\n\t\t\trw.Header().Add(k, val)\n\t\t}\n\t}\n\n\trw.WriteHeader(r.code)\n\n\tif r.response != nil {\n\t\tif err := producer.Produce(rw, r.response); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}", "func (o *GetGateSourceByGateNameAndMntOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateSpoeCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tpayload := o.Payload\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (o *Output) writeResponse(response string) error {\r\n\t// write the response\r\n\tif _, err := o.writer.WriteString(response + \"\\n\"); err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\treturn nil\r\n}", "func (o *GetTransportByIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *TransferOK) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(200)\n\tif err := producer.Produce(rw, o.Payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *CreateUserCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *ViewOneOrderOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetVisiblePruebasFromQuestionTestInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(500)\n}", "func (o *GetWhaleTranfersOK) WriteResponse(rw 
http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\t// return empty array\n\t\tpayload = make([]*models.OperationsRow, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (o *SearchTournamentsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make([]*models.Tournament, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *CreateTCPCheckCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (s *Server) writeInfoResponse(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tmessage []byte,\n\tstatus int,\n\theaders map[string]string,\n) {\n\tfor k, v := range headers {\n\t\tw.Header().Add(k, v)\n\t}\n\n\tw.WriteHeader(status)\n\tw.Write(message)\n}" ]
[ "0.81303823", "0.7882039", "0.77722245", "0.7771901", "0.7753117", "0.7740585", "0.76670325", "0.7638451", "0.76095873", "0.75798", "0.7579178", "0.7567389", "0.7560546", "0.75579476", "0.75447774", "0.7542929", "0.75416607", "0.753386", "0.7531158", "0.75192654", "0.75191355", "0.7513389", "0.7512029", "0.75050455", "0.7503395", "0.74984574", "0.74875605", "0.74839836", "0.74772394", "0.7467842", "0.746699", "0.7465759", "0.7464175", "0.746404", "0.746404", "0.7461224", "0.7460309", "0.74595356", "0.74463046", "0.7443478", "0.7435917", "0.7426582", "0.7425581", "0.74186546", "0.7413175", "0.7407469", "0.74063516", "0.74048966", "0.7398737", "0.7389631", "0.738607", "0.73806983", "0.7360552", "0.7360491", "0.7355327", "0.7354953", "0.73532444", "0.7347445", "0.734586", "0.732798", "0.732577", "0.73178244", "0.7316643", "0.7316071", "0.7315527", "0.7312546", "0.73114824", "0.7310336", "0.7309039", "0.73007035", "0.7297214", "0.7291373", "0.7291277", "0.72884554", "0.72845477", "0.72835207", "0.7281928", "0.7281033", "0.72751075", "0.7274423", "0.7273193", "0.72730565", "0.72695094", "0.7269139", "0.72690886", "0.7265927", "0.72615093", "0.72529227", "0.7251764", "0.72490144", "0.72479355", "0.72469014", "0.72407585", "0.72390425", "0.72367245", "0.7234706", "0.722777", "0.722197", "0.7215153", "0.72140837", "0.7213089" ]
0.0
-1
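Note on the record above: the positive document follows the go-swagger responder convention that also appears throughout its negatives. A minimal self-contained sketch of that pattern, assuming only net/http and github.com/go-openapi/runtime (the type name EmptyUnauthorized is hypothetical, not taken from the record):

package responders

import (
	"net/http"

	"github.com/go-openapi/runtime"
)

// EmptyUnauthorized is a body-less 401 responder in the go-swagger style.
type EmptyUnauthorized struct{}

// WriteResponse deletes the Content-Type header (the response carries no
// payload) and then writes the 401 status code.
func (o *EmptyUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
	rw.Header().Del(runtime.HeaderContentType) // no body, so no Content-Type
	rw.WriteHeader(http.StatusUnauthorized)
}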
Deprecated: Use HelloRequest.ProtoReflect.Descriptor instead.
func (*HelloRequest) Descriptor() ([]byte, []int) {
	return file_proto_sample_proto_rawDescGZIP(), []int{0}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_hello_proto_rawDescGZIP(), []int{0}\n}", "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_helloworld_proto_rawDescGZIP(), []int{0}\n}", "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_helloworld_proto_rawDescGZIP(), []int{0}\n}", "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_proto_rawDescGZIP(), []int{0}\n}", "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_helloservice_proto_rawDescGZIP(), []int{0}\n}", "func (*SayHelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_proto_rawDescGZIP(), []int{0}\n}", "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_tutorial_proto_rawDescGZIP(), []int{1}\n}", "func (*SayHelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_runtime_proto_rawDescGZIP(), []int{14}\n}", "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_mkit_service_greeter_v1_greeter_proto_rawDescGZIP(), []int{0}\n}", "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_config_hello_proto_rawDescGZIP(), []int{0}\n}", "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_sil_proto_rawDescGZIP(), []int{0}\n}", "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_basic_basic_proto_rawDescGZIP(), []int{1}\n}", "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_quickstart_greeter_proto_rawDescGZIP(), []int{0}\n}", "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_server_proto_rawDescGZIP(), []int{0}\n}", "func (*SuperNodeRequest_HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_supernode_proto_rawDescGZIP(), []int{4, 0}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_helloworld_proto_rawDescGZIP(), []int{2}\n}", "func (*PaqueteRequest) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_helloworld_proto_rawDescGZIP(), []int{2}\n}", "func (*ModifyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{10}\n}", "func (*GetServiceRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{6}\n}", "func (*WebhookRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_dialogflow_v2beta1_webhook_proto_rawDescGZIP(), []int{0}\n}", "func (*DeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{7}\n}", "func (*CodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_helloworld_proto_rawDescGZIP(), []int{1}\n}", "func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_comments_proto_rawDescGZIP(), []int{3}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_api_protobuf_spec_example_example_proto_rawDescGZIP(), []int{1}\n}", "func (*ListenRequest) Descriptor() ([]byte, []int) {\n\treturn file_faultinjector_proto_rawDescGZIP(), []int{8}\n}", "func (*HelloReq) Descriptor() ([]byte, []int) {\n\treturn file_helloword_proto_rawDescGZIP(), []int{0}\n}", "func (*AddPeerRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{8}\n}", "func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{3}\n}", "func (*GetPeerInfoRequest) Descriptor() ([]byte, []int) {\n\treturn 
file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{6}\n}", "func (*CreateAlterRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{1}\n}", "func (*CMsgClientToGCPlayerStatsRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{143}\n}", "func (*DeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{7}\n}", "func (x *fastReflection_AddressStringToBytesRequest) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_AddressStringToBytesRequest\n}", "func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}", "func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{0}\n}", "func (*UpdatePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{9}\n}", "func (*DiagnoseRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_api_proto_rawDescGZIP(), []int{16}\n}", "func (*GreetRequest) Descriptor() ([]byte, []int) {\n\treturn file_greet_proto_rawDescGZIP(), []int{0}\n}", "func (*CMsgGCPlayerInfoRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{117}\n}", "func (*DeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{10}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_msgs_msgs_proto_rawDescGZIP(), []int{14}\n}", "func (*CalculatorRequest) Descriptor() ([]byte, []int) {\n\treturn file_basicpb_unary_api_proto_rawDescGZIP(), []int{4}\n}", "func (*PatchKeysRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{74}\n}", "func (*GreetRequest) Descriptor() ([]byte, []int) {\n\treturn file_chat_proto_rawDescGZIP(), []int{0}\n}", "func (*GenerateMessageRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_discuss_service_proto_rawDescGZIP(), []int{0}\n}", "func (*TelemetryRequest) Descriptor() ([]byte, []int) {\n\treturn file_automate_gateway_api_telemetry_telemetry_proto_rawDescGZIP(), []int{0}\n}", "func (*Hello) Descriptor() ([]byte, []int) {\n\treturn file_proto_laptopService_proto_rawDescGZIP(), []int{3}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_example_proto_rawDescGZIP(), []int{1}\n}", "func (*GetVersionRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{9}\n}", "func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_index_faults_rpc_rpc_proto_rawDescGZIP(), []int{2}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_example_proto_rawDescGZIP(), []int{0}\n}", "func (*DeleteFriendRequest) Descriptor() ([]byte, []int) {\n\treturn file_console_proto_rawDescGZIP(), []int{7}\n}", "func (*OriginalDetectIntentRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_dialogflow_v2beta1_webhook_proto_rawDescGZIP(), []int{2}\n}", "func (x *fastReflection_AddressBytesToStringRequest) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_AddressBytesToStringRequest\n}", "func (*HelloResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_hello_proto_rawDescGZIP(), []int{1}\n}", "func (*GreetRequest) Descriptor() ([]byte, []int) {\n\treturn 
file_greeter_greeterpb_greeter_proto_rawDescGZIP(), []int{1}\n}", "func (*GeneratedRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_auth_proto_rawDescGZIP(), []int{0}\n}", "func (*PatchAnnotationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{4}\n}", "func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_proto_rawDescGZIP(), []int{2}\n}", "func (*DescribeRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{4}\n}", "func (*EndpointRequest) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{13}\n}", "func (*HealthCheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_greeter_proto_health_health_proto_rawDescGZIP(), []int{0}\n}", "func (*ModelControlRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_service_proto_rawDescGZIP(), []int{4}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_pkg_smgrpc_smgrpc_proto_rawDescGZIP(), []int{0}\n}", "func (*DeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_contact_proto_rawDescGZIP(), []int{10}\n}", "func (*AddRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_calculator_proto_calc_proto_rawDescGZIP(), []int{0}\n}", "func ProtoFromDescriptor(d protoreflect.Descriptor) proto.Message {\n\tswitch d := d.(type) {\n\tcase protoreflect.FileDescriptor:\n\t\treturn ProtoFromFileDescriptor(d)\n\tcase protoreflect.MessageDescriptor:\n\t\treturn ProtoFromMessageDescriptor(d)\n\tcase protoreflect.FieldDescriptor:\n\t\treturn ProtoFromFieldDescriptor(d)\n\tcase protoreflect.OneofDescriptor:\n\t\treturn ProtoFromOneofDescriptor(d)\n\tcase protoreflect.EnumDescriptor:\n\t\treturn ProtoFromEnumDescriptor(d)\n\tcase protoreflect.EnumValueDescriptor:\n\t\treturn ProtoFromEnumValueDescriptor(d)\n\tcase protoreflect.ServiceDescriptor:\n\t\treturn ProtoFromServiceDescriptor(d)\n\tcase protoreflect.MethodDescriptor:\n\t\treturn ProtoFromMethodDescriptor(d)\n\tdefault:\n\t\t// WTF??\n\t\tif res, ok := d.(DescriptorProtoWrapper); ok {\n\t\t\treturn res.AsProto()\n\t\t}\n\t\treturn nil\n\t}\n}", "func (*HealthCheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_internal_proto_files_domain_probes_proto_rawDescGZIP(), []int{0}\n}", "func (*HealthCheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_health_service_proto_rawDescGZIP(), []int{0}\n}", "func (*PingRequest) Descriptor() ([]byte, []int) {\n\treturn file_internal_crosstest_v1test_cross_proto_rawDescGZIP(), []int{0}\n}", "func (*DeleteMicroRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_micro_pb_request_proto_rawDescGZIP(), []int{4}\n}", "func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_index_proto_rawDescGZIP(), []int{10}\n}", "func (*CBroadcast_WebRTCStopped_Request) Descriptor() ([]byte, []int) {\n\treturn file_steammessages_broadcast_steamclient_proto_rawDescGZIP(), []int{47}\n}", "func (*DeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{12}\n}", "func (*CMsgProfileRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{275}\n}", "func (*GetSomesRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{5}\n}", "func (*CMsgLoadedRequest) Descriptor() ([]byte, []int) {\n\treturn file_steam_htmlmessages_proto_rawDescGZIP(), []int{46}\n}", "func (*DescribePermissionRequest) Descriptor() ([]byte, []int) 
{\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{6}\n}", "func (*RefreshRequest) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{16}\n}", "func (*CBroadcast_UpdateChatMessageFlair_Request) Descriptor() ([]byte, []int) {\n\treturn file_steammessages_broadcast_steamclient_proto_rawDescGZIP(), []int{24}\n}", "func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_app_config_agent_cmd_grpcserver_proto_api_app_config_proto_rawDescGZIP(), []int{15}\n}", "func (*UpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{5}\n}", "func (*CheckPermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_permission_pb_request_proto_rawDescGZIP(), []int{2}\n}", "func (*DeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_message_service_proto_rawDescGZIP(), []int{0}\n}", "func (*FeedbackRequest) Descriptor() ([]byte, []int) {\n\treturn file_ssn_dataservice_v1_dataservice_proto_rawDescGZIP(), []int{10}\n}", "func (*UnaryMapMessageRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{10}\n}", "func (*SimpleRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_test_proto_rawDescGZIP(), []int{2}\n}", "func (*CheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_api_servicecontrol_v1_service_controller_proto_rawDescGZIP(), []int{0}\n}", "func (x *fastReflection_Bech32PrefixRequest) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_Bech32PrefixRequest\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_service_face_detector_proto_rawDescGZIP(), []int{0}\n}", "func (*DeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_ssn_dataservice_v1_dataservice_proto_rawDescGZIP(), []int{14}\n}", "func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_gnmi_gnmi_proto_rawDescGZIP(), []int{18}\n}", "func (*HeartbeatRequest) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_clusrun_proto_rawDescGZIP(), []int{0}\n}", "func (*UpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_contact_proto_rawDescGZIP(), []int{12}\n}", "func (*QueryPermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_permission_pb_request_proto_rawDescGZIP(), []int{0}\n}", "func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_weather_proto_rawDescGZIP(), []int{4}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_kv_proto_rawDescGZIP(), []int{0}\n}", "func (*DeleteFriendsRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{29}\n}", "func (*ProxyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_sample_proto_rawDescGZIP(), []int{4}\n}", "func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_collector_collector_proto_rawDescGZIP(), []int{3}\n}" ]
[ "0.7447499", "0.7389765", "0.7389765", "0.7376233", "0.73273814", "0.7258341", "0.72158563", "0.7211136", "0.7184502", "0.71610767", "0.71385276", "0.7132852", "0.7113669", "0.6983269", "0.69459534", "0.6927341", "0.67780125", "0.6756823", "0.6741548", "0.67001593", "0.6670888", "0.6634616", "0.66287273", "0.6627337", "0.6606684", "0.66027045", "0.65918833", "0.6585244", "0.6583082", "0.6575219", "0.6568485", "0.6565675", "0.6563586", "0.6560971", "0.65582234", "0.6553738", "0.6539365", "0.65372", "0.65371054", "0.6533998", "0.65317684", "0.65299046", "0.6523171", "0.6521793", "0.6519171", "0.65165937", "0.65146035", "0.6505689", "0.6503984", "0.65000033", "0.6498041", "0.64977056", "0.6490221", "0.64807683", "0.64791703", "0.6476309", "0.64761335", "0.6475735", "0.6475188", "0.6474953", "0.6472988", "0.64723516", "0.6470984", "0.6464769", "0.64567626", "0.64556044", "0.6454943", "0.6449819", "0.644959", "0.64490646", "0.64478254", "0.6446261", "0.64442784", "0.64434457", "0.6441016", "0.64390785", "0.6438014", "0.64374804", "0.64353704", "0.6434444", "0.64340127", "0.6433515", "0.6431575", "0.6431464", "0.64283985", "0.6426795", "0.6424296", "0.64225686", "0.6422276", "0.6421651", "0.64147", "0.64123464", "0.64100206", "0.6409432", "0.64068115", "0.6406586", "0.64062226", "0.640511", "0.6402843", "0.6401389" ]
0.7184086
9
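Note on the record above: the query is the standard deprecation comment that protoc-gen-go emits for generated Descriptor methods. A short sketch of the replacement it points to, assuming a generated package pb containing HelloRequest (the import path and printed name are hypothetical):

package main

import (
	"fmt"

	pb "example.com/hello/proto" // hypothetical generated package
)

func main() {
	// The modern API: obtain the message descriptor via protoreflect
	// instead of the deprecated Descriptor() ([]byte, []int) method.
	md := (&pb.HelloRequest{}).ProtoReflect().Descriptor()
	fmt.Println(md.FullName()) // e.g. "sample.HelloRequest"
}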
Deprecated: Use HelloResponse.ProtoReflect.Descriptor instead.
func (*HelloResponse) Descriptor() ([]byte, []int) {
	return file_proto_sample_proto_rawDescGZIP(), []int{1}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (*HelloResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_hello_proto_rawDescGZIP(), []int{1}\n}", "func (*SayHelloResponse) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_proto_rawDescGZIP(), []int{1}\n}", "func (*SayHelloResponse) Descriptor() ([]byte, []int) {\n\treturn file_runtime_proto_rawDescGZIP(), []int{15}\n}", "func (*HelloResponse) Descriptor() ([]byte, []int) {\n\treturn file_mkit_service_greeter_v1_greeter_proto_rawDescGZIP(), []int{1}\n}", "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_hello_proto_rawDescGZIP(), []int{0}\n}", "func (*SayHelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_proto_rawDescGZIP(), []int{0}\n}", "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_helloworld_proto_rawDescGZIP(), []int{0}\n}", "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_helloworld_proto_rawDescGZIP(), []int{0}\n}", "func (*Response) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_helloworld_proto_rawDescGZIP(), []int{3}\n}", "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_proto_rawDescGZIP(), []int{0}\n}", "func (*HelloReply) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_helloworld_proto_rawDescGZIP(), []int{6}\n}", "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_helloservice_proto_rawDescGZIP(), []int{0}\n}", "func (*HelloReply) Descriptor() ([]byte, []int) {\n\treturn file_helloservice_proto_rawDescGZIP(), []int{1}\n}", "func (*HelloReply) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_helloworld_proto_rawDescGZIP(), []int{1}\n}", "func (*SayHelloResponse_Data) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*SayHelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_runtime_proto_rawDescGZIP(), []int{14}\n}", "func (*AddPeerResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{30}\n}", "func (*HelloReply) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_proto_rawDescGZIP(), []int{1}\n}", "func (*GetPeerInfoResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{28}\n}", "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{1}\n}", "func (*ModifyResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{11}\n}", "func (*Hello) Descriptor() ([]byte, []int) {\n\treturn file_proto_laptopService_proto_rawDescGZIP(), []int{3}\n}", "func (*Response) Descriptor() ([]byte, []int) {\n\treturn file_api_protobuf_spec_example_example_proto_rawDescGZIP(), []int{2}\n}", "func (*ListenResponse) Descriptor() ([]byte, []int) {\n\treturn file_faultinjector_proto_rawDescGZIP(), []int{9}\n}", "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_contact_proto_rawDescGZIP(), []int{15}\n}", "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_mkit_service_greeter_v1_greeter_proto_rawDescGZIP(), []int{0}\n}", "func (*GetMetricsInfoResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{44}\n}", "func (x *fastReflection_AddressStringToBytesResponse) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_AddressStringToBytesResponse\n}", "func (x 
*fastReflection_MsgUpdateParamsResponse) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_MsgUpdateParamsResponse\n}", "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_sample_proto_rawDescGZIP(), []int{0}\n}", "func (*GetVersionResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{31}\n}", "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_sil_proto_rawDescGZIP(), []int{0}\n}", "func (*HelloReply) Descriptor() ([]byte, []int) {\n\treturn file_sil_proto_rawDescGZIP(), []int{5}\n}", "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_tutorial_proto_rawDescGZIP(), []int{1}\n}", "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_weather_proto_rawDescGZIP(), []int{17}\n}", "func (*CreateAlterResponse) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{2}\n}", "func (*HelloReply) Descriptor() ([]byte, []int) {\n\treturn file_tutorial_proto_rawDescGZIP(), []int{2}\n}", "func (*DeleteResponse) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{11}\n}", "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_config_hello_proto_rawDescGZIP(), []int{0}\n}", "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_quickstart_greeter_proto_rawDescGZIP(), []int{0}\n}", "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_url_proto_rawDescGZIP(), []int{4}\n}", "func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}", "func (*HelloReply) Descriptor() ([]byte, []int) {\n\treturn file_basic_basic_proto_rawDescGZIP(), []int{2}\n}", "func (*Response) Descriptor() ([]byte, []int) {\n\treturn file_example_proto_rawDescGZIP(), []int{1}\n}", "func (*Hello) Descriptor() ([]byte, []int) {\n\treturn file_hello_proto_rawDescGZIP(), []int{0}\n}", "func (*PingResponse) Descriptor() ([]byte, []int) {\n\treturn file_internal_crosstest_v1test_cross_proto_rawDescGZIP(), []int{1}\n}", "func (*DescribeResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{5}\n}", "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_basic_basic_proto_rawDescGZIP(), []int{1}\n}", "func (*TelemetryResponse) Descriptor() ([]byte, []int) {\n\treturn file_automate_gateway_api_telemetry_telemetry_proto_rawDescGZIP(), []int{1}\n}", "func (*DiagnoseResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_api_proto_rawDescGZIP(), []int{17}\n}", "func (*DeleteResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_containerd_containerd_runtime_v1_shim_v1_shim_proto_rawDescGZIP(), []int{2}\n}", "func (*DeleteResponse) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{8}\n}", "func (*ApiVersionResponse) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{14}\n}", "func (*Response) Descriptor() ([]byte, []int) {\n\treturn file_interservice_notifications_service_events_proto_rawDescGZIP(), []int{10}\n}", "func (*FindWebhookCallRequest_Response) Descriptor() ([]byte, []int) {\n\treturn file_events_Event_proto_rawDescGZIP(), []int{9, 0}\n}", "func (*DeleteTeam_Response) Descriptor() ([]byte, []int) {\n\treturn file_uac_Team_proto_rawDescGZIP(), []int{6, 0}\n}", "func (x *fastReflection_AddressBytesToStringResponse) Descriptor() 
protoreflect.MessageDescriptor {\n\treturn md_AddressBytesToStringResponse\n}", "func (*GenerateMessageResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_discuss_service_proto_rawDescGZIP(), []int{1}\n}", "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_versions_v1_versions_proto_rawDescGZIP(), []int{1}\n}", "func (*LivenessCheckResponse) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_index_proto_rawDescGZIP(), []int{0}\n}", "func (*GetStatsResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{45}\n}", "func (*GetTeamByName_Response) Descriptor() ([]byte, []int) {\n\treturn file_uac_Team_proto_rawDescGZIP(), []int{2, 0}\n}", "func (*FindWebhookCallRequest_Response) Descriptor() ([]byte, []int) {\n\treturn file_uac_Event_proto_rawDescGZIP(), []int{7, 0}\n}", "func (*Response) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_newfindmaxpb_newfindmaxpb_proto_rawDescGZIP(), []int{1}\n}", "func (*GetSomesResponse) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{6}\n}", "func (*StreamingResponse) Descriptor() ([]byte, []int) {\n\treturn file_api_protobuf_spec_example_example_proto_rawDescGZIP(), []int{4}\n}", "func (*GetTeamById_Response) Descriptor() ([]byte, []int) {\n\treturn file_uac_Team_proto_rawDescGZIP(), []int{1, 0}\n}", "func (x *fastReflection_Bech32PrefixResponse) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_Bech32PrefixResponse\n}", "func (*HealthCheckResponse) Descriptor() ([]byte, []int) {\n\treturn file_internal_proto_files_domain_probes_proto_rawDescGZIP(), []int{1}\n}", "func (*CheckLiveResponse) Descriptor() ([]byte, []int) {\n\treturn file_health_proto_rawDescGZIP(), []int{3}\n}", "func (*RefreshResponse) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{17}\n}", "func (*HelloReply) Descriptor() ([]byte, []int) {\n\treturn file_config_hello_proto_rawDescGZIP(), []int{1}\n}", "func (*ProtoResponse) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{2}\n}", "func (*GreetResponse) Descriptor() ([]byte, []int) {\n\treturn file_greet_proto_rawDescGZIP(), []int{1}\n}", "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_server_proto_rawDescGZIP(), []int{0}\n}", "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_wallet_proto_rawDescGZIP(), []int{8}\n}", "func (*ProxyResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_sample_proto_rawDescGZIP(), []int{5}\n}", "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_v1_proto_rawDescGZIP(), []int{7}\n}", "func (*EndpointResponse) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{14}\n}", "func (*HelloReply) Descriptor() ([]byte, []int) {\n\treturn file_quickstart_greeter_proto_rawDescGZIP(), []int{1}\n}", "func (*WebhookResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_dialogflow_v2beta1_webhook_proto_rawDescGZIP(), []int{1}\n}", "func (*GreetResponse) Descriptor() ([]byte, []int) {\n\treturn file_greeter_greeterpb_greeter_proto_rawDescGZIP(), []int{2}\n}", "func (*GreetResponse) Descriptor() ([]byte, []int) {\n\treturn file_chat_proto_rawDescGZIP(), []int{1}\n}", "func (*DeleteEndpointApiResponse) Descriptor() ([]byte, []int) {\n\treturn file_endpoint_api_proto_rawDescGZIP(), []int{3}\n}", "func (*DeregisterResponse) 
Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{9}\n}", "func (*TestWebhookRequest_Response) Descriptor() ([]byte, []int) {\n\treturn file_events_Event_proto_rawDescGZIP(), []int{8, 0}\n}", "func (*MetricsResponse) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_index_proto_rawDescGZIP(), []int{25}\n}", "func (*CreateResponse) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{4}\n}", "func (*SimpleResponse) Descriptor() ([]byte, []int) {\n\treturn file_grpc_test_proto_rawDescGZIP(), []int{3}\n}", "func (*GetMessageResponse) Descriptor() ([]byte, []int) {\n\treturn file_chat_v1_messages_proto_rawDescGZIP(), []int{12}\n}", "func (*UpdateTelemetryReportedResponse) Descriptor() ([]byte, []int) {\n\treturn file_external_applications_applications_proto_rawDescGZIP(), []int{30}\n}", "func (*ApplyResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescGZIP(), []int{1}\n}", "func (*DeleteResponse) Descriptor() ([]byte, []int) {\n\treturn file_weather_proto_rawDescGZIP(), []int{9}\n}", "func (*Response) Descriptor() ([]byte, []int) {\n\treturn file_proto_service_proto_rawDescGZIP(), []int{2}\n}", "func (*LanguageDetectorResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_language_proto_rawDescGZIP(), []int{2}\n}", "func (*Response) Descriptor() ([]byte, []int) {\n\treturn file_Notify_proto_rawDescGZIP(), []int{7}\n}", "func (*DeleteMessageResponse) Descriptor() ([]byte, []int) {\n\treturn file_chat_v1_messages_proto_rawDescGZIP(), []int{14}\n}", "func (*DelResponse) Descriptor() ([]byte, []int) {\n\treturn file_patrol_proto_rawDescGZIP(), []int{9}\n}", "func (*FindWebhookRequest_Response) Descriptor() ([]byte, []int) {\n\treturn file_events_Event_proto_rawDescGZIP(), []int{3, 0}\n}", "func (*InferResponse) Descriptor() ([]byte, []int) {\n\treturn file_grpc_service_proto_rawDescGZIP(), []int{9}\n}" ]
[ "0.7270073", "0.7144889", "0.7027907", "0.69991654", "0.68775064", "0.68553793", "0.6839564", "0.6839564", "0.68357813", "0.682933", "0.68044037", "0.68022704", "0.6790483", "0.6787987", "0.6772483", "0.67659074", "0.67601085", "0.67564195", "0.67393154", "0.671657", "0.6709283", "0.670867", "0.66861886", "0.66799897", "0.6667233", "0.66610944", "0.6654677", "0.66474825", "0.66471547", "0.66217047", "0.6617534", "0.6604826", "0.6602085", "0.65956897", "0.6586768", "0.65757465", "0.6574776", "0.65723556", "0.65715605", "0.65563804", "0.6544345", "0.65351045", "0.6534471", "0.65272725", "0.65257335", "0.652519", "0.65245897", "0.6524295", "0.652287", "0.6520677", "0.6519312", "0.6519223", "0.651893", "0.6512254", "0.65102893", "0.64975184", "0.64909613", "0.64870375", "0.6481466", "0.6480128", "0.64740205", "0.6473704", "0.6473007", "0.6472315", "0.64658445", "0.646574", "0.64631695", "0.6462652", "0.64625597", "0.6459649", "0.6456917", "0.64551675", "0.6453084", "0.645166", "0.64505374", "0.64466137", "0.64462245", "0.64434296", "0.6441626", "0.6439274", "0.64283705", "0.6426427", "0.642412", "0.6421516", "0.64209414", "0.6420177", "0.64195836", "0.64186496", "0.64170176", "0.64167833", "0.6416548", "0.6415885", "0.6415388", "0.64148587", "0.6414285", "0.64142764", "0.641355", "0.6411835", "0.6411572", "0.6408438" ]
0.702858
2
Deprecated: Use CreateUserRequest.ProtoReflect.Descriptor instead.
func (*CreateUserRequest) Descriptor() ([]byte, []int) {
	return file_proto_sample_proto_rawDescGZIP(), []int{2}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_iam_proto_v1alpha_user_service_proto_rawDescGZIP(), []int{9}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_user_proto_rawDescGZIP(), []int{6}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{1}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_proto_rawDescGZIP(), []int{2}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_protob_user_service_proto_rawDescGZIP(), []int{5}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_users_proto_rawDescGZIP(), []int{4}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_user_v1_user_proto_rawDescGZIP(), []int{0}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{9}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_service_proto_rawDescGZIP(), []int{0}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_user_proto_rawDescGZIP(), []int{1}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_examplepb_example_proto_rawDescGZIP(), []int{3}\n}", "func (*GetOrCreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_server_pb_UserService_proto_rawDescGZIP(), []int{1}\n}", "func (*CreateUserReq) Descriptor() ([]byte, []int) {\n\treturn file_v1_user_proto_rawDescGZIP(), []int{2}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{11}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{11}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_view_grpc_blog_api_proto_rawDescGZIP(), []int{2}\n}", "func (*CreateRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_users_v1_users_proto_rawDescGZIP(), []int{0}\n}", "func (*CreateUserReq) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{3}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_thoughts_proto_rawDescGZIP(), []int{0}\n}", "func (*CreatePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{17}\n}", "func (*CreateTokenRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_user_proto_rawDescGZIP(), []int{10}\n}", "func (*ProtoCreateUser) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{1}\n}", "func (*CreateUserRequest_Data) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_toit_api_organization_proto_rawDescGZIP(), []int{2}\n}", "func (*AddUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_user_proto_rawDescGZIP(), []int{8}\n}", "func (*CreateRoleRequest) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{9}\n}", "func (*AddUserToProjectRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{19}\n}", "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_proto_rawDescGZIP(), []int{3}\n}", "func (*CreateUserReply) Descriptor() ([]byte, []int) {\n\treturn 
file_api_user_v1_user_proto_rawDescGZIP(), []int{1}\n}", "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_protob_user_service_proto_rawDescGZIP(), []int{6}\n}", "func (*UpdateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_user_proto_rawDescGZIP(), []int{15}\n}", "func (*AddUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_user_proto_rawDescGZIP(), []int{5}\n}", "func (*CreateUserReply) Descriptor() ([]byte, []int) {\n\treturn file_v1_user_proto_rawDescGZIP(), []int{3}\n}", "func (*WatchUserRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_iam_proto_v1alpha_user_service_proto_rawDescGZIP(), []int{5}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_iam_proto_v1alpha_user_service_proto_rawDescGZIP(), []int{11}\n}", "func (*AddUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_API_user_proto_rawDescGZIP(), []int{0}\n}", "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_users_proto_rawDescGZIP(), []int{5}\n}", "func (*NewUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_Inspirit_service_proto_rawDescGZIP(), []int{2}\n}", "func (*UpdateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{0}\n}", "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_pb_auth_proto_rawDescGZIP(), []int{1}\n}", "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_user_proto_rawDescGZIP(), []int{2}\n}", "func (*CreateUserReply) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{4}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_protob_user_service_proto_rawDescGZIP(), []int{7}\n}", "func (*UserRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpcuser_proto_user_proto_rawDescGZIP(), []int{0}\n}", "func (*UpdateUserRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_iam_proto_v1alpha_user_service_proto_rawDescGZIP(), []int{10}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_pb_user_user_proto_rawDescGZIP(), []int{11}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_user_proto_rawDescGZIP(), []int{9}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_server_pb_UserService_proto_rawDescGZIP(), []int{7}\n}", "func (*CreateRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_proto_rawDescGZIP(), []int{3}\n}", "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{10}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{11}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_user_v1_user_proto_rawDescGZIP(), []int{4}\n}", "func (*AddUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_example_proto_rawDescGZIP(), []int{0}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_service_proto_rawDescGZIP(), []int{1}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_user_proto_rawDescGZIP(), []int{13}\n}", "func (*CreateUserDevice) Descriptor() ([]byte, []int) {\n\treturn file_grpc_user_proto_rawDescGZIP(), []int{23}\n}", "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_sample_proto_rawDescGZIP(), []int{3}\n}", "func (*UpdateUserRequest) Descriptor() 
([]byte, []int) {\n\treturn file_pkg_pb_user_user_proto_rawDescGZIP(), []int{9}\n}", "func (*GetOrCreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_server_pb_UserService_proto_rawDescGZIP(), []int{2}\n}", "func (*UpdateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_user_proto_rawDescGZIP(), []int{7}\n}", "func (*UpdateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_user_v1_user_proto_rawDescGZIP(), []int{2}\n}", "func (*UpdateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_service_proto_rawDescGZIP(), []int{3}\n}", "func (*CreateNotificationRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_user_proto_rawDescGZIP(), []int{11}\n}", "func (*UpdatePasswordRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_user_proto_rawDescGZIP(), []int{16}\n}", "func (*GetUserRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_iam_proto_v1alpha_user_service_proto_rawDescGZIP(), []int{0}\n}", "func (*UserRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_user_proto_rawDescGZIP(), []int{0}\n}", "func (*UpdateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{1}\n}", "func (*CreateOrganizationRequest_User) Descriptor() ([]byte, []int) {\n\treturn file_toit_api_auth_proto_rawDescGZIP(), []int{6, 1}\n}", "func (*GetUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_server_pb_UserService_proto_rawDescGZIP(), []int{5}\n}", "func (*UpdateUserByUsernameRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{11}\n}", "func (*UserAddReq) Descriptor() ([]byte, []int) {\n\treturn file_api_interface_v1_user_proto_rawDescGZIP(), []int{0}\n}", "func (*SaveUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{0}\n}", "func (*UpdateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_example_proto_rawDescGZIP(), []int{3}\n}", "func (*NewUser) Descriptor() ([]byte, []int) {\n\treturn file_usermanage_usermanage_proto_rawDescGZIP(), []int{0}\n}", "func (*UserRequest) Descriptor() ([]byte, []int) {\n\treturn file_myapp_user_proto_rawDescGZIP(), []int{1}\n}", "func (*DeleteUserReq) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{7}\n}", "func (*PauseUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_protobuf_spec_connection_user_v1_proto_rawDescGZIP(), []int{11}\n}", "func (*UpdateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_examplepb_example_proto_rawDescGZIP(), []int{4}\n}", "func (*UserRequest) Descriptor() ([]byte, []int) {\n\treturn file_pb_user_proto_rawDescGZIP(), []int{0}\n}", "func (*WatchUsersRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_iam_proto_v1alpha_user_service_proto_rawDescGZIP(), []int{7}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_proto_fandncloud_service_user_user_proto_rawDescGZIP(), []int{10}\n}", "func (*GetUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_users_proto_rawDescGZIP(), []int{0}\n}", "func (*ListUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_proto_rawDescGZIP(), []int{6}\n}", "func (*GetUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_user_v1_user_proto_rawDescGZIP(), []int{6}\n}", "func (*DisconnectUsersRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_protobuf_spec_connection_user_v1_proto_rawDescGZIP(), []int{18}\n}", "func (*TwoFactorAuthRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_user_proto_rawDescGZIP(), 
[]int{19}\n}", "func (*CreatePermissionRequest_Data) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{17, 0}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_Inspirit_service_proto_rawDescGZIP(), []int{6}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_hezzel_proto_rawDescGZIP(), []int{3}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_activity_proto_rawDescGZIP(), []int{13}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_view_grpc_blog_api_proto_rawDescGZIP(), []int{8}\n}", "func (*UpdatePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{16}\n}", "func (*GetUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_user_proto_rawDescGZIP(), []int{3}\n}", "func (*GetUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_pb_user_user_proto_rawDescGZIP(), []int{7}\n}", "func (*ReportUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_protobuf_spec_connection_user_v1_proto_rawDescGZIP(), []int{26}\n}", "func (*SignupRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{5}\n}", "func (*UserRequest) Descriptor() ([]byte, []int) {\n\treturn file_userapi_proto_rawDescGZIP(), []int{0}\n}", "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_view_grpc_blog_api_proto_rawDescGZIP(), []int{3}\n}", "func (*GetUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_user_proto_rawDescGZIP(), []int{3}\n}", "func (*GetUserByUsernameWithPasswordRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{5}\n}" ]
[ "0.79096454", "0.788088", "0.7824297", "0.7808451", "0.77528954", "0.7738215", "0.7722104", "0.7667685", "0.76562274", "0.7611232", "0.7585692", "0.74919266", "0.74665475", "0.7440994", "0.7440994", "0.7429693", "0.7428761", "0.7415253", "0.7392637", "0.7294176", "0.7281565", "0.7165189", "0.71433365", "0.7119209", "0.7114943", "0.7105652", "0.70942146", "0.70891833", "0.7062458", "0.7056279", "0.7055249", "0.7052401", "0.70188093", "0.7016263", "0.7007507", "0.7002371", "0.6992708", "0.699154", "0.6976548", "0.69626415", "0.6950102", "0.69378144", "0.6929998", "0.6927186", "0.6923404", "0.6912213", "0.69094056", "0.6904198", "0.68889827", "0.68886405", "0.68781537", "0.6873324", "0.6872682", "0.68599546", "0.6857215", "0.685079", "0.68415034", "0.6825154", "0.6824944", "0.6823094", "0.6822159", "0.6808205", "0.68046457", "0.68030113", "0.67955357", "0.6784382", "0.6780522", "0.67762244", "0.6773145", "0.6762311", "0.67575556", "0.6746558", "0.6740081", "0.6736703", "0.67274827", "0.6723549", "0.6703054", "0.67001426", "0.6682001", "0.66750425", "0.66724855", "0.6668891", "0.66674584", "0.6665559", "0.6664993", "0.66625667", "0.665716", "0.66531086", "0.66490334", "0.6645559", "0.66393083", "0.66320735", "0.6629743", "0.6620163", "0.6618497", "0.66172385", "0.66156465", "0.6612126", "0.6605449", "0.6605203" ]
0.7592327
10
Deprecated: Use CreateUserResponse.ProtoReflect.Descriptor instead.
func (*CreateUserResponse) Descriptor() ([]byte, []int) {
	return file_proto_sample_proto_rawDescGZIP(), []int{3}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_proto_rawDescGZIP(), []int{3}\n}", "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_protob_user_service_proto_rawDescGZIP(), []int{6}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_user_proto_rawDescGZIP(), []int{6}\n}", "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_users_proto_rawDescGZIP(), []int{5}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{1}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_iam_proto_v1alpha_user_service_proto_rawDescGZIP(), []int{9}\n}", "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_user_proto_rawDescGZIP(), []int{2}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_user_v1_user_proto_rawDescGZIP(), []int{0}\n}", "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_pb_auth_proto_rawDescGZIP(), []int{1}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_proto_rawDescGZIP(), []int{2}\n}", "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{10}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_protob_user_service_proto_rawDescGZIP(), []int{5}\n}", "func (*GetOrCreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_server_pb_UserService_proto_rawDescGZIP(), []int{2}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_users_proto_rawDescGZIP(), []int{4}\n}", "func (*CreateUserReply) Descriptor() ([]byte, []int) {\n\treturn file_api_user_v1_user_proto_rawDescGZIP(), []int{1}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_service_proto_rawDescGZIP(), []int{0}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{9}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_user_proto_rawDescGZIP(), []int{1}\n}", "func (*CreateUserReply) Descriptor() ([]byte, []int) {\n\treturn file_v1_user_proto_rawDescGZIP(), []int{3}\n}", "func (*GetOrCreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_server_pb_UserService_proto_rawDescGZIP(), []int{1}\n}", "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_view_grpc_blog_api_proto_rawDescGZIP(), []int{3}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_sample_proto_rawDescGZIP(), []int{2}\n}", "func (*ProtoCreateUser) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{1}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_examplepb_example_proto_rawDescGZIP(), []int{3}\n}", "func (*CreateUserReply) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{4}\n}", "func (*CreateRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_users_v1_users_proto_rawDescGZIP(), []int{0}\n}", "func (*CreateUserReq) Descriptor() ([]byte, []int) {\n\treturn file_v1_user_proto_rawDescGZIP(), []int{2}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_view_grpc_blog_api_proto_rawDescGZIP(), []int{2}\n}", "func (*CreateUserReq) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), 
[]int{3}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_thoughts_proto_rawDescGZIP(), []int{0}\n}", "func (*CreateTokenResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_user_proto_rawDescGZIP(), []int{11}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{11}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{11}\n}", "func (*UserResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_fandncloud_service_user_user_proto_rawDescGZIP(), []int{11}\n}", "func (*CreateResult) Descriptor() ([]byte, []int) {\n\treturn file_api_users_v1_users_proto_rawDescGZIP(), []int{1}\n}", "func (*CreatePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{17}\n}", "func (*AddUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_user_proto_rawDescGZIP(), []int{9}\n}", "func (*CreateUserDevice) Descriptor() ([]byte, []int) {\n\treturn file_grpc_user_proto_rawDescGZIP(), []int{23}\n}", "func (*CreateUserRequest_Data) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*AddUserToProjectResponse) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{20}\n}", "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_toit_api_organization_proto_rawDescGZIP(), []int{3}\n}", "func (*AddUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_API_user_proto_rawDescGZIP(), []int{1}\n}", "func (*AddUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_user_user_proto_rawDescGZIP(), []int{6}\n}", "func (*CreateTokenRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_user_proto_rawDescGZIP(), []int{10}\n}", "func (*WatchUserResponse) Descriptor() ([]byte, []int) {\n\treturn edgelq_iam_proto_v1alpha_user_service_proto_rawDescGZIP(), []int{6}\n}", "func (*DeleteUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_pb_user_user_proto_rawDescGZIP(), []int{12}\n}", "func (*DeleteUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_user_proto_rawDescGZIP(), []int{14}\n}", "func (*DeleteUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_user_user_proto_rawDescGZIP(), []int{10}\n}", "func (*DeleteUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_protob_user_service_proto_rawDescGZIP(), []int{8}\n}", "func (*NewUser) Descriptor() ([]byte, []int) {\n\treturn file_usermanage_usermanage_proto_rawDescGZIP(), []int{0}\n}", "func (*AddUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_API_user_proto_rawDescGZIP(), []int{0}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_toit_api_organization_proto_rawDescGZIP(), []int{2}\n}", "func (*UserResponse) Descriptor() ([]byte, []int) {\n\treturn file_grpcuser_proto_user_proto_rawDescGZIP(), []int{1}\n}", "func (*DeleteUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_server_pb_UserService_proto_rawDescGZIP(), []int{8}\n}", "func (*DeleteUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{12}\n}", "func (*AddUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_user_proto_rawDescGZIP(), []int{8}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_iam_proto_v1alpha_user_service_proto_rawDescGZIP(), []int{11}\n}", "func 
(*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_protob_user_service_proto_rawDescGZIP(), []int{7}\n}", "func (*CreateRoleRequest) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{9}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_proto_fandncloud_service_user_user_proto_rawDescGZIP(), []int{0}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_user_v1_user_proto_rawDescGZIP(), []int{4}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_user_proto_rawDescGZIP(), []int{9}\n}", "func (*NewUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_Inspirit_service_proto_rawDescGZIP(), []int{3}\n}", "func (*UpdateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_user_proto_rawDescGZIP(), []int{15}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_pb_user_user_proto_rawDescGZIP(), []int{11}\n}", "func (*CreateOrganizationRequest_User) Descriptor() ([]byte, []int) {\n\treturn file_toit_api_auth_proto_rawDescGZIP(), []int{6, 1}\n}", "func (*UpdateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_user_user_proto_rawDescGZIP(), []int{8}\n}", "func (*AddUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_user_proto_rawDescGZIP(), []int{5}\n}", "func (*UpdateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{0}\n}", "func (*UpdateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_pb_user_user_proto_rawDescGZIP(), []int{10}\n}", "func (*NewUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_Inspirit_service_proto_rawDescGZIP(), []int{2}\n}", "func (*UserResponse) Descriptor() ([]byte, []int) {\n\treturn file_api_user_proto_rawDescGZIP(), []int{1}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_user_proto_rawDescGZIP(), []int{13}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{11}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_server_pb_UserService_proto_rawDescGZIP(), []int{7}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_grpc_user_proto_rawDescGZIP(), []int{2}\n}", "func (*UserRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpcuser_proto_user_proto_rawDescGZIP(), []int{0}\n}", "func (*UpdateUserByUsernameResponse) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{12}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_service_proto_rawDescGZIP(), []int{1}\n}", "func (*AddTeamUser_Response) Descriptor() ([]byte, []int) {\n\treturn file_uac_Team_proto_rawDescGZIP(), []int{8, 0}\n}", "func (*GetUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_server_pb_UserService_proto_rawDescGZIP(), []int{6}\n}", "func (*GetUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_grpc_user_proto_rawDescGZIP(), []int{8}\n}", "func (*ListUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_proto_rawDescGZIP(), []int{7}\n}", "func (*GetUserByUsernameWithPasswordResponse) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{6}\n}", "func (*UserModifyResp) Descriptor() ([]byte, []int) {\n\treturn file_api_interface_v1_user_proto_rawDescGZIP(), []int{1}\n}", "func (*UserResponse) Descriptor() ([]byte, []int) {\n\treturn file_myapp_user_proto_rawDescGZIP(), 
[]int{2}\n}", "func (*WatchUserRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_iam_proto_v1alpha_user_service_proto_rawDescGZIP(), []int{5}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_GrpcServices_auth_proto_rawDescGZIP(), []int{2}\n}", "func (*RemoveTeamUser_Response) Descriptor() ([]byte, []int) {\n\treturn file_uac_Team_proto_rawDescGZIP(), []int{9, 0}\n}", "func (*AddUserToProjectRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{19}\n}", "func (*SaveUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{2}\n}", "func (*UserRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_user_proto_rawDescGZIP(), []int{0}\n}", "func (*ListUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_user_v1_user_proto_rawDescGZIP(), []int{8}\n}", "func (*TokenResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_fandncloud_service_user_user_proto_rawDescGZIP(), []int{12}\n}", "func (*GetUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_users_proto_rawDescGZIP(), []int{1}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{2}\n}", "func (*UpdateUserRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_iam_proto_v1alpha_user_service_proto_rawDescGZIP(), []int{10}\n}", "func (*DeleteUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_view_grpc_blog_api_proto_rawDescGZIP(), []int{9}\n}", "func (*GetUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_user_proto_rawDescGZIP(), []int{4}\n}", "func (*UpdateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_user_v1_user_proto_rawDescGZIP(), []int{2}\n}" ]
[ "0.7710605", "0.7689471", "0.7646486", "0.7620154", "0.760239", "0.75994676", "0.75860673", "0.75829166", "0.7570726", "0.75582385", "0.7552537", "0.7528463", "0.7508784", "0.7497497", "0.7491163", "0.74756175", "0.7465579", "0.7414428", "0.73890996", "0.7377724", "0.7337647", "0.7335472", "0.7331645", "0.73305947", "0.731908", "0.7287703", "0.72785443", "0.7243561", "0.72357863", "0.72239417", "0.7212378", "0.72035205", "0.72035205", "0.717642", "0.71532226", "0.7122926", "0.71152246", "0.70450926", "0.7037033", "0.70352876", "0.7024295", "0.70223016", "0.7022275", "0.7012971", "0.6999538", "0.69805557", "0.6977367", "0.69639724", "0.6963499", "0.6957035", "0.69474155", "0.6946338", "0.69360167", "0.69317013", "0.6923385", "0.6919684", "0.689562", "0.68893415", "0.6882772", "0.6882306", "0.6879913", "0.6878224", "0.6874854", "0.686732", "0.68601364", "0.68579", "0.6853086", "0.6851219", "0.6849992", "0.6848792", "0.68384033", "0.68336314", "0.6833215", "0.683055", "0.68251467", "0.682474", "0.68203205", "0.6817049", "0.68119437", "0.6809148", "0.6807572", "0.68042576", "0.6796458", "0.6787019", "0.6778058", "0.67765856", "0.6772564", "0.67723185", "0.67708373", "0.67658645", "0.6765599", "0.6752366", "0.6740607", "0.67293376", "0.67290473", "0.67195666", "0.6718196", "0.67162365", "0.6714324", "0.67140174" ]
0.74742067
16
Deprecated: Use ProxyRequest.ProtoReflect.Descriptor instead.
func (*ProxyRequest) Descriptor() ([]byte, []int) {
	return file_proto_sample_proto_rawDescGZIP(), []int{4}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (*ProxyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_url_proto_rawDescGZIP(), []int{5}\n}", "func (*UpdateReverseProxyRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_reverse_proxy_proto_rawDescGZIP(), []int{9}\n}", "func (*ListenRequest) Descriptor() ([]byte, []int) {\n\treturn file_faultinjector_proto_rawDescGZIP(), []int{8}\n}", "func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}", "func (*WebhookRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_dialogflow_v2beta1_webhook_proto_rawDescGZIP(), []int{0}\n}", "func (*AllowRequest) Descriptor() ([]byte, []int) {\n\treturn file_config_module_proxy_v1_proxy_proto_rawDescGZIP(), []int{2}\n}", "func (*DeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{7}\n}", "func (*CreateReverseProxyRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_reverse_proxy_proto_rawDescGZIP(), []int{0}\n}", "func (*TelemetryRequest) Descriptor() ([]byte, []int) {\n\treturn file_automate_gateway_api_telemetry_telemetry_proto_rawDescGZIP(), []int{0}\n}", "func (*FindEnabledReverseProxyRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_reverse_proxy_proto_rawDescGZIP(), []int{2}\n}", "func (*ModifyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{10}\n}", "func (*RefreshRequest) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{16}\n}", "func (*ConfigRequest_V1_System_Proxy) Descriptor() ([]byte, []int) {\n\treturn file_config_deployment_config_request_proto_rawDescGZIP(), []int{0, 0, 0, 3}\n}", "func (*ControlRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_gateway_v1_control_proto_rawDescGZIP(), []int{0}\n}", "func (*ListenRequest) Descriptor() ([]byte, []int) {\n\treturn file_threads_proto_rawDescGZIP(), []int{46}\n}", "func (*CMsgClientToGCPlayerStatsRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{143}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_internal_services_profile_proto_profile_proto_rawDescGZIP(), []int{0}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_ocis_messages_policies_v0_policies_proto_rawDescGZIP(), []int{2}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_helloworld_proto_rawDescGZIP(), []int{2}\n}", "func (x *fastReflection_QueryParamsRequest) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_QueryParamsRequest\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_Trd_ModifyOrder_proto_rawDescGZIP(), []int{2}\n}", "func (*AddPeerRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{8}\n}", "func (*BatchUpdateReferencesRequest_Request) Descriptor() ([]byte, []int) {\n\treturn file_pkg_proto_icas_icas_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_comments_proto_rawDescGZIP(), []int{3}\n}", "func (*CMsgGCPlayerInfoRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{117}\n}", "func (*DiagnoseRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_api_proto_rawDescGZIP(), []int{16}\n}", "func (*GeneratedRequest) Descriptor() ([]byte, []int) 
{\n\treturn file_proto_auth_proto_rawDescGZIP(), []int{0}\n}", "func (*OriginalDetectIntentRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_dialogflow_v2beta1_webhook_proto_rawDescGZIP(), []int{2}\n}", "func (*UpdateHookRequest) Descriptor() ([]byte, []int) {\n\treturn file_hook_proto_rawDescGZIP(), []int{4}\n}", "func (*CodeLensRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{163}\n}", "func (*UpdatePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{9}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_api_protobuf_spec_example_example_proto_rawDescGZIP(), []int{1}\n}", "func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{0}\n}", "func (*DelRequest) Descriptor() ([]byte, []int) {\n\treturn file_patrol_proto_rawDescGZIP(), []int{8}\n}", "func (*UpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_recordwants_proto_rawDescGZIP(), []int{6}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_pkg_smgrpc_smgrpc_proto_rawDescGZIP(), []int{0}\n}", "func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{5}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_msgs_msgs_proto_rawDescGZIP(), []int{14}\n}", "func (*GetPeerInfoRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{6}\n}", "func (*ChangeRequest) Descriptor() ([]byte, []int) {\n\treturn file_authorization_proto_rawDescGZIP(), []int{0}\n}", "func (*TelemetryRequest) Descriptor() ([]byte, []int) {\n\treturn file_interservice_license_control_license_control_proto_rawDescGZIP(), []int{11}\n}", "func (*PatchAnnotationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{4}\n}", "func (*FindEnabledReverseProxyConfigRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_reverse_proxy_proto_rawDescGZIP(), []int{4}\n}", "func (*PaqueteRequest) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_helloworld_proto_rawDescGZIP(), []int{2}\n}", "func (*CalculatorRequest) Descriptor() ([]byte, []int) {\n\treturn file_basicpb_unary_api_proto_rawDescGZIP(), []int{4}\n}", "func (*UpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_contact_proto_rawDescGZIP(), []int{12}\n}", "func (*PolicyRequest) Descriptor() ([]byte, []int) {\n\treturn file_policypb_policy_proto_rawDescGZIP(), []int{0}\n}", "func (*RelationshipRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{3}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_service_face_detector_proto_rawDescGZIP(), []int{0}\n}", "func (*ForwardRequest) Descriptor() ([]byte, []int) {\n\treturn file_msgs_msgs_proto_rawDescGZIP(), []int{13}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_example_proto_rawDescGZIP(), []int{1}\n}", "func (*CMsgProfileRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{275}\n}", "func (*AddRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_calculator_proto_calc_proto_rawDescGZIP(), []int{0}\n}", "func (*FeedbackRequest) Descriptor() ([]byte, []int) {\n\treturn file_ssn_dataservice_v1_dataservice_proto_rawDescGZIP(), []int{10}\n}", "func 
(*RequestPresentationRequest) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{0}\n}", "func (*LanguageDetectorRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_language_proto_rawDescGZIP(), []int{1}\n}", "func (*PortRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_api_proto_rawDescGZIP(), []int{1}\n}", "func (*LogMessageRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{59}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_newfindmaxpb_newfindmaxpb_proto_rawDescGZIP(), []int{0}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_example_proto_rawDescGZIP(), []int{0}\n}", "func (*EndpointRequest) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{13}\n}", "func (*UpdateReverseProxySchedulingRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_reverse_proxy_proto_rawDescGZIP(), []int{6}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_proto_fandncloud_service_user_user_proto_rawDescGZIP(), []int{10}\n}", "func (*MeshCertificateRequest) Descriptor() ([]byte, []int) {\n\treturn file_security_proto_providers_google_meshca_proto_rawDescGZIP(), []int{0}\n}", "func (*RefreshRequest) Descriptor() ([]byte, []int) {\n\treturn file_toit_api_auth_proto_rawDescGZIP(), []int{1}\n}", "func (*ModelControlRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_service_proto_rawDescGZIP(), []int{4}\n}", "func (*BatchRequest_Request) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_buildbucket_proto_builds_service_proto_rawDescGZIP(), []int{3, 0}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_api_api_proto_rawDescGZIP(), []int{3}\n}", "func (*CheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_api_servicecontrol_v1_service_controller_proto_rawDescGZIP(), []int{0}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_transformer_request_request_proto_rawDescGZIP(), []int{0}\n}", "func (*UpdateConversationRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_threads_proto_rawDescGZIP(), []int{8}\n}", "func (*WatchLimitsRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_limits_proto_v1alpha2_limit_service_proto_rawDescGZIP(), []int{7}\n}", "func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_index_faults_rpc_rpc_proto_rawDescGZIP(), []int{2}\n}", "func (x *fastReflection_AddressBytesToStringRequest) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_AddressBytesToStringRequest\n}", "func (*ProbeRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_gateway_v1_control_proto_rawDescGZIP(), []int{2}\n}", "func (*UpdateLimitRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_limits_proto_v1alpha2_limit_service_proto_rawDescGZIP(), []int{9}\n}", "func (*GetCollectorRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{163}\n}", "func (*RevokeCertificateRequest) Descriptor() ([]byte, []int) {\n\treturn file_majordomo_proto_rawDescGZIP(), []int{18}\n}", "func (*ModifyGatewayRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_pb_protobuf_api_proto_rawDescGZIP(), []int{20}\n}", "func (*CheckPermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_permission_pb_request_proto_rawDescGZIP(), []int{2}\n}", "func (*ValidateClientCredentialRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_micro_pb_request_proto_rawDescGZIP(), 
[]int{0}\n}", "func (*MoneyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_swap_swap_proto_rawDescGZIP(), []int{0}\n}", "func (*SendRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{5}\n}", "func (*ReferenceRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{141}\n}", "func (*UnaryMapMessageRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{10}\n}", "func (*WatchRequest) Descriptor() ([]byte, []int) {\n\treturn file_authzed_api_v0_watch_service_proto_rawDescGZIP(), []int{0}\n}", "func (*PatchKeysRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{74}\n}", "func (x *fastReflection_AddressStringToBytesRequest) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_AddressStringToBytesRequest\n}", "func (*GetServiceRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{6}\n}", "func (*CBroadcast_WebRTCLookupTURNServer_Request) Descriptor() ([]byte, []int) {\n\treturn file_steammessages_broadcast_steamclient_proto_rawDescGZIP(), []int{51}\n}", "func (*LogTraceRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{7}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_auth_svr_proto_rawDescGZIP(), []int{0}\n}", "func (*RevokeJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{20}\n}", "func (*UpdateIngressRuleRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{26}\n}", "func (*DebugInstanceRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{19}\n}", "func (*RevokeFactoryCertificateRequest) Descriptor() ([]byte, []int) {\n\treturn file_token_proto_rawDescGZIP(), []int{15}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_kv_proto_rawDescGZIP(), []int{0}\n}", "func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_proto_rawDescGZIP(), []int{2}\n}", "func (*CMsgClientToGCWageringRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{169}\n}", "func (*ValidateRequest) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_clusrun_proto_rawDescGZIP(), []int{17}\n}" ]
[ "0.7247715", "0.6779651", "0.6682698", "0.666076", "0.6614722", "0.66008824", "0.6595267", "0.6572151", "0.65490466", "0.65459406", "0.6542178", "0.65279186", "0.6515722", "0.6475382", "0.64694345", "0.6464632", "0.6456122", "0.64516634", "0.6447801", "0.6447006", "0.6446291", "0.64417124", "0.64405996", "0.64364487", "0.6428933", "0.642216", "0.6420503", "0.6416612", "0.6410761", "0.6406956", "0.640652", "0.6402035", "0.63975835", "0.639466", "0.6389269", "0.6387323", "0.6385673", "0.63826084", "0.63777196", "0.63774294", "0.63715506", "0.63663566", "0.63555115", "0.63550776", "0.6353889", "0.6352399", "0.6352264", "0.6349992", "0.6348139", "0.63468164", "0.63310033", "0.63258785", "0.6322138", "0.6318866", "0.6318571", "0.6317953", "0.6309605", "0.63078237", "0.63036805", "0.63000757", "0.62954384", "0.62934643", "0.6289233", "0.6284775", "0.62841296", "0.627979", "0.62792087", "0.62780565", "0.62780535", "0.62777436", "0.6275932", "0.62755126", "0.6273063", "0.6272292", "0.62687516", "0.62686884", "0.6265534", "0.62652445", "0.6263769", "0.62568665", "0.62556905", "0.62542194", "0.6254178", "0.62531054", "0.6252966", "0.62527084", "0.62525374", "0.62513393", "0.6247987", "0.6247627", "0.62460774", "0.62451893", "0.6244681", "0.62421703", "0.6240624", "0.62400013", "0.62386394", "0.6237912", "0.6234693", "0.62345165" ]
0.7129833
1
Deprecated: Use ProxyResponse.ProtoReflect.Descriptor instead.
func (*ProxyResponse) Descriptor() ([]byte, []int) {
	return file_proto_sample_proto_rawDescGZIP(), []int{5}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (*ProxyResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_url_proto_rawDescGZIP(), []int{6}\n}", "func (*ListenResponse) Descriptor() ([]byte, []int) {\n\treturn file_faultinjector_proto_rawDescGZIP(), []int{9}\n}", "func (*AddPeerResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{30}\n}", "func (x *fastReflection_MsgUpdateParamsResponse) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_MsgUpdateParamsResponse\n}", "func (*FindEnabledReverseProxyResponse) Descriptor() ([]byte, []int) {\n\treturn file_service_reverse_proxy_proto_rawDescGZIP(), []int{3}\n}", "func (*RefreshResponse) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{17}\n}", "func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}", "func (*CreateReverseProxyResponse) Descriptor() ([]byte, []int) {\n\treturn file_service_reverse_proxy_proto_rawDescGZIP(), []int{1}\n}", "func (*ProxyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_url_proto_rawDescGZIP(), []int{5}\n}", "func (*GetPeerInfoResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{28}\n}", "func (*TelemetryResponse) Descriptor() ([]byte, []int) {\n\treturn file_automate_gateway_api_telemetry_telemetry_proto_rawDescGZIP(), []int{1}\n}", "func (*ModifyResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{11}\n}", "func (*ProxyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_sample_proto_rawDescGZIP(), []int{4}\n}", "func (*DelResponse) Descriptor() ([]byte, []int) {\n\treturn file_patrol_proto_rawDescGZIP(), []int{9}\n}", "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_url_proto_rawDescGZIP(), []int{4}\n}", "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_contact_proto_rawDescGZIP(), []int{15}\n}", "func (*DiagnoseResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_api_proto_rawDescGZIP(), []int{17}\n}", "func (*DecodeReply) Descriptor() ([]byte, []int) {\n\treturn file_proto_videoservice_proto_rawDescGZIP(), []int{1}\n}", "func (*GetMetricsInfoResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{44}\n}", "func (*ProtoResponse) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{2}\n}", "func (x *fastReflection_MsgDepositValidatorRewardsPoolResponse) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_MsgDepositValidatorRewardsPoolResponse\n}", "func (*Response) Descriptor() ([]byte, []int) {\n\treturn file_api_protobuf_spec_example_example_proto_rawDescGZIP(), []int{2}\n}", "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{1}\n}", "func (*Response) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_newfindmaxpb_newfindmaxpb_proto_rawDescGZIP(), []int{1}\n}", "func (*CodeLensResponse) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{32}\n}", "func (*Response) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_helloworld_proto_rawDescGZIP(), []int{3}\n}", "func (*LanguageDetectorResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_language_proto_rawDescGZIP(), []int{2}\n}", 
"func (*ReferenceResponse) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{28}\n}", "func (*FindEnabledReverseProxyConfigResponse) Descriptor() ([]byte, []int) {\n\treturn file_service_reverse_proxy_proto_rawDescGZIP(), []int{5}\n}", "func (*HoverResponse) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{18}\n}", "func (*TelemetryResponse) Descriptor() ([]byte, []int) {\n\treturn file_interservice_license_control_license_control_proto_rawDescGZIP(), []int{12}\n}", "func (*WebhookResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_dialogflow_v2beta1_webhook_proto_rawDescGZIP(), []int{1}\n}", "func (*ApplyResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescGZIP(), []int{1}\n}", "func (*ApiVersionResponse) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{14}\n}", "func (*RevokeFactoryCertificateResponse) Descriptor() ([]byte, []int) {\n\treturn file_token_proto_rawDescGZIP(), []int{16}\n}", "func (x *fastReflection_AddressStringToBytesResponse) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_AddressStringToBytesResponse\n}", "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_wallet_proto_rawDescGZIP(), []int{8}\n}", "func (*CodeLensResolveResponse) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{34}\n}", "func (x *fastReflection_QueryParamsResponse) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_QueryParamsResponse\n}", "func (*MetricsResponse) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_index_proto_rawDescGZIP(), []int{25}\n}", "func (*Response) Descriptor() ([]byte, []int) {\n\treturn file_Notify_proto_rawDescGZIP(), []int{7}\n}", "func (*GetStatsResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{45}\n}", "func (*GetVersionResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{31}\n}", "func (*UpdateReverseProxyRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_reverse_proxy_proto_rawDescGZIP(), []int{9}\n}", "func (*CMsgProfileResponse) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{276}\n}", "func (*UpdateResponse) Descriptor() ([]byte, []int) {\n\treturn file_recordwants_proto_rawDescGZIP(), []int{7}\n}", "func (*FindWebhookCallRequest_Response) Descriptor() ([]byte, []int) {\n\treturn file_uac_Event_proto_rawDescGZIP(), []int{7, 0}\n}", "func (*DefinitionResponse) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{25}\n}", "func (*FindWebhookCallRequest_Response) Descriptor() ([]byte, []int) {\n\treturn file_events_Event_proto_rawDescGZIP(), []int{9, 0}\n}", "func (*GeneratedResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_auth_proto_rawDescGZIP(), []int{1}\n}", "func (*Response) Descriptor() ([]byte, []int) {\n\treturn file_example_proto_rawDescGZIP(), []int{1}\n}", "func (*Response) Descriptor() ([]byte, []int) {\n\treturn file_Trd_ModifyOrder_proto_rawDescGZIP(), []int{3}\n}", "func (*EndpointResponse) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{14}\n}", "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn 
file_weather_proto_rawDescGZIP(), []int{17}\n}", "func (*DeleteEndpointApiResponse) Descriptor() ([]byte, []int) {\n\treturn file_endpoint_api_proto_rawDescGZIP(), []int{3}\n}", "func (*Response) Descriptor() ([]byte, []int) {\n\treturn file_proto_service_proto_rawDescGZIP(), []int{2}\n}", "func (x *fastReflection_AddressBytesToStringResponse) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_AddressBytesToStringResponse\n}", "func (*ControlResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_gateway_v1_control_proto_rawDescGZIP(), []int{1}\n}", "func (*RevokeCertificateResponse) Descriptor() ([]byte, []int) {\n\treturn file_majordomo_proto_rawDescGZIP(), []int{19}\n}", "func (*UpdateEndpointApiResponse) Descriptor() ([]byte, []int) {\n\treturn file_endpoint_api_proto_rawDescGZIP(), []int{4}\n}", "func (*MoneyResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_swap_swap_proto_rawDescGZIP(), []int{1}\n}", "func (*GetResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_comments_proto_rawDescGZIP(), []int{4}\n}", "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_versions_v1_versions_proto_rawDescGZIP(), []int{1}\n}", "func (*DescribeResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{5}\n}", "func (*UpdateRemoteMirrorResponse) Descriptor() ([]byte, []int) {\n\treturn file_remote_proto_rawDescGZIP(), []int{1}\n}", "func (*ScheduleDownlinkResponse) Descriptor() ([]byte, []int) {\n\treturn file_ttn_lorawan_v3_gatewayserver_proto_rawDescGZIP(), []int{2}\n}", "func (*APIResponse) Descriptor() ([]byte, []int) {\n\treturn file_protos_operands_proto_rawDescGZIP(), []int{1}\n}", "func (*WatchResponse) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_index_proto_rawDescGZIP(), []int{24}\n}", "func (*RenameResponse) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{43}\n}", "func (*WatchLimitsResponse) Descriptor() ([]byte, []int) {\n\treturn edgelq_limits_proto_v1alpha2_limit_service_proto_rawDescGZIP(), []int{8}\n}", "func (*DeleteResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_containerd_containerd_runtime_v1_shim_v1_shim_proto_rawDescGZIP(), []int{2}\n}", "func (*DeleteResponse) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{8}\n}", "func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}", "func (*PolicyResponse) Descriptor() ([]byte, []int) {\n\treturn file_policypb_policy_proto_rawDescGZIP(), []int{1}\n}", "func (*PingResponse) Descriptor() ([]byte, []int) {\n\treturn file_internal_crosstest_v1test_cross_proto_rawDescGZIP(), []int{1}\n}", "func (*PerformanceResponse) Descriptor() ([]byte, []int) {\n\treturn file_commissionService_proto_rawDescGZIP(), []int{5}\n}", "func (*GetPolicyResponse) Descriptor() ([]byte, []int) {\n\treturn file_api_policy_proto_rawDescGZIP(), []int{3}\n}", "func (*StreamingResponse) Descriptor() ([]byte, []int) {\n\treturn file_api_protobuf_spec_example_example_proto_rawDescGZIP(), []int{4}\n}", "func (*GenerateProductMixIdeasResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v2_services_reach_plan_service_proto_rawDescGZIP(), []int{9}\n}", "func (*CheckLiveResponse) Descriptor() ([]byte, []int) {\n\treturn file_health_proto_rawDescGZIP(), []int{3}\n}", "func (*SendResponse) Descriptor() ([]byte, []int) {\n\treturn 
file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{27}\n}", "func (*CollectResponse) Descriptor() ([]byte, []int) {\n\treturn file_orc8r_cloud_go_services_analytics_protos_collector_proto_rawDescGZIP(), []int{1}\n}", "func (*LookupResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_ip_proto_rawDescGZIP(), []int{1}\n}", "func (*DeleteResponse) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{11}\n}", "func (*GPULabelResponse) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{11}\n}", "func (*ReportLoadResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_api_proto_rawDescGZIP(), []int{6}\n}", "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_user_proto_rawDescGZIP(), []int{4}\n}", "func (*DeleteTeam_Response) Descriptor() ([]byte, []int) {\n\treturn file_uac_Team_proto_rawDescGZIP(), []int{6, 0}\n}", "func (*WatchResponse) Descriptor() ([]byte, []int) {\n\treturn file_authzed_api_v0_watch_service_proto_rawDescGZIP(), []int{1}\n}", "func (*AddResponse) Descriptor() ([]byte, []int) {\n\treturn file_grpc_calculator_proto_calc_proto_rawDescGZIP(), []int{1}\n}", "func (*InvokeResponse) Descriptor() ([]byte, []int) {\n\treturn file_runtime_proto_rawDescGZIP(), []int{19}\n}", "func (*DiffResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescGZIP(), []int{3}\n}", "func (*RequestPresentationResponse) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{4}\n}", "func (x *fastReflection_MsgDepositValidatorRewardsPoolResponse) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value {\n\tswitch descriptor.FullName() {\n\tdefault:\n\t\tif descriptor.IsExtension() {\n\t\t\tpanic(fmt.Errorf(\"proto3 declared messages do not support extensions: cosmos.distribution.v1beta1.MsgDepositValidatorRewardsPoolResponse\"))\n\t\t}\n\t\tpanic(fmt.Errorf(\"message cosmos.distribution.v1beta1.MsgDepositValidatorRewardsPoolResponse does not contain field %s\", descriptor.FullName()))\n\t}\n}", "func (*Response) Descriptor() ([]byte, []int) {\n\treturn file_interservice_notifications_service_events_proto_rawDescGZIP(), []int{10}\n}", "func (*Response) Descriptor() ([]byte, []int) {\n\treturn file_internal_tool_grpctool_test_test_proto_rawDescGZIP(), []int{1}\n}", "func (*ApiReply) Descriptor() ([]byte, []int) {\n\treturn file_api_sso_api_proto_rawDescGZIP(), []int{0}\n}", "func (*ConnectResponse) Descriptor() ([]byte, []int) {\n\treturn file_voice_v1_voice_proto_rawDescGZIP(), []int{2}\n}", "func (*AddMockResponse) Descriptor() ([]byte, []int) {\n\treturn file_mocking_service_proto_rawDescGZIP(), []int{1}\n}", "func (*ListenRequest) Descriptor() ([]byte, []int) {\n\treturn file_faultinjector_proto_rawDescGZIP(), []int{8}\n}" ]
[ "0.7105832", "0.67619485", "0.66736144", "0.6664114", "0.6658084", "0.66193265", "0.6603035", "0.65962255", "0.6588789", "0.6581428", "0.65801454", "0.6566071", "0.65583223", "0.6553807", "0.654606", "0.6514046", "0.6500954", "0.6491214", "0.64790255", "0.64641875", "0.6447891", "0.6446242", "0.6428887", "0.6427094", "0.64267504", "0.64205855", "0.64156085", "0.64126545", "0.6410357", "0.64071524", "0.6397559", "0.6396818", "0.6392832", "0.6388734", "0.638372", "0.6371738", "0.63717306", "0.63662374", "0.63659734", "0.6364437", "0.6363253", "0.6362537", "0.63607407", "0.63596547", "0.63522", "0.6351756", "0.6348848", "0.634661", "0.6346154", "0.63442254", "0.6339857", "0.63389367", "0.63364834", "0.6335869", "0.63356864", "0.6332598", "0.63314676", "0.633114", "0.6330086", "0.6328261", "0.6326223", "0.6325865", "0.63254106", "0.6325314", "0.6322754", "0.6320203", "0.6318349", "0.63164866", "0.63153404", "0.63134706", "0.63116825", "0.6309125", "0.63086885", "0.6308459", "0.6302246", "0.6301904", "0.6300046", "0.6299774", "0.6297662", "0.6297243", "0.6293745", "0.6289921", "0.6288901", "0.62881845", "0.6287012", "0.6286923", "0.6286328", "0.62859875", "0.6285729", "0.62837565", "0.62833023", "0.62828314", "0.6281234", "0.62810045", "0.6279544", "0.6278746", "0.62763035", "0.627567", "0.62749493", "0.62736815" ]
0.6940781
1
Native returns a pointer to the underlying PangoLayout.
func (v *Context) Native() uintptr {
	return uintptr(unsafe.Pointer(v.native()))
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (tv *TextView) ParentLayout() *gi.Layout {\n\tif tv.Par == nil {\n\t\treturn nil\n\t}\n\tpari, _ := gi.KiToNode2D(tv.Par)\n\treturn pari.AsLayout2D()\n}", "func (w *WidgetImplement) Layout() Layout {\n\treturn w.layout\n}", "func (obj *GenericMeasure) GetLayoutRaw(ctx context.Context) (json.RawMessage, error) {\n\tresult := &struct {\n\t\tLayout json.RawMessage `json:\"qLayout\"`\n\t}{}\n\terr := obj.RPC(ctx, \"GetLayout\", result)\n\treturn result.Layout, err\n}", "func (obj *Doc) GetAppLayoutRaw(ctx context.Context) (json.RawMessage, error) {\n\tresult := &struct {\n\t\tLayout json.RawMessage `json:\"qLayout\"`\n\t}{}\n\terr := obj.RPC(ctx, \"GetAppLayout\", result)\n\treturn result.Layout, err\n}", "func (obj *GenericBookmark) GetLayoutRaw(ctx context.Context) (json.RawMessage, error) {\n\tresult := &struct {\n\t\tLayout json.RawMessage `json:\"qLayout\"`\n\t}{}\n\terr := obj.RPC(ctx, \"GetLayout\", result)\n\treturn result.Layout, err\n}", "func (obj *GenericMeasure) GetLayout(ctx context.Context) (*GenericMeasureLayout, error) {\n\tresult := &struct {\n\t\tLayout *GenericMeasureLayout `json:\"qLayout\"`\n\t}{}\n\terr := obj.RPC(ctx, \"GetLayout\", result)\n\treturn result.Layout, err\n}", "func (obj *GenericDimension) GetLayoutRaw(ctx context.Context) (json.RawMessage, error) {\n\tresult := &struct {\n\t\tLayout json.RawMessage `json:\"qLayout\"`\n\t}{}\n\terr := obj.RPC(ctx, \"GetLayout\", result)\n\treturn result.Layout, err\n}", "func (obj *GenericBookmark) GetLayout(ctx context.Context) (*GenericBookmarkLayout, error) {\n\tresult := &struct {\n\t\tLayout *GenericBookmarkLayout `json:\"qLayout\"`\n\t}{}\n\terr := obj.RPC(ctx, \"GetLayout\", result)\n\treturn result.Layout, err\n}", "func newAdjustmentFromNative(obj unsafe.Pointer) interface{} {\n\ta := &Adjustment{}\n\ta.object = C.to_GtkAdjustment(obj)\n\n\tif gobject.IsObjectFloating(a) {\n\t\tgobject.RefSink(a)\n\t} else {\n\t\tgobject.Ref(a)\n\t}\n\tadjustmentFinalizer(a)\n\n\treturn a\n}", "func (obj *Doc) GetAppLayout(ctx context.Context) (*NxAppLayout, error) {\n\tresult := &struct {\n\t\tLayout *NxAppLayout `json:\"qLayout\"`\n\t}{}\n\terr := obj.RPC(ctx, \"GetAppLayout\", result)\n\treturn result.Layout, err\n}", "func (obj *GenericVariable) GetLayoutRaw(ctx context.Context) (json.RawMessage, error) {\n\tresult := &struct {\n\t\tLayout json.RawMessage `json:\"qLayout\"`\n\t}{}\n\terr := obj.RPC(ctx, \"GetLayout\", result)\n\treturn result.Layout, err\n}", "func (obj *GenericVariable) GetLayout(ctx context.Context) (*GenericVariableLayout, error) {\n\tresult := &struct {\n\t\tLayout *GenericVariableLayout `json:\"qLayout\"`\n\t}{}\n\terr := obj.RPC(ctx, \"GetLayout\", result)\n\treturn result.Layout, err\n}", "func alloc(size uintptr, layout unsafe.Pointer) unsafe.Pointer", "func (obj *GenericDimension) GetLayout(ctx context.Context) (*GenericDimensionLayout, error) {\n\tresult := &struct {\n\t\tLayout *GenericDimensionLayout `json:\"qLayout\"`\n\t}{}\n\terr := obj.RPC(ctx, \"GetLayout\", result)\n\treturn result.Layout, err\n}", "func (m *PrinterDefaults) GetMultipageLayout()(*PrintMultipageLayout) {\n val, err := m.GetBackingStore().Get(\"multipageLayout\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*PrintMultipageLayout)\n }\n return nil\n}", "func (l *Selectable) Layout(gtx layout.Context, lt *text.Shaper, font font.Font, size unit.Sp, textMaterial, selectionMaterial op.CallOp) layout.Dimensions {\n\tl.initialize()\n\tl.text.LineHeight = l.LineHeight\n\tl.text.LineHeightScale = 
l.LineHeightScale\n\tl.text.Alignment = l.Alignment\n\tl.text.MaxLines = l.MaxLines\n\tl.text.Truncator = l.Truncator\n\tl.text.WrapPolicy = l.WrapPolicy\n\tl.text.Update(gtx, lt, font, size, l.handleEvents)\n\tdims := l.text.Dimensions()\n\tdefer clip.Rect(image.Rectangle{Max: dims.Size}).Push(gtx.Ops).Pop()\n\tpointer.CursorText.Add(gtx.Ops)\n\tvar keys key.Set\n\tif l.focused {\n\t\tconst keyFilterAllArrows = \"(ShortAlt)-(Shift)-[←,→,↑,↓]|(Shift)-[⏎,⌤]|(ShortAlt)-(Shift)-[⌫,⌦]|(Shift)-[⇞,⇟,⇱,⇲]|Short-[C,V,X,A]|Short-(Shift)-Z\"\n\t\tkeys = keyFilterAllArrows\n\t}\n\tkey.InputOp{Tag: l, Keys: keys}.Add(gtx.Ops)\n\tif l.requestFocus {\n\t\tkey.FocusOp{Tag: l}.Add(gtx.Ops)\n\t\tkey.SoftKeyboardOp{Show: true}.Add(gtx.Ops)\n\t}\n\tl.requestFocus = false\n\n\tl.clicker.Add(gtx.Ops)\n\tl.dragger.Add(gtx.Ops)\n\n\tl.paintSelection(gtx, selectionMaterial)\n\tl.paintText(gtx, textMaterial)\n\treturn dims\n}", "func (r *SnpExtendedReportReqABI) Pointer() unsafe.Pointer {\n\treturn unsafe.Pointer(r)\n}", "func (s *Menu) Layout(outsideWidth, outsideHeight int) (int, int) {\n\n\txCenter := outsideWidth / 2\n\tyPlaces := []int{} // golang gotcha: can't use len(s.widgets) to make an array\n\tslots := len(s.widgets) + 1\n\tfor i := 0; i < slots; i++ {\n\t\tyPlaces = append(yPlaces, (outsideHeight/slots)*i)\n\t}\n\n\tfor i, w := range s.widgets {\n\t\tw.SetPosition(xCenter, yPlaces[i+1])\n\t}\n\n\treturn outsideWidth, outsideHeight\n}", "func (r *SnpReportReqABI) Pointer() unsafe.Pointer {\n\treturn unsafe.Pointer(r)\n}", "func NewPageLayout(orientation, unit string, width, height float64) (s *PageLayout, e error) {\n\tif orientation == \"\" {\n\t\torientation = \"L\"\n\t}\n\n\tif unit == \"\" {\n\t\tunit = \"mm\"\n\t}\n\n\tif width <= 0 {\n\t\te = errors.New(\"The width of the page needs to be bigger than 0.\")\n\t\treturn\n\t}\n\n\tif height <= 0 {\n\t\te = errors.New(\"The height of the page needs to be bigger than 0.\")\n\t\treturn\n\t}\n\n\ts = &PageLayout{\n\t\torientation: orientation,\n\t\tunit: unit,\n\t\twidth: width,\n\t\theight: height,\n\t}\n\treturn\n}", "func (i *InteractiveSpan) Layout(gtx layout.Context) layout.Dimensions {\n\ti.click.Add(gtx.Ops)\n\tfor _, e := range i.click.Events(gtx) {\n\t\tswitch e.Type {\n\t\tcase gesture.TypeClick:\n\t\t\tif i.longPressed {\n\t\t\t\ti.longPressed = false\n\t\t\t} else {\n\t\t\t\ti.events = append(i.events, Event{Type: Click, ClickData: e})\n\t\t\t}\n\t\t\ti.pressing = false\n\t\tcase gesture.TypePress:\n\t\t\ti.pressStarted = gtx.Now\n\t\t\ti.pressing = true\n\t\tcase gesture.TypeCancel:\n\t\t\ti.pressing = false\n\t\t\ti.longPressed = false\n\t\t}\n\t}\n\tif i.click.Hovered() {\n\t\ti.events = append(i.events, Event{Type: Hover})\n\t}\n\n\tif !i.longPressed && i.pressing && gtx.Now.Sub(i.pressStarted) > LongPressDuration {\n\t\ti.events = append(i.events, Event{Type: LongPress})\n\t\ti.longPressed = true\n\t}\n\n\tif i.pressing && !i.longPressed {\n\t\top.InvalidateOp{}.Add(gtx.Ops)\n\t}\n\treturn layout.Dimensions{}\n}", "func (r *SnpReportRespABI) Pointer() unsafe.Pointer {\n\treturn unsafe.Pointer(r)\n}", "func (v *SourceLanguageManager) native() *C.GtkSourceLanguageManager {\n\tif v == nil || v.GObject == nil {\n\t\treturn nil\n\t}\n\tp := unsafe.Pointer(v.GObject)\n\treturn C.toGtkSourceLanguageManager(p)\n}", "func (v *TextView) native() *C.GtkTextView {\n\tif v == nil || v.GObject == nil {\n\t\treturn nil\n\t}\n\tp := unsafe.Pointer(v.GObject)\n\treturn C.toGtkTextView(p)\n}", "func (w *sliderElement) Layout(bc base.Constraints) base.Size 
{\n\tif !bc.HasBoundedWidth() && !bc.HasBoundedHeight() {\n\t\t// No need to worry about breaking the constraints. We can take as\n\t\t// much space as desired.\n\t\twidth := w.MinIntrinsicWidth(base.Inf)\n\t\t_, height := w.handle.GetPreferredHeight()\n\t\t// Dimensions may need to be increased to meet minimums.\n\t\treturn bc.Constrain(base.Size{width, base.FromPixelsY(height)})\n\t}\n\tif !bc.HasBoundedHeight() {\n\t\t// No need to worry about height. Find the width that best meets the\n\t\t// widgets preferred width.\n\t\twidth := bc.ConstrainWidth(w.MinIntrinsicWidth(base.Inf))\n\t\t// Get the best height for this width.\n\t\t_, height := syscall.WidgetGetPreferredHeightForWidth(w.handle, width.PixelsX())\n\t\t// Height may need to be increased to meet minimum.\n\t\treturn base.Size{width, bc.ConstrainHeight(base.FromPixelsY(height))}\n\t}\n\n\t// Not clear the following is the best general approach given GTK layout\n\t// model.\n\t_, height2 := w.handle.GetPreferredHeight()\n\tif height := base.FromPixelsY(height2); height < bc.Max.Height {\n\t\twidth := w.MinIntrinsicWidth(height)\n\t\treturn bc.Constrain(base.Size{width, height})\n\t}\n\n\theight := base.FromPixelsY(height2)\n\twidth := w.MinIntrinsicWidth(height)\n\treturn bc.Constrain(base.Size{width, height})\n}", "func (ca *Appender) Layout() driver.Layout {\n\tca.mu.Lock()\n\tdefer ca.mu.Unlock()\n\treturn ca.layout\n}", "func (f *Font) GetOTLayoutTables() *tt.LayoutTables { return f.otTables }", "func (e *Editor) Layout(gtx layout.Context, sh text.Shaper, font text.Font, size unit.Value) layout.Dimensions {\n\ttextSize := fixed.I(gtx.Px(size))\n\tif e.font != font || e.textSize != textSize {\n\t\te.invalidate()\n\t\te.font = font\n\t\te.textSize = textSize\n\t}\n\tmaxWidth := gtx.Constraints.Max.X\n\tif e.singleLine {\n\t\tmaxWidth = Inf\n\t}\n\tif maxWidth != e.maxWidth {\n\t\te.maxWidth = maxWidth\n\t\te.invalidate()\n\t}\n\tif sh != e.shaper {\n\t\te.shaper = sh\n\t\te.invalidate()\n\t}\n\tif e.mask != e.lastMask {\n\t\te.lastMask = e.mask\n\t\te.invalidate()\n\t}\n\te.makeValid()\n\te.processEvents(gtx)\n\te.makeValid()\n\tif viewSize := gtx.Constraints.Constrain(e.dims.Size); viewSize != e.viewSize {\n\t\te.viewSize = viewSize\n\t\te.invalidate()\n\t}\n\te.makeValid()\n\treturn e.layout(gtx)\n}", "func (ss SpanStyle) Layout(gtx layout.Context, s text.Shaper, shape spanShape) layout.Dimensions {\n\tpaint.ColorOp{Color: ss.Color}.Add(gtx.Ops)\n\tdefer op.Offset(layout.FPt(shape.offset)).Push(gtx.Ops).Pop()\n\tdefer s.Shape(ss.Font, fixed.I(gtx.Px(ss.Size)), shape.layout).Push(gtx.Ops).Pop()\n\tpaint.PaintOp{}.Add(gtx.Ops)\n\treturn layout.Dimensions{Size: shape.size}\n}", "func (v *WindowGroup) native() *C.GtkWindowGroup {\n\tif v == nil || v.GObject == nil {\n\t\treturn nil\n\t}\n\tp := unsafe.Pointer(v.GObject)\n\treturn C.toGtkWindowGroup(p)\n}", "func (r *blockRenderer) Layout(sz fyne.Size) {\n\tst := r.el.Style\n\tpt, pr, pb, pl := st.Padding[0], st.Padding[1], st.Padding[2], st.Padding[3]\n\tmt, mr, mb, ml := st.Margins[0], st.Margins[1], st.Margins[2], st.Margins[3]\n\tbw, br, brdc, bgc := st.BorderWidth, st.BorderRadius, st.BorderColor, st.BgColor\n\txSpace := 2*bw + ml + pl + mr + pr\n\tySpace := 2*bw + mt + pt + mb + pb\n\tlSpace := bw + ml + pl\n\ttSpace := bw + mt + pt\n\n\t// The elementLayout takes care of the sizing within the kids' space. 
We need\n\t// to offset the within our own margins, padding, and borders.\n\tkidBoxSize := r.el.claimed.Subtract(fyne.NewSize(xSpace, ySpace))\n\tflowingKids, positionedKids := layoutItems(r.el.kids)\n\tr.el.Layout(flowingKids, kidBoxSize)\n\tr.el.positionAbsolutely(positionedKids)\n\n\tkidOffset := fyne.NewPos(lSpace, tSpace)\n\n\tif r.el.Name == \"hr\" {\n\t\tfmt.Println(\"--blockRenderer.Layout \", r.el.claimed, kidBoxSize)\n\t}\n\n\tapplyOffset(flowingKids, kidOffset)\n\n\tx, y := ml, mt\n\tworkingSize := r.el.claimed.Subtract(fyne.NewSize(ml+mr, mt+mb))\n\tobs := make([]fyne.CanvasObject, 0)\n\tif st.BorderWidth > 0 {\n\t\tif br > 0 {\n\t\t\tobs = append(obs, roundedRectangle(workingSize.Width, workingSize.Height,\n\t\t\t\tx, y, br, float32(br*2), brdc)...)\n\t\t} else {\n\t\t\trect := canvas.NewRectangle(brdc)\n\t\t\trect.Resize(workingSize)\n\t\t\tobs = append(obs, rect)\n\t\t\trect.Move(fyne.NewPos(x, y))\n\t\t}\n\t\tx += bw\n\t\ty += bw\n\t\tworkingSize = workingSize.Subtract(fyne.NewSize(bw*2, bw*2))\n\t}\n\n\tif bgc != Transparent {\n\t\tif br > 0 {\n\t\t\tobs = append(obs, roundedRectangle(workingSize.Width, workingSize.Height,\n\t\t\t\tx, y, br, float32(br*2), bgc)...)\n\t\t} else {\n\t\t\trect := canvas.NewRectangle(bgc)\n\t\t\trect.Resize(workingSize)\n\t\t\trect.Move(fyne.NewPos(x, y))\n\t\t\tobs = append(obs, rect)\n\t\t}\n\t}\n\tr.el.obs = obs\n}", "func (v *PixbufLoader) Native() *C.GdkPixbufLoader {\n\tif v == nil || v.GObject == nil {\n\t\treturn nil\n\t}\n\tp := unsafe.Pointer(v.GObject)\n\treturn C.toGdkPixbufLoader(p)\n}", "func (v *MenuButton) GetAlignWidget() *Widget {\n\tc := C.gtk_menu_button_get_align_widget(v.native())\n\tif c == nil {\n\t\treturn nil\n\t}\n\tobj := glib.Take(unsafe.Pointer(c))\n\treturn wrapWidget(obj)\n}", "func (w *LabelWidget) Layout(g *gocui.Gui) error {\n\t_, maxY := g.Size()\n\n\ty := int(w.y * float32(maxY))\n\n\tv, err := g.SetView(w.name, w.x, y, w.x+w.w, y+2)\n\tif err != nil {\n\t\tif err != gocui.ErrUnknownView {\n\t\t\treturn err\n\t\t}\n\n\t\tv.FrameColor = w.color\n\n\t\tfmt.Fprint(v, w.label)\n\t}\n\n\treturn nil\n}", "func (ss SpanStyle) Layout(gtx layout.Context, shape spanShape) layout.Dimensions {\n\tpaint.ColorOp{Color: ss.Color}.Add(gtx.Ops)\n\tdefer op.Offset(shape.offset).Push(gtx.Ops).Pop()\n\tshape.call.Add(gtx.Ops)\n\treturn layout.Dimensions{Size: shape.size}\n}", "func (v *Overlay) native() *C.GtkOverlay {\n\tif v == nil {\n\t\treturn nil\n\t}\n\treturn (*C.GtkOverlay)(v.Native())\n}", "func (v *ShortcutsSection) native() *C.GtkShortcutsSection {\n\tif v == nil || v.GObject == nil {\n\t\treturn nil\n\t}\n\tp := unsafe.Pointer(v.GObject)\n\treturn C.toGtkShortcutsSection(p)\n}", "func Layout(db *h.DagBuilderHelper) (ipld.Node, error) {\n\tnewRoot := db.NewFSNodeOverDag(ft.TFile)\n\troot, _, err := fillTrickleRec(db, newRoot, -1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn root, db.Add(root)\n}", "func AlignmentPCenter() *Alignment {\n\tv := AlignmentVCenter\n\treturn &v\n}", "func (recv *Value) GetPointer() uintptr {\n\tretC := C.g_value_get_pointer((*C.GValue)(recv.native))\n\tretGo := (uintptr)(unsafe.Pointer(retC))\n\n\treturn retGo\n}", "func (e BackendLayoutType) C() C.cudnnBackendLayoutType_t { return C.cudnnBackendLayoutType_t(e) }", "func (s *layoutStack) topLayout() geom.Layout {\n\treturn s.top().layout\n}", "func (r *SnpDerivedKeyReqABI) Pointer() unsafe.Pointer { return unsafe.Pointer(r) }", "func (v *TextView) GetJustification() Justification {\n\tc := 
C.gtk_text_view_get_justification(v.native())\n\treturn Justification(c)\n}", "func (v *HeaderBar) GetDecorationLayout() string {\n\tc := C.gtk_header_bar_get_decoration_layout(v.native())\n\treturn C.GoString((*C.char)(c))\n}", "func (v *Icon) native() *C.GIcon {\n\treturn C.toGIcon(unsafe.Pointer(v.Native()))\n}", "func newLabelFromNative(obj unsafe.Pointer) interface{} {\n\tl := &Label{}\n\tl.object = C.to_GtkLabel(obj)\n\n\tif gobject.IsObjectFloating(l) {\n\t\tgobject.RefSink(l)\n\t} else {\n\t\tgobject.Ref(l)\n\t}\n\tl.Widget = NewWidget(unsafe.Pointer(l.object))\n\tlabelFinalizer(l)\n\n\treturn l\n}", "func (s *ScaleInfo) GetPointer() int {\n\treturn s.pointer\n}", "func (v *Pixbuf) Native() *C.GdkPixbuf {\n\tif v == nil || v.GObject == nil {\n\t\treturn nil\n\t}\n\tp := unsafe.Pointer(v.GObject)\n\treturn C.toGdkPixbuf(p)\n}", "func (t Tooltip) Layout(gtx C) D {\n\treturn layout.Stack{}.Layout(gtx,\n\t\tlayout.Expanded(func(gtx C) D {\n\t\t\tradius := gtx.Dp(t.CornerRadius)\n\t\t\tpaint.FillShape(gtx.Ops, t.Bg, clip.RRect{\n\t\t\t\tRect: image.Rectangle{\n\t\t\t\t\tMax: gtx.Constraints.Min,\n\t\t\t\t},\n\t\t\t\tNW: radius,\n\t\t\t\tNE: radius,\n\t\t\t\tSW: radius,\n\t\t\t\tSE: radius,\n\t\t\t}.Op(gtx.Ops))\n\t\t\treturn D{}\n\t\t}),\n\t\tlayout.Stacked(func(gtx C) D {\n\t\t\treturn t.Inset.Layout(gtx, t.Text.Layout)\n\t\t}),\n\t)\n}", "func (g *Game) Layout(outsideWidth, outsideHeight int) (int, int) {\n\treturn outsideWidth, outsideHeight\n}", "func (gui *Gui) getFocusLayout() func(g *gocui.Gui) error {\n\tvar previousView *gocui.View\n\treturn func(g *gocui.Gui) error {\n\t\tnewView := gui.g.CurrentView()\n\t\tif err := gui.onFocusChange(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// for now we don't consider losing focus to a popup panel as actually losing focus\n\t\tif newView != previousView && !gui.isPopupPanel(newView.Name()) {\n\t\t\tif err := gui.onFocusLost(previousView, newView); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := gui.onFocus(newView); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tpreviousView = newView\n\t\t}\n\t\treturn nil\n\t}\n}", "func (client *ClientImpl) GetFormLayout(ctx context.Context, args GetFormLayoutArgs) (*FormLayout, error) {\n\trouteValues := make(map[string]string)\n\tif args.ProcessId == nil {\n\t\treturn nil, &azuredevops.ArgumentNilError{ArgumentName: \"args.ProcessId\"}\n\t}\n\trouteValues[\"processId\"] = (*args.ProcessId).String()\n\tif args.WitRefName == nil || *args.WitRefName == \"\" {\n\t\treturn nil, &azuredevops.ArgumentNilOrEmptyError{ArgumentName: \"args.WitRefName\"}\n\t}\n\trouteValues[\"witRefName\"] = *args.WitRefName\n\n\tlocationId, _ := uuid.Parse(\"fa8646eb-43cd-4b71-9564-40106fd63e40\")\n\tresp, err := client.Client.Send(ctx, http.MethodGet, locationId, \"6.0-preview.1\", routeValues, nil, nil, \"\", \"application/json\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar responseValue FormLayout\n\terr = client.Client.UnmarshalBody(resp, &responseValue)\n\treturn &responseValue, err\n}", "func (r *SnpDerivedKeyRespABI) Pointer() unsafe.Pointer { return unsafe.Pointer(r) }", "func augmentLayoutWithM(layout geom.Layout) geom.Layout {\n\tswitch layout {\n\tcase geom.XY, geom.XYM:\n\t\treturn geom.XYM\n\tcase geom.XYZ, geom.XYZM:\n\t\treturn geom.XYZM\n\tdefault:\n\t\treturn layout\n\t}\n}", "func (v *ScaleButton) native() *C.GtkScaleButton {\n\tif v == nil {\n\t\treturn nil\n\t}\n\tptr := unsafe.Pointer(v.Object.Native())\n\treturn C.toGtkScaleButton(ptr)\n}", "func Pointer(v int64) *int64 
{\n\treturn helpy.Pointer(v)\n}", "func (t TipIconButtonStyle) Layout(gtx C) D {\n\treturn t.State.Layout(gtx, t.Tooltip, t.IconButtonStyle.Layout)\n}", "func ContentAlignmentPCenter() *ContentAlignment {\n\tv := ContentAlignmentVCenter\n\treturn &v\n}", "func (v *SourceBuffer) native() *C.GtkSourceBuffer {\n\tif v == nil || v.GObject == nil {\n\t\treturn nil\n\t}\n\tp := unsafe.Pointer(v.GObject)\n\treturn C.toGtkSourceBuffer(p)\n}", "func (v *SourceLanguage) native() *C.GtkSourceLanguage {\n\tif v == nil || v.GObject == nil {\n\t\treturn nil\n\t}\n\tp := unsafe.Pointer(v.GObject)\n\treturn C.toGtkSourceLanguage(p)\n}", "func (pg *AppOverviewPage) Layout(gtx layout.Context) layout.Dimensions {\n\tpageContent := []func(gtx C) D{\n\t\tfunc(gtx C) D {\n\t\t\tif len(pg.mixerWallets) == 0 {\n\t\t\t\treturn D{}\n\t\t\t}\n\n\t\t\treturn components.MixerInfoLayout(gtx, pg.Load, true, pg.toMixer.Layout, func(gtx C) D {\n\t\t\t\treturn pg.listMixer.Layout(gtx, len(pg.mixerWallets), func(gtx C, i int) D {\n\t\t\t\t\treturn layout.Inset{Bottom: values.MarginPadding5}.Layout(gtx, func(gtx C) D {\n\t\t\t\t\t\taccounts, _ := pg.mixerWallets[i].GetAccountsRaw()\n\t\t\t\t\t\tvar unmixedBalance string\n\t\t\t\t\t\tfor _, acct := range accounts.Acc {\n\t\t\t\t\t\t\tif acct.Number == pg.mixerWallets[i].UnmixedAccountNumber() {\n\t\t\t\t\t\t\t\tunmixedBalance = dcrutil.Amount(acct.TotalBalance).String()\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\treturn components.MixerInfoContentWrapper(gtx, pg.Load, func(gtx C) D {\n\t\t\t\t\t\t\treturn layout.Flex{Axis: layout.Vertical}.Layout(gtx,\n\t\t\t\t\t\t\t\tlayout.Rigid(func(gtx C) D {\n\t\t\t\t\t\t\t\t\ttxt := pg.Theme.Label(values.TextSize14, pg.mixerWallets[i].Name)\n\t\t\t\t\t\t\t\t\ttxt.Font.Weight = text.Medium\n\n\t\t\t\t\t\t\t\t\treturn layout.Inset{Bottom: values.MarginPadding10}.Layout(gtx, txt.Layout)\n\t\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t\t\tlayout.Rigid(func(gtx C) D {\n\t\t\t\t\t\t\t\t\treturn layout.Flex{Spacing: layout.SpaceBetween, Alignment: layout.Middle}.Layout(gtx,\n\t\t\t\t\t\t\t\t\t\tlayout.Rigid(func(gtx C) D {\n\t\t\t\t\t\t\t\t\t\t\tt := pg.Theme.Label(values.TextSize14, values.String(values.StrUnmixedBalance))\n\t\t\t\t\t\t\t\t\t\t\tt.Color = pg.Theme.Color.GrayText2\n\t\t\t\t\t\t\t\t\t\t\treturn t.Layout(gtx)\n\t\t\t\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t\t\t\t\tlayout.Rigid(func(gtx C) D {\n\t\t\t\t\t\t\t\t\t\t\treturn components.LayoutBalanceSize(gtx, pg.Load, unmixedBalance, values.TextSize20)\n\t\t\t\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t})\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\t\t},\n\t\tfunc(gtx C) D {\n\t\t\t// allow the recentTransactionsSection to extend the entire width of the display area.\n\t\t\tgtx.Constraints.Min.X = gtx.Constraints.Max.X\n\t\t\treturn pg.recentTransactionsSection(gtx)\n\t\t},\n\t\tfunc(gtx C) D {\n\t\t\tif pg.WL.MultiWallet.ReadBoolConfigValueForKey(load.FetchProposalConfigKey, false) && len(pg.proposalItems) != 0 {\n\t\t\t\treturn pg.recentProposalsSection(gtx)\n\t\t\t}\n\t\t\treturn D{}\n\t\t},\n\t\tfunc(gtx C) D {\n\t\t\treturn pg.syncStatusSection(gtx)\n\t\t},\n\t}\n\n\tif pg.WL.MultiWallet.IsSyncing() || pg.WL.MultiWallet.IsRescanning() || pg.WL.MultiWallet.Politeia.IsSyncing() {\n\t\t// Will refresh the overview page every 2 seconds while\n\t\t// sync is active. When sync/rescan is started or ended,\n\t\t// sync is considered inactive and no refresh occurs. 
A\n\t\t// sync state change listener is used to refresh the display\n\t\t// when the sync state changes.\n\t\top.InvalidateOp{At: gtx.Now.Add(2 * time.Second)}.Add(gtx.Ops)\n\t}\n\n\treturn components.UniformPadding(gtx, func(gtx C) D {\n\t\treturn pg.Theme.List(pg.scrollContainer).Layout(gtx, len(pageContent), func(gtx C, i int) D {\n\t\t\tm := values.MarginPadding5\n\t\t\tif i == len(pageContent) {\n\t\t\t\t// remove padding after the last item\n\t\t\t\tm = values.MarginPadding0\n\t\t\t}\n\t\t\treturn layout.Inset{\n\t\t\t\tRight: values.MarginPadding2,\n\t\t\t\tBottom: m,\n\t\t\t}.Layout(gtx, pageContent[i])\n\t\t})\n\t})\n}", "func ParseLayout(i io.Reader) layout {\n\tr := bufio.NewScanner(i)\n\trv := layout{Rows: make([][]widgetRule, 0)}\n\tvar lineNo int\n\tfor r.Scan() {\n\t\tl := strings.TrimSpace(r.Text())\n\t\tif l == \"\" || l[0] == '#' {\n\t\t\tcontinue\n\t\t}\n\t\trow := make([]widgetRule, 0)\n\t\tws := strings.Fields(l)\n\t\tweightTotal := 0\n\t\tfor _, w := range ws {\n\t\t\twr := widgetRule{Weight: 1}\n\t\t\tks := strings.Split(w, \"/\")\n\t\t\trs := strings.Split(ks[0], \":\")\n\t\t\tvar wid string\n\t\t\tif len(rs) > 1 {\n\t\t\t\tv, e := strconv.Atoi(rs[0])\n\t\t\t\tif e != nil {\n\t\t\t\t\tln := strconv.Itoa(lineNo)\n\t\t\t\t\tlog.Printf(tr.Value(\"layout.error.format\", \"INT:STRING/INT\", ln, rs[0], w))\n\t\t\t\t\tv = 1\n\t\t\t\t}\n\t\t\t\tif v < 1 {\n\t\t\t\t\tv = 1\n\t\t\t\t}\n\t\t\t\twr.Height = v\n\t\t\t\twid = rs[1]\n\t\t\t} else {\n\t\t\t\twr.Height = 1\n\t\t\t\twid = rs[0]\n\t\t\t}\n\t\t\twr.Widget = strings.ToLower(wid)\n\t\t\tif len(ks) > 1 {\n\t\t\t\tweight, e := strconv.Atoi(ks[1])\n\t\t\t\tif e != nil {\n\t\t\t\t\tln := strconv.Itoa(lineNo)\n\t\t\t\t\tlog.Printf(tr.Value(\"layout.error.format\", \"STRING/INT\", ln, ks[1], w))\n\t\t\t\t\tweight = 1\n\t\t\t\t}\n\t\t\t\tif weight < 1 {\n\t\t\t\t\tweight = 1\n\t\t\t\t}\n\t\t\t\twr.Weight = float64(weight)\n\t\t\t\tif len(ks) > 2 {\n\t\t\t\t\tln := strconv.Itoa(lineNo)\n\t\t\t\t\tlog.Printf(tr.Value(\"layout.error.slashes\", ln, w))\n\t\t\t\t}\n\t\t\t\tweightTotal += weight\n\t\t\t} else {\n\t\t\t\tweightTotal++\n\t\t\t}\n\t\t\trow = append(row, wr)\n\t\t}\n\t\t// Prevent tricksy users from breaking their own computers\n\t\tif weightTotal <= 1 {\n\t\t\tweightTotal = 1\n\t\t}\n\t\tfor i, w := range row {\n\t\t\trow[i].Weight = w.Weight / float64(weightTotal)\n\t\t}\n\t\trv.Rows = append(rv.Rows, row)\n\t}\n\treturn rv\n}", "func (v *IconView) native() *C.GtkIconView {\n\tif v == nil || v.GObject == nil {\n\t\treturn nil\n\t}\n\tp := unsafe.Pointer(v.GObject)\n\treturn C.toGtkIconView(p)\n}", "func (v *ListBox) native() *C.GtkListBox {\n\tif v == nil {\n\t\treturn nil\n\t}\n\treturn (*C.GtkListBox)(v.Native())\n}", "func (g GoType) PointerMethod() Expr {\n\tif g == GoBytes {\n\t\treturn \"Bytes\"\n\t}\n\treturn Expr(strings.ToUpper(string(g[:1])) + string(g[1:]))\n}", "func (w *NavigationWidget) Layout(g *gocui.Gui) error {\n\tmaxX, maxY := g.Size()\n\n\tx0, y0, x1, y1 := handleWidgetSize(\n\t\tmaxX,\n\t\tmaxY,\n\t\tint(w.x0*float32(maxX)),\n\t\tint(w.y0*float32(maxY)),\n\t\tint(w.x1+float32(maxX)),\n\t\tint(w.y1*float32(maxY)),\n\t)\n\tif v, err := g.SetView(w.name, x0, y0, x1, y1); err != nil {\n\t\tif !errors.Is(err, gocui.ErrUnknownView) {\n\t\t\treturn err\n\t\t}\n\n\t\tv.Title = w.label\n\n\t\ttmpOptions := make([]string, len(w.options))\n\t\tcopy(tmpOptions, w.options)\n\t\ttmpOptions[0] = green.Sprint(\"Rows\")\n\n\t\tfmt.Fprint(v, strings.Join(tmpOptions, \" \"))\n\t}\n\n\treturn nil\n}", "func (m 
*ModalLayer) Layout(gtx layout.Context, th *material.Theme) layout.Dimensions {\n\tif !m.Visible() {\n\t\treturn D{}\n\t}\n\tif m.Scrim.Clicked() {\n\t\tm.Disappear(gtx.Now)\n\t}\n\tscrimDims := m.Scrim.Layout(gtx, th, &m.VisibilityAnimation)\n\tif m.Widget != nil {\n\t\t_ = m.Widget(gtx, th, &m.VisibilityAnimation)\n\t}\n\treturn scrimDims\n}", "func (v *Variant) Native() uintptr {\n\treturn uintptr(unsafe.Pointer(v.native()))\n}", "func (t TextStyle) Layout(gtx layout.Context) layout.Dimensions {\n\tspans := make([]SpanStyle, len(t.Styles))\n\tcopy(spans, t.Styles)\n\tt.State.reset()\n\n\tvar (\n\t\tlineDims image.Point\n\t\tlineAscent int\n\t\toverallSize image.Point\n\t\tlineShapes []spanShape\n\t\tlineStartIndex int\n\t\tstate *InteractiveSpan\n\t)\n\n\tfor i := 0; i < len(spans); i++ {\n\t\t// grab the next span\n\t\tspan := spans[i]\n\n\t\t// constrain the width of the line to the remaining space\n\t\tmaxWidth := gtx.Constraints.Max.X - lineDims.X\n\n\t\t// shape the text of the current span\n\t\tlines := t.Shaper.LayoutString(span.Font, fixed.I(gtx.Px(span.Size)), maxWidth, span.Content)\n\n\t\t// grab the first line of the result and compute its dimensions\n\t\tfirstLine := lines[0]\n\t\tspanWidth := firstLine.Width.Ceil()\n\t\tspanHeight := (firstLine.Ascent + firstLine.Descent).Ceil()\n\t\tspanAscent := firstLine.Ascent.Ceil()\n\n\t\t// store the text shaping results for the line\n\t\tlineShapes = append(lineShapes, spanShape{\n\t\t\toffset: image.Point{X: lineDims.X},\n\t\t\tsize: image.Point{X: spanWidth, Y: spanHeight},\n\t\t\tlayout: firstLine.Layout,\n\t\t})\n\n\t\t// update the dimensions of the current line\n\t\tlineDims.X += spanWidth\n\t\tif lineDims.Y < spanHeight {\n\t\t\tlineDims.Y = spanHeight\n\t\t}\n\t\tif lineAscent < spanAscent {\n\t\t\tlineAscent = spanAscent\n\t\t}\n\n\t\t// update the width of the overall text\n\t\tif overallSize.X < lineDims.X {\n\t\t\toverallSize.X = lineDims.X\n\t\t}\n\n\t\t// if we are breaking the current span across lines or we are on the\n\t\t// last span, lay out all of the spans for the line.\n\t\tif len(lines) > 1 || i == len(spans)-1 {\n\t\t\tfor i, shape := range lineShapes {\n\t\t\t\t// lay out this span\n\t\t\t\tspan = spans[i+lineStartIndex]\n\t\t\t\tshape.offset.Y = overallSize.Y + lineAscent\n\t\t\t\tspan.Layout(gtx, t.Shaper, shape)\n\n\t\t\t\tif !span.Interactive {\n\t\t\t\t\tstate = nil\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// grab an interactive state and lay it out atop the text.\n\t\t\t\t// If we still have a state, this line is a continuation of\n\t\t\t\t// the previous span and we should use the same state.\n\t\t\t\tif state == nil {\n\t\t\t\t\tstate = t.State.next()\n\t\t\t\t\tstate.contents = span.Content\n\t\t\t\t\tstate.metadata = span.metadata\n\t\t\t\t}\n\t\t\t\t// set this offset to the upper corner of the text, not the lower\n\t\t\t\tshape.offset.Y -= lineDims.Y\n\t\t\t\toffStack := op.Offset(layout.FPt(shape.offset)).Push(gtx.Ops)\n\t\t\t\tpr := pointer.Rect(image.Rectangle{Max: shape.size}).Push(gtx.Ops)\n\t\t\t\tstate.Layout(gtx)\n\t\t\t\tpointer.CursorNameOp{Name: pointer.CursorPointer}.Add(gtx.Ops)\n\t\t\t\tpr.Pop()\n\t\t\t\toffStack.Pop()\n\t\t\t\t// ensure that we request new state for each interactive text\n\t\t\t\t// that isn't breaking across a line.\n\t\t\t\tif i < len(lineShapes)-1 {\n\t\t\t\t\tstate = nil\n\t\t\t\t}\n\t\t\t}\n\t\t\t// reset line shaping data and update overall vertical dimensions\n\t\t\tlineShapes = lineShapes[:0]\n\t\t\toverallSize.Y += lineDims.Y\n\t\t}\n\n\t\t// if the 
current span breaks across lines\n\t\tif len(lines) > 1 {\n\t\t\t// mark where the next line to be laid out starts\n\t\t\tlineStartIndex = i + 1\n\t\t\tlineDims = image.Point{}\n\t\t\tlineAscent = 0\n\n\t\t\t// if this span isn't interactive, don't use the same interaction\n\t\t\t// state on the next line.\n\t\t\tif !span.Interactive {\n\t\t\t\tstate = nil\n\t\t\t}\n\n\t\t\t// ensure the spans slice has room for another span\n\t\t\tspans = append(spans, SpanStyle{})\n\t\t\t// shift existing spans further\n\t\t\tfor k := len(spans) - 1; k > i+1; k-- {\n\t\t\t\tspans[k] = spans[k-1]\n\t\t\t}\n\t\t\t// synthesize and insert a new span\n\t\t\tspan.Content = span.Content[len(firstLine.Layout.Text):]\n\t\t\tspans[i+1] = span\n\t\t}\n\t}\n\n\treturn layout.Dimensions{Size: gtx.Constraints.Constrain(overallSize)}\n}", "func (v *LinkButton) native() *C.GtkLinkButton {\n\tif v == nil {\n\t\treturn nil\n\t}\n\tptr := unsafe.Pointer(v.Object.Native())\n\treturn C.toGtkLinkButton(ptr)\n}", "func (self Label) ToNative() unsafe.Pointer {\n\treturn unsafe.Pointer(self.object)\n}", "func (r *snpUserGuestRequestConversion) Pointer() unsafe.Pointer {\n\treturn unsafe.Pointer(&r.abi)\n}", "func (v *ShortcutsGroup) native() *C.GtkShortcutsGroup {\n\tif v == nil || v.GObject == nil {\n\t\treturn nil\n\t}\n\tp := unsafe.Pointer(v.GObject)\n\treturn C.toGtkShortcutsGroup(p)\n}", "func (w *ButtonWidget) Layout(g *gocui.Gui) error {\n\t_, maxY := g.Size()\n\n\ty := int(w.y * float32(maxY))\n\n\tv, err := g.SetView(w.name, w.x, y, w.x+w.w, y+2)\n\tif err != nil {\n\t\tif err != gocui.ErrUnknownView {\n\t\t\treturn err\n\t\t}\n\n\t\tv.FrameColor = w.color\n\n\t\tfmt.Fprint(v, w.label)\n\t}\n\n\treturn nil\n}", "func nativeLayer() native { return &lin{} }", "func (v *Event) Native() uintptr {\n\treturn uintptr(unsafe.Pointer(v.native()))\n}", "func (w *EditorWidget) Layout(g *gocui.Gui) error {\n\tmaxX, maxY := g.Size()\n\n\tx0, y0, x1, y1 := handleWidgetSize(\n\t\tmaxX,\n\t\tmaxY,\n\t\tint(w.x0*float32(maxX)),\n\t\tint(w.y0*float32(maxY)),\n\t\tint(w.x1+float32(maxX)),\n\t\tint(w.y1*float32(maxY)),\n\t)\n\tif v, err := g.SetView(w.name, x0, y0, x1, y1); err != nil {\n\t\tif !errors.Is(err, gocui.ErrUnknownView) {\n\t\t\treturn err\n\t\t}\n\n\t\tv.Title = w.label\n\t\tv.Editable = true\n\t\tv.Wrap = true\n\t\tv.Highlight = true\n\n\t\tif _, err := g.SetCurrentView(w.name); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (v *ScaleButton) GetAdjustment() (*Adjustment, error) {\n\tc := C.gtk_scale_button_get_adjustment(v.native())\n\tif c == nil {\n\t\treturn nil, nilPtrErr\n\t}\n\tobj := glib.Take(unsafe.Pointer(c))\n\treturn wrapAdjustment(obj), nil\n}", "func (o *GroupWidgetDefinition) GetLayoutType() WidgetLayoutType {\n\tif o == nil {\n\t\tvar ret WidgetLayoutType\n\t\treturn ret\n\t}\n\treturn o.LayoutType\n}", "func (v *mandelbrotViewer) Layout(outsideWidth, outsideHeight int) (int, int) {\n\treturn *widthFlag, *heightFlag\n}", "func (m ConsistentType) Pointer() *ConsistentType {\n\treturn &m\n}", "func InitLayout(dataSize int64, dataAndTreeInSameFile bool) Layout {\n\tlayout := Layout{\n\t\tblockSize: usermem.PageSize,\n\t\t// TODO(b/156980949): Allow config other hash methods (SHA384/SHA512).\n\t\tdigestSize: sha256DigestSize,\n\t}\n\n\t// treeStart is the offset (in bytes) of the first level of the tree in\n\t// the file. If data and tree are in different files, treeStart should\n\t// be zero. 
If data is in the same file as the tree, treeStart points\n\t// to the block after the last data block (which may be zero-padded).\n\tvar treeStart int64\n\tif dataAndTreeInSameFile {\n\t\ttreeStart = dataSize\n\t\tif dataSize%layout.blockSize != 0 {\n\t\t\ttreeStart += layout.blockSize - dataSize%layout.blockSize\n\t\t}\n\t}\n\n\tnumBlocks := (dataSize + layout.blockSize - 1) / layout.blockSize\n\tlevel := 0\n\toffset := int64(0)\n\n\t// Calculate the number of levels in the Merkle tree and the beginning\n\t// offset of each level. Level 0 consists of the leaf nodes that\n\t// contain the hashes of the data blocks, while level numLevels - 1 is\n\t// the root.\n\tfor numBlocks > 1 {\n\t\tlayout.levelOffset = append(layout.levelOffset, treeStart+offset*layout.blockSize)\n\t\t// Round numBlocks up to fill up a block.\n\t\tnumBlocks += (layout.hashesPerBlock() - numBlocks%layout.hashesPerBlock()) % layout.hashesPerBlock()\n\t\toffset += numBlocks / layout.hashesPerBlock()\n\t\tnumBlocks = numBlocks / layout.hashesPerBlock()\n\t\tlevel++\n\t}\n\tlayout.levelOffset = append(layout.levelOffset, treeStart+offset*layout.blockSize)\n\n\treturn layout\n}", "func (o *GroupWidgetDefinition) GetLayoutTypeOk() (*WidgetLayoutType, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.LayoutType, true\n}", "func (service *Soap) DescribeApprovalLayout(request *DescribeApprovalLayout) (*DescribeApprovalLayoutResponse, error) {\n\tresponse := new(DescribeApprovalLayoutResponse)\n\terr := service.client.Call(request, response, service.responseHeader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn response, nil\n}", "func (v *ThemedIcon) native() *C.GThemedIcon {\n\tif v == nil {\n\t\treturn nil\n\t}\n\tptr := unsafe.Pointer(v.Object.Native())\n\treturn C.toGThemedIcon(ptr)\n}", "func (s *WidgetBase) LayoutMode() LayoutMode {\n\treturn s.layout\n}", "func (pg *walletPage) Layout(gtx layout.Context) layout.Dimensions {\n\tcommon := pg.common\n\tif *pg.refreshPage {\n\t\tcommon.refreshWindow()\n\t\t*pg.refreshPage = false\n\t}\n\n\tif common.info.LoadedWallets == 0 {\n\t\treturn common.Layout(gtx, func(gtx C) D {\n\t\t\treturn common.UniformPadding(gtx, func(gtx C) D {\n\t\t\t\treturn layout.Center.Layout(gtx, func(gtx C) D {\n\t\t\t\t\treturn common.theme.H3(values.String(values.StrNoWalletLoaded)).Layout(gtx)\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t}\n\n\tfor index := 0; index < common.info.LoadedWallets; index++ {\n\t\tif common.info.Wallets[index].IsWatchingOnly {\n\t\t\tif _, ok := pg.watchOnlyWalletMoreButtons[index]; !ok {\n\t\t\t\tpg.watchOnlyWalletMoreButtons[index] = decredmaterial.IconButton{\n\t\t\t\t\tIconButtonStyle: material.IconButtonStyle{\n\t\t\t\t\t\tButton: new(widget.Clickable),\n\t\t\t\t\t\tIcon: common.icons.navigationMore,\n\t\t\t\t\t\tSize: values.MarginPadding25,\n\t\t\t\t\t\tBackground: color.NRGBA{},\n\t\t\t\t\t\tColor: common.theme.Color.Text,\n\t\t\t\t\t\tInset: layout.UniformInset(values.MarginPadding0),\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tif _, ok := pg.collapsibles[index]; !ok {\n\t\t\t\taddAcctBtn := common.theme.IconButton(new(widget.Clickable), common.icons.contentAdd)\n\t\t\t\taddAcctBtn.Inset = layout.UniformInset(values.MarginPadding0)\n\t\t\t\taddAcctBtn.Size = values.MarginPadding25\n\t\t\t\taddAcctBtn.Background = color.NRGBA{}\n\t\t\t\taddAcctBtn.Color = common.theme.Color.Text\n\n\t\t\t\tbackupBtn := common.theme.PlainIconButton(new(widget.Clickable), common.icons.navigationArrowForward)\n\t\t\t\tbackupBtn.Color = 
common.theme.Color.Surface\n\t\t\t\tbackupBtn.Inset = layout.UniformInset(values.MarginPadding0)\n\t\t\t\tbackupBtn.Size = values.MarginPadding20\n\n\t\t\t\tpg.collapsibles[index] = collapsible{\n\t\t\t\t\tcollapsible: pg.theme.CollapsibleWithOption(),\n\t\t\t\t\taddAcctBtn: addAcctBtn,\n\t\t\t\t\tbackupAcctBtn: backupBtn,\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t}\n\n\tpageContent := []func(gtx C) D{\n\t\tfunc(gtx C) D {\n\t\t\treturn pg.walletSection(gtx, common)\n\t\t},\n\t\tfunc(gtx C) D {\n\t\t\treturn pg.watchOnlyWalletSection(gtx, common)\n\t\t},\n\t}\n\n\tbody := func(gtx C) D {\n\t\treturn layout.Stack{Alignment: layout.SE}.Layout(gtx,\n\t\t\tlayout.Expanded(func(gtx C) D {\n\t\t\t\treturn pg.container.Layout(gtx, len(pageContent), func(gtx C, i int) D {\n\t\t\t\t\tdims := layout.UniformInset(values.MarginPadding5).Layout(gtx, pageContent[i])\n\t\t\t\t\tif pg.isAddWalletMenuOpen || pg.openPopupIndex != -1 {\n\t\t\t\t\t\tdims.Size.Y += 60\n\t\t\t\t\t}\n\t\t\t\t\treturn dims\n\t\t\t\t})\n\t\t\t}),\n\t\t\tlayout.Stacked(func(gtx C) D {\n\t\t\t\treturn pg.layoutAddWalletSection(gtx, common)\n\t\t\t}),\n\t\t)\n\t}\n\n\treturn common.Layout(gtx, func(gtx C) D {\n\t\treturn layout.Stack{}.Layout(gtx,\n\t\t\tlayout.Expanded(func(gtx C) D {\n\t\t\t\treturn common.UniformPadding(gtx, body)\n\t\t\t}),\n\t\t\tlayout.Expanded(func(gtx C) D {\n\t\t\t\tif pg.isAddWalletMenuOpen || pg.openPopupIndex != -1 {\n\t\t\t\t\thalfHeight := gtx.Constraints.Max.Y / 2\n\t\t\t\t\treturn pg.container.Layout(gtx, len(pg.backdrops), func(gtx C, i int) D {\n\t\t\t\t\t\tgtx.Constraints.Min.Y = halfHeight\n\t\t\t\t\t\treturn pg.backdrops[i].Layout(gtx)\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t\treturn D{}\n\t\t\t}),\n\t\t)\n\t})\n}", "func (self Separator) ToNative() unsafe.Pointer {\n\treturn unsafe.Pointer(self.object)\n}", "func (gui *Gui) Layout() tview.Primitive {\n\tgui.pages.AddPage(\"help\", newHelpView(), true, true)\n\n\tgui.rootFlex = tview.NewFlex().SetDirection(tview.FlexRow).AddItem(gui.pages, 0, 1, true)\n\tgui.rootFlex.AddItem(gui.navBar, 1, 1, true)\n\treturn gui.rootFlex\n}", "func (t TruncatingLabelStyle) Layout(gtx layout.Context) layout.Dimensions {\n\toriginalMaxX := gtx.Constraints.Max.X\n\tgtx.Constraints.Max.X *= 2\n\tasLabel := material.LabelStyle(t)\n\tasLabel.MaxLines = 1\n\tmacro := op.Record(gtx.Ops)\n\tdimensions := asLabel.Layout(gtx)\n\tlabelOp := macro.Stop()\n\tif dimensions.Size.X <= originalMaxX {\n\t\t// No need to truncate\n\t\tlabelOp.Add(gtx.Ops)\n\t\treturn dimensions\n\t}\n\tgtx.Constraints.Max.X = originalMaxX\n\ttruncationIndicator := asLabel\n\ttruncationIndicator.Text = \"…\"\n\treturn layout.Flex{Alignment: layout.Middle}.Layout(gtx,\n\t\tlayout.Flexed(1, func(gtx C) D {\n\t\t\treturn asLabel.Layout(gtx)\n\t\t}),\n\t\tlayout.Rigid(func(gtx C) D {\n\t\t\treturn truncationIndicator.Layout(gtx)\n\t\t}),\n\t)\n}", "func (tv *TwinTextViews) TextViewLays() (*gi.Layout, *gi.Layout) {\n\ta := tv.Child(0).(*gi.Layout)\n\tb := tv.Child(1).(*gi.Layout)\n\treturn a, b\n}", "func (mv *MultipleView) layout(t *theme.Theme) {\n\tmv.root.Wrappee().Rect = mv.Rect\n\tmv.root.Layout(t)\n}", "func (*Plasma) Layout(_, _ int) (int, int) {\r\n\treturn Width, Height\r\n}", "func (ow *OutputWidget) Layout(g *gocui.Gui) error {\n\treturn ow.Widget.Layout(\n\t\tow.viewName(),\n\t\tg,\n\t\tfunc(v *gocui.View) {\n\t\t\tif ow.Focus {\n\t\t\t\tv.BgColor = gocui.ColorBlack\n\t\t\t\tv.FgColor = gocui.ColorWhite\n\t\t\t} else {\n\t\t\t\tv.BgColor = 0\n\t\t\t\tv.FgColor = 0\n\t\t\t}\n\t\t\tif v.Autoscroll && ow.Focus 
{\n\t\t\t\t_, h := v.Size()\n\t\t\t\tl := len(v.BufferLines())\n\t\t\t\tv.Autoscroll = false\n\t\t\t\tow.OriginY = l - h\n\t\t\t}\n\t\t},\n\t)\n}", "func (self Statusbar) ToNative() unsafe.Pointer {\n\treturn unsafe.Pointer(self.object)\n}", "func (v *MainLoop) Native() *C.GMainLoop {\n\tif v == nil || v.ptr == nil {\n\t\treturn nil\n\t}\n\treturn (*C.GMainLoop)(v.ptr)\n}", "func (m PartitionUsage) Pointer() *PartitionUsage {\n\treturn &m\n}", "func makeLayoutStack() layoutStack {\n\treturn layoutStack{\n\t\tdata: []layoutStackObj{{layout: geom.NoLayout, inBaseTypeCollection: true}},\n\t}\n}", "func (m VipType) Pointer() *VipType {\n\treturn &m\n}" ]
[ "0.539174", "0.52555996", "0.52151483", "0.52117836", "0.51357627", "0.5078264", "0.5049439", "0.49843845", "0.49271524", "0.4918036", "0.48844993", "0.48744643", "0.48683408", "0.4788148", "0.4753523", "0.4732755", "0.46843067", "0.4650018", "0.46077663", "0.45780843", "0.4560339", "0.45260024", "0.45145595", "0.44907185", "0.44874886", "0.44376403", "0.4427173", "0.4426788", "0.44196057", "0.44113117", "0.44037464", "0.44025314", "0.43833274", "0.43821752", "0.4380042", "0.4373036", "0.4361381", "0.4360236", "0.43366253", "0.43332678", "0.43246272", "0.43134332", "0.43092227", "0.43054026", "0.42990744", "0.42839357", "0.4278575", "0.42762274", "0.42756867", "0.4266966", "0.42585555", "0.4250188", "0.42437", "0.4239075", "0.4237895", "0.42266756", "0.42263624", "0.42202133", "0.4211541", "0.42078185", "0.4204068", "0.42001927", "0.41980332", "0.41918853", "0.41859147", "0.41815913", "0.41719565", "0.41704956", "0.41698903", "0.4157048", "0.41554627", "0.41514695", "0.414651", "0.41439527", "0.4143531", "0.4141654", "0.41348147", "0.4118148", "0.41173542", "0.41139516", "0.4102779", "0.40983805", "0.4095388", "0.4095203", "0.40925035", "0.4091796", "0.4091665", "0.40800473", "0.40714142", "0.40649948", "0.40611503", "0.40572393", "0.40554234", "0.40552968", "0.40541705", "0.4049246", "0.4038208", "0.4037929", "0.40330404", "0.4030434" ]
0.417509
66
Fields of the UnitOfMedicine.
func (UnitOfMedicine) Fields() []ent.Field {
	return []ent.Field{
		field.String("name"),
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (Medicalfile) Fields() []ent.Field {\n return []ent.Field{\n\t\tfield.String(\"detail\").NotEmpty(),\n\t\tfield.Time(\"added_time\"),\n }\n}", "func (m *ShowMeasurementsMapper) Fields() []string { return []string{\"name\"} }", "func (Medicaltreatmentrights) Fields() []ent.Field {\n\treturn nil\n}", "func (Physician) Fields() []ent.Field {\n return []ent.Field{\n field.String(\"Physicianname\"),\n field.String(\"Physicianemail\"). \n Unique(), \n field.String(\"Password\"), \n } \n}", "func (Patientroom) Fields() []ent.Field {\n return []ent.Field{\n field.String(\"Typeroom\").\n Unique(),\n \n } \n}", "func (Roomtype) Fields() []ent.Field {\n return []ent.Field{\n field.Int(\"ROOMPRICE\"),\n\t field.String(\"TYPEDEATAIL\").\n\t \t NotEmpty(),\n }\n}", "func (PatientInfo) Fields() []ent.Field {\r\n\treturn []ent.Field{\r\n\t\tfield.String(\"cardNumber\").NotEmpty().Unique(),\r\n\t\tfield.String(\"name\").NotEmpty(),\r\n\t\tfield.String(\"gender\").NotEmpty(),\r\n\t\tfield.Int(\"age\").Positive(),\r\n\t}\r\n}", "func (Diagnosis) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"symptom\").NotEmpty(),\n\t\tfield.String(\"Opinionresult\").Validate(func(s string) error {\n\t\t\tmatch, _ := regexp.MatchString(\"[ก-๘]\", s)\n\t\t\tif !match {\n\t\t\t\treturn errors.New(\"กรุณากรอกภาษาไทย [ก-๘] \")\n\t\t\t}\n\t\t\treturn nil\n\t\t}),\n\t\tfield.String(\"note\").MaxLen(25),\n\n\t\t//field.DATE\n\n\t\tfield.Time(\"diagnosisDate\").Default(time.Now),\n\t}\n}", "func (Patientrecord) Fields() []ent.Field {\n\treturn []ent.Field{\n\n field.String(\"Name\"),\n\n }\n}", "func (Operationroom) Fields() []ent.Field {\n return []ent.Field{\n field.String(\"operationroom_name\").NotEmpty(),\n }\n}", "func (DrugAllergy) Fields() []ent.Field {\n return []ent.Field{\n \n }\n }", "func (e Department) EntFields() ent.Fields { return ent_Department_fields }", "func (Dentist) Fields() []ent.Field {\n return []ent.Field{\n\tfield.String(\"name\").NotEmpty(),\n\tfield.Int(\"age\").Positive(),\n\tfield.String(\"cardid\").NotEmpty(),\n\tfield.Time(\"birthday\"),\n\tfield.String(\"experience\").NotEmpty(),\n\tfield.String(\"tel\").NotEmpty(),\n\tfield.String(\"email\").NotEmpty(),\n\tfield.String(\"password\").NotEmpty(),\n\n }\n}", "func (MedicalProcedure) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"procedureOrder\").Validate(func(s string) error {\n match, _ := regexp.MatchString(\"[U]+[N]+[S]\\\\d{6}\" ,s)\n if !match {\n return errors.New(\"รูปแบบรหัสไม่ถูกต้อง\")\n }\n return nil\n }),\n\t\tfield.String(\"procedureRoom\").MaxLen(4).MinLen(4),\n\t\tfield.Time(\"Addtime\"),\n\t\tfield.String(\"procedureDescripe\").NotEmpty(),\n\t}\n}", "func (Examinationroom) Fields() []ent.Field {\n return []ent.Field{\n field.String(\"examinationroom_name\").NotEmpty(),\n }\n }", "func (Dentist) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"Dentist_name\").NotEmpty(),\n\t \n\t}\n}", "func (m *OrgUnitMemberMutation) Fields() []string {\n\tfields := make([]string, 0, 8)\n\tif m.create_by != nil {\n\t\tfields = append(fields, orgunitmember.FieldCreateByUser)\n\t}\n\tif m.update_by != nil {\n\t\tfields = append(fields, orgunitmember.FieldUpdateByUser)\n\t}\n\tif m.create_time != nil {\n\t\tfields = append(fields, orgunitmember.FieldCreateTime)\n\t}\n\tif m.update_time != nil {\n\t\tfields = append(fields, orgunitmember.FieldUpdateTime)\n\t}\n\tif m.is_additional != nil {\n\t\tfields = append(fields, orgunitmember.FieldIsAdditional)\n\t}\n\tif m.user != nil {\n\t\tfields = 
append(fields, orgunitmember.FieldUserID)\n\t}\n\tif m.belongToOrgUnit != nil {\n\t\tfields = append(fields, orgunitmember.FieldOrgUnitID)\n\t}\n\tif m.position != nil {\n\t\tfields = append(fields, orgunitmember.FieldOrgUnitPositionID)\n\t}\n\treturn fields\n}", "func (Detail) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"explain\").Validate(func(s string) error {\n\t\t\tmatch, _ := regexp.MatchString(\"^[ก-๏\\\\s]+$\", s)\n\t\t\tif !match {\n\t\t\t\treturn errors.New(\"กรอกรายละเอียดเป็นภาษาไทยเท่านั้น\")\n\t\t\t}\n\t\t\treturn nil\n\t\t}),\n\t\tfield.String(\"phone\").MaxLen(10).MinLen(10),\n\t\tfield.String(\"email\").Match(regexp.MustCompile(\"^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$\")),\n\t\tfield.String(\"departmentid\").MaxLen(3).MinLen(3),\n\t}\n}", "func (Statusd) Fields() []ent.Field {\n\treturn []ent.Field{\n field.String(\"statusdname\").Unique().NotEmpty(),\n }\n}", "func (FurnitureType) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"furniture_type\").Unique(),\n\t}\n}", "func (Machine) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"hwid\").NotEmpty().\n\t\t\tAnnotations(entproto.Field(2)),\n\t\tfield.String(\"hostname\").NotEmpty().\n\t\t\tAnnotations(entproto.Field(3)),\n\t\tfield.String(\"fingerprint\").NotEmpty().\n\t\t\tAnnotations(entproto.Field(4)),\n\t}\n}", "func (m *OrgUnitMutation) Fields() []string {\n\tfields := make([]string, 0, 6)\n\tif m.create_by != nil {\n\t\tfields = append(fields, orgunit.FieldCreateByUser)\n\t}\n\tif m.update_by != nil {\n\t\tfields = append(fields, orgunit.FieldUpdateByUser)\n\t}\n\tif m.create_time != nil {\n\t\tfields = append(fields, orgunit.FieldCreateTime)\n\t}\n\tif m.update_time != nil {\n\t\tfields = append(fields, orgunit.FieldUpdateTime)\n\t}\n\tif m.name != nil {\n\t\tfields = append(fields, orgunit.FieldName)\n\t}\n\tif m.duty != nil {\n\t\tfields = append(fields, orgunit.FieldDuty)\n\t}\n\treturn fields\n}", "func (c *MedicineClient) QueryUnitOfMedicine(m *Medicine) *UnitOfMedicineQuery {\n\tquery := &UnitOfMedicineQuery{config: c.config}\n\tquery.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {\n\t\tid := m.ID\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(medicine.Table, medicine.FieldID, id),\n\t\t\tsqlgraph.To(unitofmedicine.Table, unitofmedicine.FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.M2O, true, medicine.UnitOfMedicineTable, medicine.UnitOfMedicineColumn),\n\t\t)\n\t\tfromV = sqlgraph.Neighbors(m.driver.Dialect(), step)\n\t\treturn fromV, nil\n\t}\n\treturn query\n}", "func (m Message) UnitOfMeasure() (*field.UnitOfMeasureField, quickfix.MessageRejectError) {\n\tf := &field.UnitOfMeasureField{}\n\terr := m.Body.Get(f)\n\treturn f, err\n}", "func (Bookreturn) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.Time(\"RETURN_TIME\"),\n\t\tfield.Int(\"DAMAGED_POINT\").Range(-1, 10),\n\t\t\n\t\tfield.String(\"DAMAGED_POINTNAME\").Validate(func(s string) error {\n\t\t\tmatch, _ := regexp.MatchString(\"^[a-zA-Z, ]+$\", s)\n\t\t\tif !match {\n\t\t\t\treturn errors.New(\"จุดที่เสียหายเป็นภาษาอังกฤษเท่านั้น เช่น TopFront,BottomBack\")\n\t\t\t}\n\t\t\treturn nil\n\t\t}),\n\t\tfield.String(\"LOST\").Validate(func(s string) error {\n\t\t\tmatch, _ := regexp.MatchString(\"^[a-zA-Z]+$\", s)\n\t\t\tif !match {\n\t\t\t\treturn errors.New(\"ถ้าหายให้พิมพ์ lost ถ้าไม่พิมพ์ no\")\n\t\t\t}\n\t\t\treturn nil\n\t\t}).MaxLen(5),\n\t}\n}", "func (Timing) Fields() 
[]ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"category\"),\n\t\tfield.String(\"timing_label\").StorageKey(\"label\"),\n\t\tfield.String(\"unit\"),\n\t\tfield.String(\"variable\"),\n\t\tfield.Float(\"value\"),\n\t}\n}", "func (m *PurposeMutation) Fields() []string {\n\tfields := make([]string, 0, 1)\n\tif m.objective != nil {\n\t\tfields = append(fields, purpose.FieldObjective)\n\t}\n\treturn fields\n}", "func (Doctor) Fields() []ent.Field {\n return []ent.Field{\n field.String(\"name\").Validate(func(s string) error {\n match, _ := regexp.MatchString(\"^[ก-๏\\\\s]+$\",s)\n if !match {\n return errors.New(\"กรุณากรอกชื่อเป็นภาษาไทยเท่านั้น\")\n }\n return nil\n }),\n // field.String(\"name\").NotEmpty(),\n field.Int(\"age\").Range(26,55),\n field.String(\"email\").Match(regexp.MustCompile(\"^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$\")),\n field.String(\"password\").MaxLen(8).MinLen(8),\n field.String(\"address\").NotEmpty(),\n field.String(\"educational\").NotEmpty(),\n field.String(\"phone\").MaxLen(10).MinLen(10),\n\n }\n}", "func (Usertype) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"name\").NotEmpty(),\n\t}\n}", "func (Status) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"description\").NotEmpty().Unique(),\n\t}\n}", "func (m *ccMetric) Fields() map[string]interface{} {\n\treturn m.fields\n}", "func (Recordfood) Fields() []ent.Field {\n\treturn nil\n}", "func (GoodsSku) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"sku_name\").\n\t\t\tComment(\"sku名称\"),\n\t\tfield.String(\"sku_code\").\n\t\t\tUnique().\n\t\t\tComment(\"sku编码\"),\n\t\tfield.Int(\"stock_num\").\n\t\t\tDefault(0).\n\t\t\tComment(\"库存\"),\n\t\tfield.Int(\"sales_num\").\n\t\t\tDefault(0).\n\t\t\tComment(\"销量\"),\n\t\tfield.Int(\"price\").\n\t\t\tComment(\"价格\"),\n\t}\n}", "func (StockManager) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.Int(\"id\").Positive().Unique(),\n\t\tfield.String(\"Activite\"),\n\t\tfield.Int(\"SemaineA\").Min(-1),\n\t\tfield.Int(\"SemaineB\").Min(-1),\n\t\tfield.Int(\"SemaineC\").Min(-1),\n\t\tfield.Int(\"SemaineD\").Min(-1),\n\t\tfield.Int(\"SemaineE\").Min(-1),\n\t\tfield.Int(\"SemaineF\").Min(-1),\n\t\tfield.Int(\"SemaineG\").Min(-1),\n\t\tfield.Int(\"SemaineH\").Min(-1),\n\t}\n}", "func (Skill) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"name\").NotEmpty(),\n\t\tfield.String(\"type\").NotEmpty(),\n\t\tfield.String(\"job_advancement\"),\n\t\tfield.String(\"icon\").Optional(),\n\t\tfield.String(\"icon_name\").Optional(),\n\t\tfield.Int(\"require_level\").Optional(),\n\t\tfield.String(\"detail\").Optional().Unique(),\n\t\tfield.Int(\"max_level\"),\n\t\tfield.String(\"description\").Optional(),\n\t\tfield.String(\"mechanics_level\").Optional(),\n\t\tfield.String(\"mechanics_detail\").Optional(),\n\t\t// field.Time(\"created_at\").Default(time.Now).Immutable(),\n\t\tfield.Time(\"updated_at\").Default(time.Now).UpdateDefault(time.Now),\n\t}\n}", "func (Hospital) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"hospital\").Unique(),\n\t}\n}", "func (m *RoomdetailMutation) Fields() []string {\n\tfields := make([]string, 0, 6)\n\tif m.roomnumber != nil {\n\t\tfields = append(fields, roomdetail.FieldRoomnumber)\n\t}\n\tif m.roomtypename != nil {\n\t\tfields = append(fields, roomdetail.FieldRoomtypename)\n\t}\n\tif m.roomprice != nil {\n\t\tfields = append(fields, 
roomdetail.FieldRoomprice)\n\t}\n\tif m.phone != nil {\n\t\tfields = append(fields, roomdetail.FieldPhone)\n\t}\n\tif m.sleep != nil {\n\t\tfields = append(fields, roomdetail.FieldSleep)\n\t}\n\tif m.bed != nil {\n\t\tfields = append(fields, roomdetail.FieldBed)\n\t}\n\treturn fields\n}", "func (Metric) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.Time(\"ts\"),\n\t\tfield.Float(\"value\"),\n\n\t\tfield.Uint(\"endpointId\"),\n\t\tfield.Uint(\"typeId\"),\n\t\tfield.Uint(\"nameId\"),\n\t\tfield.Uint(\"labelId\"),\n\n\t\tfield.Uint(\"clusterId\"),\n\t\tfield.Uint(\"nodeId\"),\n\t\tfield.Uint(\"procesId\"),\n\t\tfield.Uint(\"containerId\"),\n\t}\n}", "func (Menu) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"menuname\").NotEmpty(),\n\t\tfield.String(\"ingredient\"),\n\t\tfield.Int(\"calories\").Positive(),\n\t\tfield.Time(\"added_time\"),\n\t}\n}", "func (md *pcpMetricDesc) Unit() MetricUnit { return md.u }", "func (LevelOfDangerous) Fields() []ent.Field {\r\n\treturn []ent.Field{\r\n\t\tfield.String(\"name\"),\r\n\t}\r\n}", "func (Sample) Fields() []ent.Field {\n\treturn nil\n}", "func (Book) Fields() []ent.Field {\n\treturn []ent.Field{\t\t \n\t\tfield.Time(\"RESERVATIONS\"), \n\t}\n}", "func (User) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\t// 账户名。与LDAP同步\n\t\tfield.String(\"account_name\").NotEmpty().MaxLen(64).Unique().Annotations(entgql.OrderField(\"ACCOUNT_NAME\")),\n\t\t// 人员类型:正式、协力(派遣)、实习、外部\n\t\tfield.Enum(\"staff_type\").Values(\"regular\", \"dispatching\", \"intern\", \"external\"),\n\t\t// 是否在职\n\t\tfield.Bool(\"is_on_job\").Default(false),\n\t\t// 姓。与LDAP同步\n\t\tfield.String(\"family_name\").MaxLen(20).NotEmpty().Annotations(entgql.OrderField(\"FAMILY_NAME\")),\n\t\t// 名。与LDAP同步\n\t\tfield.String(\"given_name\").MaxLen(20).NotEmpty(),\n\t\t// 显示名称,这通常不代表姓和名的组合结果。与LDAP同步\n\t\tfield.String(\"display_name\").MaxLen(20).NotEmpty().Annotations(entgql.OrderField(\"DISPLAY_NAME\")),\n\t\t// 出生日期\n\t\tfield.Time(\"birthday\").Optional().Nillable().SchemaType(DateSchemaType),\n\t\t// 身份证号\n\t\tfield.String(\"id_number\").Unique().MaxLen(18).Optional().Nillable(),\n\t\t// 性别\n\t\tfield.Enum(\"sex\").Values(\"male\", \"female\").Optional().Nillable(),\n\t\t// 常用电话\n\t\tfield.String(\"phone_number\").Optional().Unique().MaxLen(20).Nillable(),\n\t\t// 常驻地址\n\t\tfield.String(\"address\").Optional().MaxLen(255).Nillable(),\n\t\t// 员工编号\n\t\tfield.String(\"staff_id\").Optional().MaxLen(64).Unique(),\n\n\t\t// 个人常用邮箱\n\t\tfield.String(\"personal_email\").Optional().MaxLen(64).Unique(),\n\t\t// 内网工作邮箱。与LDAP同步\n\t\tfield.String(\"intranet_work_email\").NotEmpty().MaxLen(64).Unique(),\n\t\t// 外网工作邮箱\n\t\tfield.String(\"extranet_work_email\").Optional().MaxLen(64).Unique(),\n\n\t\t// 用户头像图片地址\n\t}\n}", "func (Financier) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"name\").NotEmpty(),\n\t}\n}", "func (BankingData) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"bank_account\"),\n\t}\n}", "func (game Game) Field() [9]string {\n\treturn game.field\n}", "func (pce *ppdCacheEntry) getFields() (cdd.PrinterDescriptionSection, string, string, lib.DuplexVendorMap) {\n\tpce.mutex.Lock()\n\tdefer pce.mutex.Unlock()\n\treturn pce.description, pce.manufacturer, pce.model, pce.duplexMap\n}", "func (Car) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"model\"),\n\t\tfield.Time(\"registered_at\"),\n\t}\n}", "func (Status) Fields() []ent.Field {\n\treturn nil\n}", "func (Skill) Fields() []ent.Field {\n\treturn 
[]ent.Field{\n\t\tfield.Time(\"created_at\").Default(time.Now).Immutable().Annotations(entproto.Field(2)),\n\t\tfield.Time(\"updated_at\").Default(time.Now).Annotations(entproto.Field(3)),\n\t\tfield.String(\"name\").MaxLen(32).NotEmpty().Annotations(entproto.Field(4)),\n\t\tfield.String(\"logo_url\").Default(\"\").Annotations(entproto.Field(5)),\n\t}\n}", "func (Short) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"key\").NotEmpty().Unique().Immutable(),\n\t\tfield.UUID(\"value\", uuid.UUID{}).Immutable(),\n\t}\n}", "func (Rentalstatus) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"rentalstatus\").Unique(),\n\t}\n}", "func (m *EventRSVPMutation) Fields() []string {\n\tfields := make([]string, 0, 0)\n\treturn fields\n}", "func (m *MedicalrecordstaffMutation) Fields() []string {\n\tfields := make([]string, 0, 0)\n\treturn fields\n}", "func (User) Fields() []ent.Field {\n\t// 添加实体字段\n\treturn []ent.Field{\n\t\tfield.Int(\"age\").Positive(),\n\t\tfield.String(\"name\").NotEmpty(),\n\t\tfield.Bool(\"sex\").Optional(),\n\t\tfield.String(\"address\"),\n\t}\n}", "func (s *Spec) Fields() log.Fields {\n\treturn s.Details.Fields()\n}", "func (e Account) EntFields() ent.Fields { return ent_Account_fields }", "func (e Account) EntFields() ent.Fields { return ent_Account_fields }", "func (Dept) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.Uint(\"id\").StorageKey(\"dept_id\"),\n\t\tfield.String(\"name\"),\n\t\tfield.Uint(\"generation\").Optional().Default(0),\n\t}\n}", "func (InstallmentEntity) Fields() []ent.Field {\n\treturn []ent.Field{\n\n\t\tfield.String(\"name\").NotEmpty(),\n\t\tfield.Int(\"age\").Positive(),\n\t\tfield.String(\"Address\").NotEmpty(),\n\t\tfield.String(\"Phone\").NotEmpty(),\n\t}\n}", "func (m *Float64Measure) Unit() string {\n\treturn m.desc.unit\n}", "func (m *CleaningroomMutation) Fields() []string {\n\tfields := make([]string, 0, 4)\n\tif m.note != nil {\n\t\tfields = append(fields, cleaningroom.FieldNote)\n\t}\n\tif m.dateandstarttime != nil {\n\t\tfields = append(fields, cleaningroom.FieldDateandstarttime)\n\t}\n\tif m.phonenumber != nil {\n\t\tfields = append(fields, cleaningroom.FieldPhonenumber)\n\t}\n\tif m.numofem != nil {\n\t\tfields = append(fields, cleaningroom.FieldNumofem)\n\t}\n\treturn fields\n}", "func (e ProviderWithAudiencesValidationError) Field() string { return e.field }", "func (Meta) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.Time(\"created_at\").\n\t\t\tDefault(time.Now),\n\t\tfield.Time(\"updated_at\").\n\t\t\tDefault(time.Now),\n\t\tfield.String(\"key\"),\n\t\tfield.String(\"value\").MaxLen(4095),\n\t}\n}", "func (Like) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"user_id\"),\n\t}\n}", "func (CounterStaff) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"name\").Unique().NotEmpty(),\n\t\tfield.String(\"email\").Unique().NotEmpty(),\n\t\tfield.String(\"password\").Unique().NotEmpty(),\n\t}\n}", "func (Fund) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"Fund_Name\").NotEmpty(),\n\t}\n}", "func (Nurse) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"nurseName\").NotEmpty(),\n\t\tfield.String(\"nurseUsername\").NotEmpty().Unique(),\n\t\tfield.String(\"nursePassword\").NotEmpty(),\n\t}\n}", "func (Clubapplication) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"contact\").Validate(func(s string) error {\n\t\t\tmatch, _ := regexp.MatchString(\".{10,}\", s)\n\t\t\tif !match {\n\t\t\t\treturn 
errors.New(\"ข้อมูลติดต่อผิดพลาด กรุณาป้อนข้อมูลอยา่งน้อย 10 ตัวอักษร\")\n\t\t\t}\n\t\t\treturn nil\n\t\t}),\n\t\tfield.String(\"reason\"),\n\t\tfield.Time(\"addeddatetime\"),\n\t\tfield.String(\"addername\").Validate(func(n string) error {\n\t\t\tmatch, _ := regexp.MatchString(\"^[A-Za-zก-๙]+[ \\t\\r\\n\\v\\f]+[A-Za-zก-๙]+[^๐-๙]$\", n)\n\t\t\tif !match {\n\t\t\t\treturn errors.New(\"รูปแบบรายชื่อผิดพลาด กรุณาป้อน ชื่อ วรรค นามสกุล อย่างน้อย 1 ตัว\")\n\t\t\t}\n\t\t\treturn nil\n\t\t}),\n\t\tfield.String(\"discipline\").NotEmpty(),\n\t\tfield.String(\"gender\").NotEmpty(),\n\t\tfield.Int(\"age\").Min(1).Max(200),\n\t\tfield.Int(\"yaer\").Positive(),\n\t}\n}", "func (m *HospitalMutation) Fields() []string {\n\tfields := make([]string, 0, 1)\n\tif m.hospital != nil {\n\t\tfields = append(fields, hospital.FieldHospital)\n\t}\n\treturn fields\n}", "func (m *StatusdMutation) Fields() []string {\n\tfields := make([]string, 0, 1)\n\tif m.statusdname != nil {\n\t\tfields = append(fields, statusd.FieldStatusdname)\n\t}\n\treturn fields\n}", "func (Adult) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.Int(\"Amount\").Unique(),\n\t}\n}", "func (User) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"id\").Comment(\"pk\"),\n\t\tfield.Int(\"latest_hero_score\").Default(0),\n\t\tfield.Int(\"better_hero_score\").Default(0),\n\t\tfield.Time(\"better_hero_score_at\").Optional(),\n\t\tfield.String(\"social_user_id\").Default(\"\"),\n\t\tfield.String(\"social_avatar_url\").Default(\"\"),\n\t\tfield.String(\"social_email\").Default(\"\"),\n\t\tfield.String(\"social_name\").Default(\"\"),\n\t\tfield.String(\"social_type\").Default(\"\"),\n\t\tfield.String(\"social_payload\").Default(\"\"),\n\t\tfield.Uint(\"hero_played\").Default(0).Comment(\"完成過hero\"),\n\t\tfield.Uint(\"hero_repeat\").Default(0).Comment(\"重過玩hero\"),\n\t\tfield.Time(\"created_at\").Optional(),\n\t\tfield.Time(\"updated_at\").Optional(),\n\t}\n}", "func (Device) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.Int32(\"id\").Unique(),\n\t\tfield.String(\"device_id\"),\n\t\tfield.Int32(\"app_version\"),\n\t\tfield.String(\"os_version\"),\n\t}\n}", "func (m *Message) Fields() map[string]interface{} {\n\treturn map[string]interface{}{\n\t\t\"groupId\": m.GroupID,\n\t\t\"memberId\": m.MemberID,\n\t\t\"trial\": m.Trial,\n\t}\n}", "func (e MovableObjectValidationError) Field() string { return e.field }", "func (m *TimerMutation) Fields() []string {\n\tfields := make([]string, 0, 10)\n\tif m.description != nil {\n\t\tfields = append(fields, timer.FieldDescription)\n\t}\n\tif m.projectid != nil {\n\t\tfields = append(fields, timer.FieldProjectid)\n\t}\n\tif m.tags != nil {\n\t\tfields = append(fields, timer.FieldTags)\n\t}\n\tif m.timerStart != nil {\n\t\tfields = append(fields, timer.FieldTimerStart)\n\t}\n\tif m.timerEnd != nil {\n\t\tfields = append(fields, timer.FieldTimerEnd)\n\t}\n\tif m.elapsedSeconds != nil {\n\t\tfields = append(fields, timer.FieldElapsedSeconds)\n\t}\n\tif m._IsBilled != nil {\n\t\tfields = append(fields, timer.FieldIsBilled)\n\t}\n\tif m._IsRunning != nil {\n\t\tfields = append(fields, timer.FieldIsRunning)\n\t}\n\tif m.userid != nil {\n\t\tfields = append(fields, timer.FieldUserid)\n\t}\n\tif m.mandantid != nil {\n\t\tfields = append(fields, timer.FieldMandantid)\n\t}\n\treturn fields\n}", "func (u User) Fields() map[string]interface{} {\n\tfields := map[string]interface{}{\n\t\t\"hash\": u.Hash,\n\t\t\"email\": u.Email,\n\t\t\"private_id\": u.PrivateID,\n\t\t\"public_id\": 
u.PublicID,\n\t}\n\n\tif u.Profile != nil {\n\t\tfields[\"profile\"] = u.Profile.Fields()\n\t}\n\n\treturn fields\n}", "func (Personal) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"Name\"),\n\t\tfield.String(\"Email\"),\n\t\tfield.String(\"Password\").\n\t\t\tUnique(),\n\t}\n}", "func (m *RoomTypeMutation) Fields() []string {\n\tfields := make([]string, 0, 2)\n\tif m._RoomType != nil {\n\t\tfields = append(fields, roomtype.FieldRoomType)\n\t}\n\tif m._Cost != nil {\n\t\tfields = append(fields, roomtype.FieldCost)\n\t}\n\treturn fields\n}", "func (Sex) Fields() []ent.Field {\n\treturn []ent.Field{}\n}", "func (Tool) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"name\").\n\t\t\tUnique().\n\t\t\tAnnotations(entproto.Field(4)),\n\t\tfield.Bool(\"powered\").\n\t\t\tDefault(false).\n\t\t\tAnnotations(entproto.Field(5)),\n\t}\n}", "func (cs *CharacterMugshotService) Fields() ([]string, error) {\n\tf, err := cs.client.getFields(cs.end)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"cannot get CharacterMugshot fields\")\n\t}\n\n\treturn f, nil\n}", "func (Card) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.Int64(\"id\"),\n\t\tfield.String(\"name\"),\n\t\tfield.String(\"card_no\"),\n\t\tfield.String(\"ccv\"),\n\t\tfield.String(\"expires\"),\n\t\tfield.Time(\"created_at\").\n\t\t\tDefault(time.Now).SchemaType(map[string]string{\n\t\t\tdialect.MySQL: \"datetime\",\n\t\t}),\n\t\tfield.Time(\"updated_at\").\n\t\t\tDefault(time.Now).SchemaType(map[string]string{\n\t\t\tdialect.MySQL: \"datetime\",\n\t\t}),\n\t}\n}", "func (m *EquipmentMutation) Fields() []string {\n\tfields := make([]string, 0, 4)\n\tif m._EQUIPMENTNAME != nil {\n\t\tfields = append(fields, equipment.FieldEQUIPMENTNAME)\n\t}\n\tif m._EQUIPMENTAMOUNT != nil {\n\t\tfields = append(fields, equipment.FieldEQUIPMENTAMOUNT)\n\t}\n\tif m._EQUIPMENTDETAIL != nil {\n\t\tfields = append(fields, equipment.FieldEQUIPMENTDETAIL)\n\t}\n\tif m._EQUIPMENTDATE != nil {\n\t\tfields = append(fields, equipment.FieldEQUIPMENTDATE)\n\t}\n\treturn fields\n}", "func (User) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"field1\"),\n\t\tfield.String(\"field2\"),\n\t\tfield.String(\"field3\").Unique(),\n\t\tfield.String(\"first_name\"),\n\t\tfield.String(\"last_name\"),\n\t}\n}", "func (m *DiagnosisMutation) Fields() []string {\n\tfields := make([]string, 0, 3)\n\tif m._DiagnosticMessages != nil {\n\t\tfields = append(fields, diagnosis.FieldDiagnosticMessages)\n\t}\n\tif m._SurveillancePeriod != nil {\n\t\tfields = append(fields, diagnosis.FieldSurveillancePeriod)\n\t}\n\tif m._DiagnosisDate != nil {\n\t\tfields = append(fields, diagnosis.FieldDiagnosisDate)\n\t}\n\treturn fields\n}", "func (User) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"name\").\n\t\t\tComment(\"This PII field is encrypted before store in the database\").\n\t\t\tOptional(),\n\t\tfield.String(\"nickname\").\n\t\t\tComment(\"This field is stored as-is in the database\"),\n\t}\n}", "func (m *EquipmenttypeMutation) Fields() []string {\n\tfields := make([]string, 0, 1)\n\tif m._EQUIPMENTTYPE != nil {\n\t\tfields = append(fields, equipmenttype.FieldEQUIPMENTTYPE)\n\t}\n\treturn fields\n}", "func (db *DB) GetUnit(ctx context.Context, um *internal.UnitMeta, fields internal.FieldSet) (_ *internal.Unit, err error) {\n\tdefer derrors.Wrap(&err, \"GetUnit(ctx, %q, %q, %q)\", um.Path, um.ModulePath, um.Version)\n\tif experiment.IsActive(ctx, internal.ExperimentGetUnitWithOneQuery) && 
fields&internal.WithDocumentation|fields&internal.WithReadme != 0 {\n\t\treturn db.getUnitWithAllFields(ctx, um)\n\t}\n\n\tdefer middleware.ElapsedStat(ctx, \"GetUnit\")()\n\tpathID, err := db.getPathID(ctx, um.Path, um.ModulePath, um.Version)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tu := &internal.Unit{UnitMeta: *um}\n\tif fields&internal.WithReadme != 0 {\n\t\tvar readme *internal.Readme\n\t\tif experiment.IsActive(ctx, internal.ExperimentUnitPage) {\n\t\t\treadme, err = db.getReadme(ctx, pathID)\n\t\t} else {\n\t\t\treadme, err = db.getModuleReadme(ctx, u.ModulePath, u.Version)\n\t\t}\n\t\tif err != nil && !errors.Is(err, derrors.NotFound) {\n\t\t\treturn nil, err\n\t\t}\n\t\tu.Readme = readme\n\t}\n\tif fields&internal.WithDocumentation != 0 {\n\t\tdoc, err := db.getDocumentation(ctx, pathID)\n\t\tif err != nil && !errors.Is(err, derrors.NotFound) {\n\t\t\treturn nil, err\n\t\t}\n\t\tu.Documentation = doc\n\t}\n\tif fields&internal.WithImports != 0 {\n\t\timports, err := db.getImports(ctx, pathID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(imports) > 0 {\n\t\t\tu.Imports = imports\n\t\t\tu.NumImports = len(imports)\n\t\t}\n\t}\n\tif fields&internal.WithLicenses != 0 {\n\t\tlics, err := db.getLicenses(ctx, u.Path, u.ModulePath, pathID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tu.LicenseContents = lics\n\t}\n\tif fields&internal.WithSubdirectories != 0 {\n\t\tpkgs, err := db.getPackagesInUnit(ctx, u.Path, u.ModulePath, u.Version)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tu.Subdirectories = pkgs\n\t}\n\tif db.bypassLicenseCheck {\n\t\tu.IsRedistributable = true\n\t} else {\n\t\tu.RemoveNonRedistributableData()\n\t}\n\treturn u, nil\n}", "func (d *Driver) UnitInfo() dex.UnitInfo {\n\treturn dexeth.UnitInfo\n}", "func (m *PatientrightstypeMutation) Fields() []string {\n\tfields := make([]string, 0, 3)\n\tif m._Permission != nil {\n\t\tfields = append(fields, patientrightstype.FieldPermission)\n\t}\n\tif m._PermissionArea != nil {\n\t\tfields = append(fields, patientrightstype.FieldPermissionArea)\n\t}\n\tif m._Responsible != nil {\n\t\tfields = append(fields, patientrightstype.FieldResponsible)\n\t}\n\treturn fields\n}", "func (m *PatientrecordMutation) Fields() []string {\n\tfields := make([]string, 0, 1)\n\tif m._Name != nil {\n\t\tfields = append(fields, patientrecord.FieldName)\n\t}\n\treturn fields\n}", "func (m *InsuranceMutation) Fields() []string {\n\tfields := make([]string, 0, 1)\n\tif m._Insurancecompany != nil {\n\t\tfields = append(fields, insurance.FieldInsurancecompany)\n\t}\n\treturn fields\n}", "func (m *AbilitypatientrightsMutation) Fields() []string {\n\tfields := make([]string, 0, 3)\n\tif m._Operative != nil {\n\t\tfields = append(fields, abilitypatientrights.FieldOperative)\n\t}\n\tif m._MedicalSupplies != nil {\n\t\tfields = append(fields, abilitypatientrights.FieldMedicalSupplies)\n\t}\n\tif m._Examine != nil {\n\t\tfields = append(fields, abilitypatientrights.FieldExamine)\n\t}\n\treturn fields\n}", "func (e Transaction_MetaValidationError) Field() string { return e.field }", "func (m *RoomInfoMutation) Fields() []string {\n\tfields := make([]string, 0, 1)\n\tif m._Info != nil {\n\t\tfields = append(fields, roominfo.FieldInfo)\n\t}\n\treturn fields\n}", "func (m *MyMetric) FieldList() []*protocol.Field {\n\treturn m.Fields\n}", "func (Builder) Fields() []ent.Field {\n\treturn nil\n}" ]
[ "0.6509575", "0.6264373", "0.6188049", "0.6157899", "0.6049675", "0.60006136", "0.5949632", "0.58961743", "0.58914924", "0.58752203", "0.5866089", "0.58512133", "0.5821629", "0.58048886", "0.57917094", "0.5787944", "0.5744302", "0.5719868", "0.57150173", "0.568302", "0.56660974", "0.5651874", "0.56449443", "0.55937874", "0.55859995", "0.55091476", "0.5503054", "0.54784995", "0.54662704", "0.546212", "0.5461328", "0.54448575", "0.54305655", "0.54121554", "0.5411341", "0.5404881", "0.54004085", "0.5373511", "0.5336374", "0.5328284", "0.5306069", "0.53038377", "0.5291005", "0.528942", "0.5281205", "0.52702135", "0.524325", "0.52382296", "0.52315325", "0.52102494", "0.52072847", "0.51948845", "0.5186777", "0.518439", "0.51838535", "0.5182902", "0.5176642", "0.517286", "0.517286", "0.5170305", "0.515661", "0.5156205", "0.5124822", "0.5117076", "0.51152384", "0.51150435", "0.5110167", "0.5109385", "0.5108518", "0.51079035", "0.51039714", "0.50875205", "0.5086956", "0.50734955", "0.50721115", "0.5070009", "0.5055302", "0.5055181", "0.50511885", "0.5047894", "0.50476366", "0.5041088", "0.5038664", "0.5038411", "0.50351834", "0.50243175", "0.50219697", "0.5020493", "0.50093013", "0.5008648", "0.5007975", "0.50069773", "0.50046253", "0.49995002", "0.49994606", "0.49989462", "0.49988124", "0.4997433", "0.4995552", "0.49955454" ]
0.81105506
0
Edges of the UnitOfMedicine.
func (UnitOfMedicine) Edges() []ent.Edge {
	return []ent.Edge{
		edge.To("Medicine", Medicine.Type),
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (Physician) Edges() []ent.Edge {\n return []ent.Edge{\n edge.To(\"Physician\", Patientofphysician.Type),\n \n }\n}", "func (LevelOfDangerous) Edges() []ent.Edge {\r\n\treturn []ent.Edge{\r\n\t\tedge.To(\"Medicine\", Medicine.Type),\r\n\t}\r\n}", "func (Medicalfile) Edges() []ent.Edge {\n return []ent.Edge{\n edge.From(\"dentist\", Dentist.Type).Ref(\"medicalfiles\").Unique(),\n edge.From(\"patient\", Patient.Type).Ref(\"medicalfiles\").Unique(),\n edge.From(\"nurse\", Nurse.Type).Ref(\"medicalfiles\").Unique(),\n edge.From(\"medicalcare\", MedicalCare.Type).Ref(\"medicalfiles\").Unique(),\n edge.To(\"dentalexpenses\", DentalExpense.Type).StorageKey(edge.Column(\"medicalfile_id\")),\n }\n}", "func (Patientroom) Edges() []ent.Edge {\n return []ent.Edge{\n edge.To(\"Patientroom\", Patientofphysician.Type),\n }\n}", "func (g UGraphMat) UEdges() []Edge {\n\treturn g.GraphObject.Edges()\n}", "func (Operationroom) Edges() []ent.Edge {\n return []ent.Edge{\n edge.To(\"operationroom_id\",Booking.Type),\n }\n}", "func (Medicaltreatmentrights) Edges() []ent.Edge {\n\treturn nil\n}", "func (Dentist) Edges() []ent.Edge {\n return []ent.Edge{\n\tedge.From(\"nurse\", Nurse.Type).Ref(\"dentists\").Unique(),\n\tedge.From(\"degree\", Degree.Type).Ref(\"dentists\").Unique(),\n\tedge.From(\"expert\", Expert.Type).Ref(\"dentists\").Unique(),\n\tedge.From(\"gender\", Gender.Type).Ref(\"dentists\").Unique(),\n\n\tedge.To(\"medicalfiles\", Medicalfile.Type).StorageKey(edge.Column(\"dentist_id\")),\n\tedge.To(\"queue\", Queue.Type).StorageKey(edge.Column(\"dentist_id\")),\n\tedge.To(\"appointment\", Appointment.Type).StorageKey(edge.Column(\"dentist_id\")),\n\t\n }\n}", "func (g Undirect) Edge(uid, vid int64) Edge { return g.EdgeBetween(uid, vid) }", "func (MedicalProcedure) Edges() []ent.Edge {\n\treturn []ent.Edge{\n\t\tedge.From(\"Patient\", Patient.Type).Ref(\"PatientToMedicalProcedure\").Unique(),\n\t\tedge.From(\"ProcedureType\", ProcedureType.Type).Ref(\"ProcedureToMedicalProcedure\").Unique(),\n\t\tedge.From(\"Doctor\", Doctor.Type).Ref(\"DoctorToMedicalProcedure\").Unique(),\n\t}\n}", "func (DrugAllergy) Edges() []ent.Edge {\n return []ent.Edge{\n edge. From(\"doctor\",Doctor.Type).Ref(\"Doctor_DrugAllergy\").Unique(),\n edge. From(\"patient\",Patient.Type).Ref(\"Patient_DrugAllergy\").Unique(),\n edge. From(\"medicine\",Medicine.Type).Ref(\"Medicine_DrugAllergy\").Unique(),\n edge. 
From(\"manner\",Manner.Type).Ref(\"Manner_DrugAllergy\").Unique(),\n }\n }", "func (m *OrgUnitMemberMutation) AddedEdges() []string {\n\tedges := make([]string, 0, 5)\n\tif m.create_by != nil {\n\t\tedges = append(edges, orgunitmember.EdgeCreateBy)\n\t}\n\tif m.update_by != nil {\n\t\tedges = append(edges, orgunitmember.EdgeUpdateBy)\n\t}\n\tif m.user != nil {\n\t\tedges = append(edges, orgunitmember.EdgeUser)\n\t}\n\tif m.position != nil {\n\t\tedges = append(edges, orgunitmember.EdgePosition)\n\t}\n\tif m.belongToOrgUnit != nil {\n\t\tedges = append(edges, orgunitmember.EdgeBelongToOrgUnit)\n\t}\n\treturn edges\n}", "func (m *OrgUnitMutation) AddedEdges() []string {\n\tedges := make([]string, 0, 7)\n\tif m.create_by != nil {\n\t\tedges = append(edges, orgunit.EdgeCreateBy)\n\t}\n\tif m.update_by != nil {\n\t\tedges = append(edges, orgunit.EdgeUpdateBy)\n\t}\n\tif m.members != nil {\n\t\tedges = append(edges, orgunit.EdgeMembers)\n\t}\n\tif m.positions != nil {\n\t\tedges = append(edges, orgunit.EdgePositions)\n\t}\n\tif m.supUnit != nil {\n\t\tedges = append(edges, orgunit.EdgeSupUnit)\n\t}\n\tif m.subUnits != nil {\n\t\tedges = append(edges, orgunit.EdgeSubUnits)\n\t}\n\tif m.belongToOrg != nil {\n\t\tedges = append(edges, orgunit.EdgeBelongToOrg)\n\t}\n\treturn edges\n}", "func (Doctor) Edges() []ent.Edge {\n return []ent.Edge{\n\n edge.From(\"title\", Title.Type).\n Ref(\"doctors\").\n Unique(),\n\n edge.From(\"gender\", Gender.Type).\n Ref(\"doctors\").\n Unique(),\n\n edge.From(\"position\", Position.Type).\n Ref(\"doctors\").\n Unique(),\n\n edge.From(\"disease\", Disease.Type).\n Ref(\"doctors\").\n Unique(),\n\n edge.To(\"offices\", Office.Type).\n StorageKey(edge.Column(\"doctor_id\")),\n\n edge.To(\"departments\", Department.Type).\n StorageKey(edge.Column(\"doctor_id\")),\n\n edge.To(\"schedules\", Schedule.Type).\n StorageKey(edge.Column(\"doctor_id\")),\n\n edge.To(\"trainings\",Training.Type).\n StorageKey(edge.Column(\"doctor_id\")),\n\n edge.To(\"specialdoctors\",Specialdoctor.Type).\n StorageKey(edge.Column(\"doctor_id\")),\n\n edge.To(\"details\", Detail.Type).Unique(),\n }\n}", "func (m *OrgUnitMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 7)\n\tif m.removedmembers != nil {\n\t\tedges = append(edges, orgunit.EdgeMembers)\n\t}\n\tif m.removedpositions != nil {\n\t\tedges = append(edges, orgunit.EdgePositions)\n\t}\n\tif m.removedsubUnits != nil {\n\t\tedges = append(edges, orgunit.EdgeSubUnits)\n\t}\n\treturn edges\n}", "func (m *OrgUnitMemberMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 5)\n\treturn edges\n}", "func (m *RoomdetailMutation) AddedEdges() []string {\n\tedges := make([]string, 0, 8)\n\tif m.pledge != nil {\n\t\tedges = append(edges, roomdetail.EdgePledge)\n\t}\n\tif m.petrule != nil {\n\t\tedges = append(edges, roomdetail.EdgePetrule)\n\t}\n\tif m.bedtype != nil {\n\t\tedges = append(edges, roomdetail.EdgeBedtype)\n\t}\n\tif m.employee != nil {\n\t\tedges = append(edges, roomdetail.EdgeEmployee)\n\t}\n\tif m.jobposition != nil {\n\t\tedges = append(edges, roomdetail.EdgeJobposition)\n\t}\n\tif m.staytype != nil {\n\t\tedges = append(edges, roomdetail.EdgeStaytype)\n\t}\n\tif m.roomdetails != nil {\n\t\tedges = append(edges, roomdetail.EdgeRoomdetails)\n\t}\n\tif m.cleaningrooms != nil {\n\t\tedges = append(edges, roomdetail.EdgeCleaningrooms)\n\t}\n\treturn edges\n}", "func (Roomtype) Edges() []ent.Edge {\n return []ent.Edge{\n edge.To(\"Room1\", Room.Type),\n }\n}", "func (g UndirectWeighted) Edge(uid, vid int64) 
Edge { return g.WeightedEdgeBetween(uid, vid) }", "func (m *EmployeeMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m.removedarea != nil {\n\t\tedges = append(edges, employee.EdgeArea)\n\t}\n\treturn edges\n}", "func (m *MedicalrecordstaffMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m.removed_MedicalrecordstaffPatientrights != nil {\n\t\tedges = append(edges, medicalrecordstaff.EdgeMedicalrecordstaffPatientrights)\n\t}\n\treturn edges\n}", "func (Financier) Edges() []ent.Edge {\n\treturn []ent.Edge{\n\t\tedge.To(\"bills\", Bill.Type).StorageKey(edge.Column(\"officer_id\")),\n\t\tedge.From(\"user\", User.Type).Ref(\"financier\").Unique(),\n\t}\n}", "func (m *MemberMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 3)\n\tif m.removedpayment != nil {\n\t\tedges = append(edges, member.EdgePayment)\n\t}\n\tif m.removedbookcourse != nil {\n\t\tedges = append(edges, member.EdgeBookcourse)\n\t}\n\tif m.removedequipmentrental != nil {\n\t\tedges = append(edges, member.EdgeEquipmentrental)\n\t}\n\treturn edges\n}", "func (PatientInfo) Edges() []ent.Edge {\r\n\treturn []ent.Edge{\r\n\t\tedge.To(\"drugallergys\", DrugAllergy.Type).StorageKey(edge.Column(\"patient_id\")),\r\n\t\tedge.To(\"patientprescription\", Prescription.Type).StorageKey(edge.Column(\"patient_id\")),\r\n\t}\r\n}", "func (m *EventRSVPMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 2)\n\treturn edges\n}", "func (Short) Edges() []ent.Edge {\n\treturn nil\n}", "func (m *BedtypeMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m.removedroomdetails != nil {\n\t\tedges = append(edges, bedtype.EdgeRoomdetails)\n\t}\n\treturn edges\n}", "func (m *AreaMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 4)\n\treturn edges\n}", "func (m *AreaMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 4)\n\treturn edges\n}", "func (Category) Edges() []ent.Edge {\n\treturn nil\n}", "func (Examinationroom) Edges() []ent.Edge {\n return []ent.Edge{\n\t\tedge. 
To(\"Examinationroom_Operativerecord\",Operativerecord.Type).StorageKey(edge.Column(\"Examinationroom_id\")),\n }\n }", "func (Machine) Edges() []ent.Edge {\n\treturn []ent.Edge{\n\t\tedge.From(\"ips\", IP.Type).Ref(\"machine\").Annotations(entproto.Field(5)),\n\t\tedge.From(\"users\", User.Type).Ref(\"machines\").Annotations(entproto.Field(6)),\n\t\tedge.From(\"keys\", Key.Type).Ref(\"machine\").Annotations(entproto.Field(7)),\n\t}\n}", "func (FurnitureType) Edges() []ent.Edge {\n\treturn []ent.Edge{\n\t\tedge.To(\"details\", FurnitureDetail.Type).StorageKey(edge.Column(\"type_id\")),\n\t}\n}", "func (m *OrgUnitPositionMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 4)\n\tif m.removedbelongToOrgUnitMembers != nil {\n\t\tedges = append(edges, orgunitposition.EdgeBelongToOrgUnitMembers)\n\t}\n\treturn edges\n}", "func (m *InsuranceMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m.removed_InsurancePatientrights != nil {\n\t\tedges = append(edges, insurance.EdgeInsurancePatientrights)\n\t}\n\treturn edges\n}", "func (m *RoomdetailMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 8)\n\tif m.removedcleaningrooms != nil {\n\t\tedges = append(edges, roomdetail.EdgeCleaningrooms)\n\t}\n\treturn edges\n}", "func (Diagnosis) Edges() []ent.Edge {\n\treturn []ent.Edge{\n\n\t\tedge.From(\"Doctor_name\", Doctor.Type).\n\t\t\tRef(\"DoctorToDiagnosis\").\n\t\t\tUnique(),\n\t\tedge.From(\"Patient\", Patient.Type).\n\t\t\tRef(\"PatientToDiagnosis\").\n\t\t\tUnique(),\n\t\tedge.From(\"type\", TreatmentType.Type).\n\t\t\tRef(\"TreatmentTypeToDiagnosis\").\n\t\t\tUnique(),\n\t}\n}", "func (m *EmployeeMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 8)\n\tif m.removedarea != nil {\n\t\tedges = append(edges, employee.EdgeArea)\n\t}\n\tif m.removeddisease != nil {\n\t\tedges = append(edges, employee.EdgeDisease)\n\t}\n\tif m.removeddrug != nil {\n\t\tedges = append(edges, employee.EdgeDrug)\n\t}\n\tif m.removeddiagnosis != nil {\n\t\tedges = append(edges, employee.EdgeDiagnosis)\n\t}\n\tif m.removedpatient != nil {\n\t\tedges = append(edges, employee.EdgePatient)\n\t}\n\treturn edges\n}", "func (m *StatisticMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m.removedarea != nil {\n\t\tedges = append(edges, statistic.EdgeArea)\n\t}\n\treturn edges\n}", "func (m *StatisticMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m.removedarea != nil {\n\t\tedges = append(edges, statistic.EdgeArea)\n\t}\n\treturn edges\n}", "func (m *EquipmenttypeMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 2)\n\tif m.removedequipment != nil {\n\t\tedges = append(edges, equipmenttype.EdgeEquipment)\n\t}\n\tif m.removedequipmentrental != nil {\n\t\tedges = append(edges, equipmenttype.EdgeEquipmentrental)\n\t}\n\treturn edges\n}", "func (m *EmployeeMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 8)\n\tif m.removedpayment != nil {\n\t\tedges = append(edges, employee.EdgePayment)\n\t}\n\tif m.removedequipment != nil {\n\t\tedges = append(edges, employee.EdgeEquipment)\n\t}\n\tif m.removedbookcourse != nil {\n\t\tedges = append(edges, employee.EdgeBookcourse)\n\t}\n\tif m.removedequipmentrental != nil {\n\t\tedges = append(edges, employee.EdgeEquipmentrental)\n\t}\n\tif m.removedpromotion != nil {\n\t\tedges = append(edges, employee.EdgePromotion)\n\t}\n\treturn edges\n}", "func (m *AreaMutation) AddedEdges() []string {\n\tedges := make([]string, 0, 4)\n\tif m.disease != nil 
{\n\t\tedges = append(edges, area.EdgeDisease)\n\t}\n\tif m.statistic != nil {\n\t\tedges = append(edges, area.EdgeStatistic)\n\t}\n\tif m.level != nil {\n\t\tedges = append(edges, area.EdgeLevel)\n\t}\n\tif m.employee != nil {\n\t\tedges = append(edges, area.EdgeEmployee)\n\t}\n\treturn edges\n}", "func (m *AreaMutation) AddedEdges() []string {\n\tedges := make([]string, 0, 4)\n\tif m.disease != nil {\n\t\tedges = append(edges, area.EdgeDisease)\n\t}\n\tif m.statistic != nil {\n\t\tedges = append(edges, area.EdgeStatistic)\n\t}\n\tif m.level != nil {\n\t\tedges = append(edges, area.EdgeLevel)\n\t}\n\tif m.employee != nil {\n\t\tedges = append(edges, area.EdgeEmployee)\n\t}\n\treturn edges\n}", "func (m *EquipmentrentalMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 4)\n\treturn edges\n}", "func (m *PurposeMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m.removedcarcheckinout != nil {\n\t\tedges = append(edges, purpose.EdgeCarcheckinout)\n\t}\n\treturn edges\n}", "func (m *OrganizationMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 4)\n\tif m.removedunits != nil {\n\t\tedges = append(edges, organization.EdgeUnits)\n\t}\n\tif m.removedstaffs != nil {\n\t\tedges = append(edges, organization.EdgeStaffs)\n\t}\n\treturn edges\n}", "func (m *OperativerecordMutation) AddedEdges() []string {\n\tedges := make([]string, 0, 4)\n\tif m._Examinationroom != nil {\n\t\tedges = append(edges, operativerecord.EdgeExaminationroom)\n\t}\n\tif m._Nurse != nil {\n\t\tedges = append(edges, operativerecord.EdgeNurse)\n\t}\n\tif m._Operative != nil {\n\t\tedges = append(edges, operativerecord.EdgeOperative)\n\t}\n\tif m._Tool != nil {\n\t\tedges = append(edges, operativerecord.EdgeTool)\n\t}\n\treturn edges\n}", "func (m *EquipmentMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 5)\n\tif m.removedequipmentrental != nil {\n\t\tedges = append(edges, equipment.EdgeEquipmentrental)\n\t}\n\treturn edges\n}", "func (g DenseGraph) M() int {\n\treturn g.NumberOfEdges\n}", "func (m *PlaceMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m.removedemployee != nil {\n\t\tedges = append(edges, place.EdgeEmployee)\n\t}\n\treturn edges\n}", "func (m *MemberMutation) AddedEdges() []string {\n\tedges := make([]string, 0, 3)\n\tif m.payment != nil {\n\t\tedges = append(edges, member.EdgePayment)\n\t}\n\tif m.bookcourse != nil {\n\t\tedges = append(edges, member.EdgeBookcourse)\n\t}\n\tif m.equipmentrental != nil {\n\t\tedges = append(edges, member.EdgeEquipmentrental)\n\t}\n\treturn edges\n}", "func (m *DiseasetypeMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m.removeddisease != nil {\n\t\tedges = append(edges, diseasetype.EdgeDisease)\n\t}\n\treturn edges\n}", "func (m *InsuranceMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m.removedinsuranceof != nil {\n\t\tedges = append(edges, insurance.EdgeInsuranceof)\n\t}\n\treturn edges\n}", "func (m *PatientrightsMutation) AddedEdges() []string {\n\tedges := make([]string, 0, 4)\n\tif m._PatientrightsPatientrightstype != nil {\n\t\tedges = append(edges, patientrights.EdgePatientrightsPatientrightstype)\n\t}\n\tif m._PatientrightsInsurance != nil {\n\t\tedges = append(edges, patientrights.EdgePatientrightsInsurance)\n\t}\n\tif m._PatientrightsPatientrecord != nil {\n\t\tedges = append(edges, patientrights.EdgePatientrightsPatientrecord)\n\t}\n\tif m._PatientrightsMedicalrecordstaff != nil {\n\t\tedges = append(edges, 
patientrights.EdgePatientrightsMedicalrecordstaff)\n\t}\n\treturn edges\n}", "func (Patientrecord) Edges() []ent.Edge {\n\treturn []ent.Edge{\n\t\t\n\t\tedge.To(\"PatientrecordPatientrights\", Patientrights.Type).StorageKey(edge.Column(\"Patientrecord_id\")),\n\n }\n}", "func (m *EquipmentrentalMutation) AddedEdges() []string {\n\tedges := make([]string, 0, 4)\n\tif m.equipment != nil {\n\t\tedges = append(edges, equipmentrental.EdgeEquipment)\n\t}\n\tif m.employee != nil {\n\t\tedges = append(edges, equipmentrental.EdgeEmployee)\n\t}\n\tif m.member != nil {\n\t\tedges = append(edges, equipmentrental.EdgeMember)\n\t}\n\tif m.equipmenttype != nil {\n\t\tedges = append(edges, equipmentrental.EdgeEquipmenttype)\n\t}\n\treturn edges\n}", "func (Metric) Edges() []ent.Edge {\n\treturn []ent.Edge{\n\t\tedge.From(\"MetricName_Metrics\", MetricName.Type).Ref(\"metrics\").Unique(),\n\t\tedge.From(\"MetricEndpoint_Metrics\", MetricEndpoint.Type).Ref(\"metrics\").Unique(),\n\t\tedge.From(\"MetricLabel_Metrics\", MetricLabel.Type).Ref(\"metrics\").Unique(),\n\t}\n}", "func main() {\n //Leemos el archivo\n data, err := ioutil.ReadFile(\"g.txt\")\n if err != nil {\n fmt.Println(\"File reading error\", err)\n return\n }\n rows := strings.Split(string(data), \"\\n\")\n //Inicializamos los vertices\n numOfVertexes, _ := strconv.Atoi(rows[0])\n vertexesG := make([]Vertex, numOfVertexes)\n count := 0;\n for count < numOfVertexes {\n vertexesG[count] = Vertex{count, 0.1, 0.2}\n count = count + 1\n }\n\n //Inicializamos las aristas con los valores del archivo\n numOfEdges := len(rows)-2\n edgesG := make([]Edge, numOfEdges)\n count = 0;\n for count < numOfEdges {\n vertIndx := strings.Split(rows[count+1], \",\") \n vertex1, _ := strconv.Atoi(vertIndx[0])\n vertex2, _ := strconv.Atoi(vertIndx[1])\n edgesG[count] = Edge{vertex1,vertex2,1}\n count = count + 1\n }\n\n //De lo anterior tenemos el conjuinto de vertices vertexesG y el conjunto de aristas edgesG\n //El apuntador a estos dos conjuntos se les pasara a todas las hormigas para que los compartan\n //Y todas tengan los mismos conjuntos\n\n\n //VARIABLE QUE NOS DIRA CUANTAS VECES SE HA ENCONTRADO UNA SOLUCION CON EL MISMO TAMAÑO DE MANERA CONSECUTIVA \n numSinCambios := 0\n //Variable que nos dice el numero de veces que si se encuentra la solcucion con el mismo tamaño se detendra el algoritmo\n numIteracionSeguidasParaParar := 200\n //Defefine el numero de hormigas que tendremos\n numberOfAnts := 20\n //Calculamos la cantidad de aristas que debe de tener la greafica comompleta dada la cantidad de vertices que tenemos\n numOfEdgesFull := (numOfVertexes*(numOfVertexes-1))/2\n\n //VARIABLES QUE DEFINE LOS PARAMETROS DEL ALGORITMO\n q := 0.5\n evaporation_rate := 0.2\n pheromone_adjust := 0.12\n //beta := 1\n //VARIABLES QUE DEFINE LOS PARAMETROS DEL ALGORITMO\n\n //Semilla para la funcion de numeros aleatorios\n randomSeed := 1\n //La funcion para generar los numeros aletarios que usaremos\n randomFun := rand.New(rand.NewSource(int64(randomSeed)))\n min_sol := make([]int, numOfVertexes)\n \n //Inicializamos un slice de hormigas con la cantida de hormigas definida\n ants := make([]Ant, numberOfAnts)\n\n antCount := 0\n //Para cada hormiga le vamos a crear su slice de edgesFull, una grafica con el apuntaos a los vertifces\n //el aputnador a las aristas y sus aristas que rempresentaran a la grafica completa\n //Depues vamos a crear ala hormiga con los parametros\n //La grafica que le creamos, un slice de enteros para sus soluciones, la funcion aleatorio y los 
parametros \n for antCount < numberOfAnts {\n edgesFull := make([]Edge, numOfEdgesFull)\n graphG := Graph{&vertexesG,&edgesG,&edgesFull}\n slice := make([]int, 0)\n ants[antCount] = Ant{antCount,&graphG,randomFun, &slice,q,evaporation_rate,pheromone_adjust}\n antCount = antCount +1\n }\n \n ///////////////////////////////////\n //////////ALGORITMO ACO////////////\n ///////////////////////////////////\n //Mientras no se tengan numIteracionSeguidasParaParar sin cambios en la solucion\n //Ejecutaremos el siguiente ciclo\n\n////////////////////////CICLO////////////////////////////\n for numSinCambios <= numIteracionSeguidasParaParar{\n\n //fmt.Printf(\"Sin cambios: %d\\n\", numSinCambios)\n\n //Inicializamos a cada una de las hormigas estos es \n //Inicializar la grafica full\n //Inicialar el slice de soluciones\n //no necesitamos poner a la hormiga en un vertice arbitratio en un principio por que desde todos los vertices tenemos conexion a todos por ser grafica\n //a todos por ser grafica completa\n antCount = 0\n for antCount < numberOfAnts {\n (*ants[antCount].graph).initFull()\n ants[antCount].BorraSolucion()\n antCount = antCount +1\n } \n \n //Mientras alguno de las hormigas pueda dar un paso\n sePuedeDarPaso := true\n for sePuedeDarPaso != false {\n\n otroPaso := false\n antCount = 0\n //Verificamos si alguna de las hormigas todavia puede dar un paso\n for antCount < numberOfAnts {\n if ants[antCount].PuedeDarUnPaso(){\n otroPaso = true\n }\n antCount = antCount +1\n }\n \n //Si alguna hormiga todavia puede dar un paso\n sePuedeDarPaso = otroPaso\n if sePuedeDarPaso{\n antCount = 0\n for antCount < numberOfAnts {\n //Verificamos si la hormiga con index antCount puede dar un paso y si es asi\n //Esta damos el paso con esta hormiga\n if ants[antCount].PuedeDarUnPaso(){\n\n////////////////////////PASO//////////////////////////// (VER EL LA FUNCION)\n ants[antCount].Paso() \n }\n antCount = antCount +1\n }\n }\n }\n\n///////////TODAS LAS HORMIGAS COMPLETAN SU TRAYECTO/////\n //Una vez que ya se dieron todos los pasos posibles debemo encontrar la mejor solucion guardarla y actualziar la feromona\n antCount = 0\n //El tamaño de la solucion minima\n minSolLen := -1\n //El indice de la hormiga que tiene la solucion minima\n minSolIndex := -1\n //Buscamos la solucion minima entre todas las hormigas\n for antCount < numberOfAnts {\n solLen := len((*ants[antCount].solution))\n if minSolLen > solLen || minSolLen < 0{\n minSolLen = solLen\n minSolIndex = antCount\n }\n antCount = antCount +1\n }\n ants[minSolIndex].PrintSolution()\n\n //Verificamos que la solucioon mejor encontrada en este ciclo sea mejor que minima solucion actual\n if len(min_sol) >= len((*ants[minSolIndex].solution)){\n //Si la solucion tiene el mismo tamaño entonces sumaos 1 al contador de ciclos sin con el mismo \n //Tamaño de solucion\n if len(min_sol) == len((*ants[minSolIndex].solution)){\n numSinCambios = numSinCambios+1\n //Si la solucion es mas pequeña regreamos el contador a 0 \n }else{\n numSinCambios = 0\n }\n\n //Borramos la mejor solucion anterior\n min_sol = make([]int, len((*ants[minSolIndex].solution)))\n countSolIndex := 0\n //Copiamos la nueva solucion minima\n for countSolIndex < len(min_sol){\n min_sol[countSolIndex] = (*ants[minSolIndex].solution)[countSolIndex]\n countSolIndex = countSolIndex +1\n }\n }\n\n countSolIndex := 0\n //Imprimimos la mejor solucion hasta el momento\n fmt.Printf(\"MejorSolucion: \")\n for countSolIndex < len(min_sol){\n fmt.Printf(\"%d \", min_sol[countSolIndex])\n countSolIndex = 
countSolIndex +1\n }\n fmt.Printf(\"\\n\")\n\n////////////////////////ACTUALIZACION GLOBAL////////////////////////////\n //Por ultimo vamos a hacer la actualizacion de la feromona de manera GLOBAL\n countVertexIndex := 0\n //Para cada uno de los vertices calculamos el nuevo valor de la hormona considerando la evaporacion\n for countVertexIndex < len(vertexesG){\n vertexIndex := vertexesG[countVertexIndex].index\n vertexPheromone := vertexesG[countVertexIndex].pheromone\n newPheromoneValue := (1-evaporation_rate)*vertexPheromone\n addedPheromone := 0.0\n countSolIndex := 0\n //Si el vertice es parte de la solucion minima actual entonces tambien calculamos la feromona extra que se le sumara\n for countSolIndex < len(min_sol){\n if vertexIndex == min_sol[countSolIndex]{\n addedPheromone = evaporation_rate*(1.0/float64((len(min_sol))))\n //fmt.Printf(\"AddedPhero %f\\n\",addedPheromone)\n }\n countSolIndex = countSolIndex +1\n } \n //Actualizamos el valor de la feromona\n vertexesG[countVertexIndex].pheromone = newPheromoneValue + addedPheromone\n countVertexIndex = countVertexIndex +1\n }\n\n }\n\n}", "func (m *DiseaseMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m.removedarea != nil {\n\t\tedges = append(edges, disease.EdgeArea)\n\t}\n\treturn edges\n}", "func (m *NurseMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m.removed_Nurse_Operativerecord != nil {\n\t\tedges = append(edges, nurse.EdgeNurseOperativerecord)\n\t}\n\treturn edges\n}", "func (m *HospitalMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 2)\n\tif m.removedreceive != nil {\n\t\tedges = append(edges, hospital.EdgeReceive)\n\t}\n\tif m.removedsend != nil {\n\t\tedges = append(edges, hospital.EdgeSend)\n\t}\n\treturn edges\n}", "func (m *RentalstatusMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m.removedrepairinvoices != nil {\n\t\tedges = append(edges, rentalstatus.EdgeRepairinvoices)\n\t}\n\treturn edges\n}", "func (m *EventRSVPMutation) AddedEdges() []string {\n\tedges := make([]string, 0, 2)\n\tif m.event != nil {\n\t\tedges = append(edges, eventrsvp.EdgeEvent)\n\t}\n\tif m.invitee != nil {\n\t\tedges = append(edges, eventrsvp.EdgeInvitee)\n\t}\n\treturn edges\n}", "func (Hospital) Edges() []ent.Edge {\n\treturn []ent.Edge{\n\t\tedge.To(\"receive\",Transport.Type).\n\t\tStorageKey(edge.Column(\"receive\")),\n\t\tedge.To(\"send\",Transport.Type).\n\t\tStorageKey(edge.Column(\"send\")),\n\t}\n}", "func (m *PromotionamountMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m.removedpromotion != nil {\n\t\tedges = append(edges, promotionamount.EdgePromotion)\n\t}\n\treturn edges\n}", "func (m *CleaningroomMutation) AddedEdges() []string {\n\tedges := make([]string, 0, 4)\n\tif m.roomdetail != nil {\n\t\tedges = append(edges, cleaningroom.EdgeRoomdetail)\n\t}\n\tif m._Cleanername != nil {\n\t\tedges = append(edges, cleaningroom.EdgeCleanername)\n\t}\n\tif m._Lengthtime != nil {\n\t\tedges = append(edges, cleaningroom.EdgeLengthtime)\n\t}\n\tif m._Employee != nil {\n\t\tedges = append(edges, cleaningroom.EdgeEmployee)\n\t}\n\treturn edges\n}", "func (m *PetruleMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m.removedroomdetails != nil {\n\t\tedges = append(edges, petrule.EdgeRoomdetails)\n\t}\n\treturn edges\n}", "func (m *RepairinvoiceMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 1)\n\treturn edges\n}", "func (m *BillingstatusMutation) RemovedEdges() []string 
{\n\tedges := make([]string, 0, 1)\n\tif m.removedbillingstatuss != nil {\n\t\tedges = append(edges, billingstatus.EdgeBillingstatuss)\n\t}\n\treturn edges\n}", "func (m *MedicalrecordstaffMutation) AddedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m._MedicalrecordstaffPatientrights != nil {\n\t\tedges = append(edges, medicalrecordstaff.EdgeMedicalrecordstaffPatientrights)\n\t}\n\treturn edges\n}", "func (m *PatientrightstypeMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 2)\n\tif m.removed_PatientrightstypePatientrights != nil {\n\t\tedges = append(edges, patientrightstype.EdgePatientrightstypePatientrights)\n\t}\n\treturn edges\n}", "func insertRelationEdges(conn types.TGConnection, gof types.TGGraphObjectFactory, houseMemberTable map[string]types.TGNode) {\n\tfmt.Println(\">>>>>>> Entering InsertRelationEdges: Insert Few Family Relations with individual properties <<<<<<<\")\n\n\t// Insert edge data into database\n\t// Two edge types defined in ancestry-initdb.conf.\n\t// Added year of marriage and place of marriage edge attributes for spouseEdge desc\n\t// Added Birth order edge attribute for offspringEdge desc\n\n\tgmd, err := conn.GetGraphMetadata(true)\n\tif err != nil {\n\t\tfmt.Println(\">>>>>>> Returning from InsertRelationEdges - error during conn.GetGraphMetadata <<<<<<<\")\n\t\treturn\n\t}\n\n\tspouseEdgeType, err := gmd.GetEdgeType(\"spouseEdge\")\n\tif err != nil {\n\t\tfmt.Println(\">>>>>>> Returning from InsertRelationEdges - error during conn.GetEdgeType('spouseEdge') <<<<<<<\")\n\t\treturn\n\t}\n\tif spouseEdgeType != nil {\n\t\tfmt.Printf(\">>>>>>> 'spouseEdge' is found with %d attributes <<<<<<<\\n\", len(spouseEdgeType.GetAttributeDescriptors()))\n\t} else {\n\t\tfmt.Println(\">>>>>>> 'spouseEdge' is not found from meta data fetch <<<<<<<\")\n\t\treturn\n\t}\n\n\toffspringEdgeType, err := gmd.GetEdgeType(\"offspringEdge\")\n\tif err != nil {\n\t\tfmt.Println(\">>>>>>> Returning from InsertRelationEdges - error during conn.GetEdgeType('offspringEdge') <<<<<<<\")\n\t\treturn\n\t}\n\tif offspringEdgeType != nil {\n\t\tfmt.Printf(\">>>>>>> 'offspringEdgeType' is found with %d attributes <<<<<<<\\n\", len(offspringEdgeType.GetAttributeDescriptors()))\n\t} else {\n\t\tfmt.Println(\">>>>>>> 'spouseEdge' is not found from meta data fetch <<<<<<<\")\n\t\treturn\n\t}\n\n\tfor _, houseRelation := range HouseRelationData {\n\t\thouseMemberFrom := houseMemberTable[houseRelation.FromMemberName]\n\t\thouseMemberTo := houseMemberTable[houseRelation.ToMemberName]\n\t\trelationName := houseRelation.RelationDesc\n\t\tfmt.Printf(\">>>>>>> Inside InsertRelationEdges: trying to create edge('%s'): From '%s' To '%s' <<<<<<<\\n\", relationName, houseRelation.FromMemberName, houseRelation.ToMemberName)\n\t\t//var relationDirection types.TGDirectionType\n\t\tif relationName == \"spouse\" {\n\t\t\t//relationDirection = types.DirectionTypeUnDirected\n\t\t\tspouseEdgeType.GetFromNodeType()\n\t\t\tedge1, err := gof.CreateEdgeWithEdgeType(houseMemberFrom, houseMemberTo, spouseEdgeType)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\">>>>>>> Returning from InsertRelationEdges - error during gof.CreateEdgeWithEdgeType(spouseEdgeType) <<<<<<<\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\t_ = edge1.SetOrCreateAttribute(\"yearMarried\", houseRelation.Attribute1)\n\t\t\t_ = edge1.SetOrCreateAttribute(\"placeMarried\", houseRelation.Attribute2)\n\t\t\t_ = edge1.SetOrCreateAttribute(\"relType\", relationName)\n\t\t\terr = conn.InsertEntity(edge1)\n\t\t\tif err != nil 
{\n\t\t\t\tfmt.Println(\">>>>>>> Returning from InsertRelationEdges - error during conn.InsertEntity(edge1) <<<<<<<\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t_, err = conn.Commit()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\">>>>>>> Returning from InsertRelationEdges w/ error during conn.Commit() <<<<<<<\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfmt.Printf(\">>>>>>> Inside InsertRelationEdges: Successfully added edge(spouse): From '%+v' To '%+v' <<<<<<<\\n\", houseRelation.FromMemberName, houseRelation.ToMemberName)\n\t\t} else {\n\t\t\t//relationDirection = types.DirectionTypeDirected\n\t\t\tedge1, err := gof.CreateEdgeWithEdgeType(houseMemberFrom, houseMemberTo, offspringEdgeType)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\">>>>>>> Returning from InsertRelationEdges - error during gof.CreateEdgeWithEdgeType(offspringEdgeType) <<<<<<<\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\t_ = edge1.SetOrCreateAttribute(\"birthOrder\", houseRelation.Attribute1)\n\t\t\t_ = edge1.SetOrCreateAttribute(\"relType\", relationName)\n\t\t\terr = conn.InsertEntity(edge1)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\">>>>>>> Returning from InsertRelationEdges - error during conn.InsertEntity(edge1) <<<<<<<\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t_, err = conn.Commit()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\">>>>>>> Returning from InsertRelationEdges w/ error during conn.Commit() <<<<<<<\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfmt.Printf(\">>>>>>> Inside InsertRelationEdges: Successfully added edge: From '%+v' To '%+v' <<<<<<<\\n\", houseRelation.FromMemberName, houseRelation.ToMemberName)\n\t\t}\n\t} // End of for loop\n\n\tfmt.Println(\">>>>>>> Successfully added edges w/ NO ERRORS !!! <<<<<<<\")\n\tfmt.Println(\">>>>>>> Returning from InsertRelationEdges w/ NO ERRORS !!! <<<<<<<\")\n}", "func (m *DiseaseMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 6)\n\tif m.removedarea != nil {\n\t\tedges = append(edges, disease.EdgeArea)\n\t}\n\tif m.removeddrug != nil {\n\t\tedges = append(edges, disease.EdgeDrug)\n\t}\n\treturn edges\n}", "func (m *InsuranceMutation) AddedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m._InsurancePatientrights != nil {\n\t\tedges = append(edges, insurance.EdgeInsurancePatientrights)\n\t}\n\treturn edges\n}", "func (c *MedicineClient) QueryUnitOfMedicine(m *Medicine) *UnitOfMedicineQuery {\n\tquery := &UnitOfMedicineQuery{config: c.config}\n\tquery.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {\n\t\tid := m.ID\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(medicine.Table, medicine.FieldID, id),\n\t\t\tsqlgraph.To(unitofmedicine.Table, unitofmedicine.FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.M2O, true, medicine.UnitOfMedicineTable, medicine.UnitOfMedicineColumn),\n\t\t)\n\t\tfromV = sqlgraph.Neighbors(m.driver.Dialect(), step)\n\t\treturn fromV, nil\n\t}\n\treturn query\n}", "func (m *ExaminationroomMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m.removed_Examinationroom_Operativerecord != nil {\n\t\tedges = append(edges, examinationroom.EdgeExaminationroomOperativerecord)\n\t}\n\treturn edges\n}", "func (m *PromotiontypeMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m.removedpromotion != nil {\n\t\tedges = append(edges, promotiontype.EdgePromotion)\n\t}\n\treturn edges\n}", "func (m *CleanernameMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m.removedcleaningrooms != nil {\n\t\tedges = append(edges, cleanername.EdgeCleaningrooms)\n\t}\n\treturn edges\n}", "func (m 
*EquipmentMutation) AddedEdges() []string {\n\tedges := make([]string, 0, 5)\n\tif m.classifier != nil {\n\t\tedges = append(edges, equipment.EdgeClassifier)\n\t}\n\tif m.employee != nil {\n\t\tedges = append(edges, equipment.EdgeEmployee)\n\t}\n\tif m.equipmenttype != nil {\n\t\tedges = append(edges, equipment.EdgeEquipmenttype)\n\t}\n\tif m.zone != nil {\n\t\tedges = append(edges, equipment.EdgeZone)\n\t}\n\tif m.equipmentrental != nil {\n\t\tedges = append(edges, equipment.EdgeEquipmentrental)\n\t}\n\treturn edges\n}", "func (m *AbilitypatientrightsMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m.removed_AbilitypatientrightsPatientrightstype != nil {\n\t\tedges = append(edges, abilitypatientrights.EdgeAbilitypatientrightsPatientrightstype)\n\t}\n\treturn edges\n}", "func (m *LevelMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m.removedarea != nil {\n\t\tedges = append(edges, level.EdgeArea)\n\t}\n\treturn edges\n}", "func (m *LevelMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m.removedarea != nil {\n\t\tedges = append(edges, level.EdgeArea)\n\t}\n\treturn edges\n}", "func (m *EmployeeMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 6)\n\tif m.removedemployees != nil {\n\t\tedges = append(edges, employee.EdgeEmployees)\n\t}\n\tif m.removedleasess != nil {\n\t\tedges = append(edges, employee.EdgeLeasess)\n\t}\n\tif m.removedroomdetails != nil {\n\t\tedges = append(edges, employee.EdgeRoomdetails)\n\t}\n\tif m.removedrepairinvoices != nil {\n\t\tedges = append(edges, employee.EdgeRepairinvoices)\n\t}\n\tif m.removedcleaningrooms != nil {\n\t\tedges = append(edges, employee.EdgeCleaningrooms)\n\t}\n\treturn edges\n}", "func (BinaryItem) Edges() []ent.Edge {\n\treturn []ent.Edge{\n\t\tedge.From(\"transaction\", Transaction.Type).\n\t\t\tRef(\"images\").\n\t\t\tUnique(),\n\t}\n}", "func (m *PatientrightsMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 4)\n\treturn edges\n}", "func (m *CarregisterMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 0)\n\treturn edges\n}", "func (m *EquipmenttypeMutation) AddedEdges() []string {\n\tedges := make([]string, 0, 2)\n\tif m.equipment != nil {\n\t\tedges = append(edges, equipmenttype.EdgeEquipment)\n\t}\n\tif m.equipmentrental != nil {\n\t\tedges = append(edges, equipmenttype.EdgeEquipmentrental)\n\t}\n\treturn edges\n}", "func (m *BloodtypeMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m.removedpatient != nil {\n\t\tedges = append(edges, bloodtype.EdgePatient)\n\t}\n\treturn edges\n}", "func (m *RepairinvoiceMutation) AddedEdges() []string {\n\tedges := make([]string, 0, 3)\n\tif m.employee != nil {\n\t\tedges = append(edges, repairinvoice.EdgeEmployee)\n\t}\n\tif m._Rentalstatus != nil {\n\t\tedges = append(edges, repairinvoice.EdgeRentalstatus)\n\t}\n\tif m._Lease != nil {\n\t\tedges = append(edges, repairinvoice.EdgeLease)\n\t}\n\treturn edges\n}", "func (m *PromotionMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 5)\n\tif m.removedpayment != nil {\n\t\tedges = append(edges, promotion.EdgePayment)\n\t}\n\treturn edges\n}", "func (m *PatientMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 6)\n\tif m.removeddiagnosis != nil {\n\t\tedges = append(edges, patient.EdgeDiagnosis)\n\t}\n\treturn edges\n}", "func (m *GenderMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m.removedpatient != nil {\n\t\tedges = append(edges, 
gender.EdgePatient)\n\t}\n\treturn edges\n}", "func (m *EmployeeMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 3)\n\tif m.removedwhose != nil {\n\t\tedges = append(edges, employee.EdgeWhose)\n\t}\n\tif m.removedemployeestock != nil {\n\t\tedges = append(edges, employee.EdgeEmployeestock)\n\t}\n\tif m.removedformemployee != nil {\n\t\tedges = append(edges, employee.EdgeFormemployee)\n\t}\n\treturn edges\n}", "func (BankingData) Edges() []ent.Edge {\n\treturn nil\n}", "func (m *PurposeMutation) AddedEdges() []string {\n\tedges := make([]string, 0, 1)\n\tif m.carcheckinout != nil {\n\t\tedges = append(edges, purpose.EdgeCarcheckinout)\n\t}\n\treturn edges\n}", "func (m *CarCheckInOutMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 3)\n\treturn edges\n}", "func (msaNodes *msaNodes) drawEdges() error {\n\t// getFirstNode returns the nodeID for the first node in the MSA derived from a specified sequence\n\tgetFirstNode := func(seqID string) int {\n\t\tfor nodeID := 1; nodeID <= len(msaNodes.nodeHolder); nodeID++ {\n\t\t\tfor _, id := range msaNodes.nodeHolder[nodeID].parentSeqIDs {\n\t\t\t\tif seqID == id {\n\t\t\t\t\t\treturn nodeID\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn 0\n\t}\n\t// findNextNode returns the ID of the next node that is derived from a query MSA sequence\n\tfindNextNode := func(seqID string, startNode int) int {\n\t\tfor nextNode := startNode + 1; nextNode <= len(msaNodes.nodeHolder); nextNode++ {\n\t\t\tfor _, parentSeqID := range msaNodes.nodeHolder[nextNode].parentSeqIDs {\n\t\t\t\tif seqID == parentSeqID {\n\t\t\t\t\t\t\treturn nextNode\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn 0\n\t}\n\t// iterate over each MSA sequence, connecting edges for each one\n\tfor _, seqID := range msaNodes.seqIDs {\n\t\tstartNode := getFirstNode(seqID)\n\t\tif startNode == 0 {\n\t\t\treturn fmt.Errorf(\"Node parse error: Could not identify start node for %v\", seqID)\n\t\t}\n\t\tfor {\n\t\t\tnextNode := findNextNode(seqID, startNode)\n\t\t\tif nextNode == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// draw edges\n\t\t\tmsaNodes.nodeHolder[startNode].outEdges[nextNode] = struct{}{}\n\t\t\tmsaNodes.nodeHolder[nextNode].inEdges[startNode] = struct{}{}\n\t\t\tstartNode = nextNode\n\t\t}\n\t}\n\treturn nil\n}", "func (m *AmbulanceMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 7)\n\tif m.removedcarinspections != nil {\n\t\tedges = append(edges, ambulance.EdgeCarinspections)\n\t}\n\tif m.removedcarcheckinout != nil {\n\t\tedges = append(edges, ambulance.EdgeCarcheckinout)\n\t}\n\tif m.removedambulance != nil {\n\t\tedges = append(edges, ambulance.EdgeAmbulance)\n\t}\n\treturn edges\n}", "func (m *RepairinvoiceMutation) RemovedEdges() []string {\n\tedges := make([]string, 0, 3)\n\treturn edges\n}" ]
[ "0.5982618", "0.5862496", "0.56688166", "0.54881394", "0.54257953", "0.53255206", "0.52897364", "0.5236274", "0.5192016", "0.51388514", "0.51366216", "0.512988", "0.5058522", "0.50200945", "0.5015581", "0.5001819", "0.4978672", "0.4976712", "0.49593082", "0.4959274", "0.49556208", "0.49431384", "0.49344242", "0.4928936", "0.49267536", "0.49105", "0.48954332", "0.48900655", "0.48900655", "0.48899373", "0.48831978", "0.4881163", "0.48793697", "0.487399", "0.4872767", "0.48719984", "0.4866146", "0.48622406", "0.48333678", "0.48333678", "0.4827104", "0.48252523", "0.48234162", "0.48234162", "0.4821914", "0.48141828", "0.4813942", "0.48101845", "0.48061776", "0.48037326", "0.48033243", "0.48001269", "0.47928616", "0.47863346", "0.4784068", "0.47737774", "0.4767666", "0.4767166", "0.47663835", "0.47558045", "0.47503126", "0.47485533", "0.47461772", "0.4737833", "0.4737379", "0.4736469", "0.4726801", "0.47248673", "0.47228912", "0.47225115", "0.47215387", "0.47182658", "0.47119674", "0.47096354", "0.47074658", "0.47058696", "0.47043908", "0.4702695", "0.47018605", "0.47007364", "0.46999723", "0.469921", "0.469921", "0.46911946", "0.4690112", "0.46891707", "0.4686387", "0.4684977", "0.46818167", "0.4677433", "0.4676705", "0.46752805", "0.46732807", "0.4669661", "0.4667192", "0.46647778", "0.46637705", "0.46630225", "0.4661965", "0.46597442" ]
0.7619897
0
New creates a new DB wrapper around LMDB
func New(folder string, cfg *Config) (*DB, error) {
	env, err := lmdb.NewEnv()
	if err != nil {
		return nil, errors.Wrap(err, "env create failed")
	}
	err = env.SetMaxDBs(cfg.MaxDBs)
	if err != nil {
		return nil, errors.Wrap(err, "env config failed")
	}
	err = env.SetMapSize(cfg.SizeMbs * 1024 * 1024)
	if err != nil {
		return nil, errors.Wrap(err, "map size failed")
	}
	if err = env.SetFlags(cfg.EnvFlags); err != nil {
		return nil, errors.Wrap(err, "set flag")
	}
	// Ensure the directory exists before opening the environment.
	if err = os.MkdirAll(folder, os.ModePerm); err != nil {
		return nil, errors.Wrap(err, "mkdir")
	}
	err = env.Open(folder, 0, cfg.Mode)
	if err != nil {
		return nil, errors.Wrap(err, "open env")
	}
	// Reclaim reader slots left behind by dead processes.
	var staleReaders int
	if staleReaders, err = env.ReaderCheck(); err != nil {
		return nil, errors.Wrap(err, "reader check")
	}
	if staleReaders > 0 {
		log.Printf("cleared %d reader slots from dead processes", staleReaders)
	}
	// Create (or open) the single named database the wrapper exposes.
	var dbi lmdb.DBI
	err = env.Update(func(txn *lmdb.Txn) (err error) {
		dbi, err = txn.CreateDBI("agg")
		return err
	})
	if err != nil {
		return nil, errors.Wrap(err, "create DB")
	}
	return &DB{env, dbi}, nil
}
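Not part of the record: a hedged, self-contained usage sketch showing how the env/DBI pair this wrapper holds is typically exercised, written directly against the bmatsuo/lmdb-go API the code above appears to target. The path, map size, database name, and key/value are illustrative assumptions, not values from the source.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/bmatsuo/lmdb-go/lmdb"
)

func main() {
	env, err := lmdb.NewEnv()
	if err != nil {
		log.Fatal(err)
	}
	defer env.Close()

	// Mirror the wrapper's setup: one named DB, 64 MiB map (assumed sizes).
	if err := env.SetMaxDBs(1); err != nil {
		log.Fatal(err)
	}
	if err := env.SetMapSize(64 * 1024 * 1024); err != nil {
		log.Fatal(err)
	}

	path := "/tmp/agg-example" // hypothetical folder
	if err := os.MkdirAll(path, os.ModePerm); err != nil {
		log.Fatal(err)
	}
	if err := env.Open(path, 0, 0644); err != nil {
		log.Fatal(err)
	}

	// One write transaction creates the named DBI and stores a pair,
	// matching the CreateDBI("agg") call in the constructor above.
	var dbi lmdb.DBI
	if err := env.Update(func(txn *lmdb.Txn) (err error) {
		dbi, err = txn.CreateDBI("agg")
		if err != nil {
			return err
		}
		return txn.Put(dbi, []byte("k"), []byte("v"), 0)
	}); err != nil {
		log.Fatal(err)
	}

	// Read the value back under a read-only transaction.
	if err := env.View(func(txn *lmdb.Txn) error {
		v, err := txn.Get(dbi, []byte("k"))
		if err != nil {
			return err
		}
		fmt.Printf("k = %s\n", v)
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}

Update serializes writers through LMDB's single write transaction, while View takes one of the reader slots — the same slots the constructor's ReaderCheck reclaims from dead processes.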
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func dbNew() *DB {\n\treturn &DB{\n\t\tdata: make(map[string]string),\n\t}\n}", "func New(ctx context.Context, ng engine.Engine) (*DB, error) {\n\treturn newDatabase(ctx, ng, database.Options{Codec: msgpack.NewCodec()})\n}", "func New(tb testing.TB, dir string, ver string, logger func(string)) (*DB, error) {\n\tdb, err := doNew(tb, dir, ver, logger)\n\tif err != nil && tb != nil {\n\t\ttb.Fatal(\"failed initializing database: \", err)\n\t}\n\treturn db, err\n}", "func NewDB(filePath string) (*LDB, error) {\n ldb, err := leveldb.OpenFile(filePath, &opt.Options{\n Filter: filter.NewBloomFilter(10),\n })\n if _, corrupt := err.(*errors.ErrCorrupted); corrupt {\n ldb, err = leveldb.RecoverFile(filePath, nil)\n }\n\n logger := log.New(\"db\", filePath)\n\n //catch errors if db if we don't have a db\n if err != nil {\n return nil, err\n }\n\n //return new wrapped db\n return &LDB {\n db: ldb,\n path: filePath,\n log: logger,\n }, nil\n}", "func New(dsn string, maxConn int) dao.DB {\n\treturn &db{\n\t\tDB: open(dsn, maxConn),\n\t}\n}", "func New(metainfo *Client, encStore *encryption.Store) *DB {\n\treturn &DB{\n\t\tmetainfo: metainfo,\n\t\tencStore: encStore,\n\t}\n}", "func New(ctx context.Context, ng engine.Engine, opts Options) (*Database, error) {\n\tif opts.Codec == nil {\n\t\treturn nil, errors.New(\"missing codec\")\n\t}\n\n\tdb := Database{\n\t\tng: ng,\n\t\tCodec: opts.Codec,\n\t}\n\n\tntx, err := db.ng.Begin(ctx, engine.TxOptions{\n\t\tWritable: true,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer ntx.Rollback()\n\n\terr = db.initInternalStores(ntx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = ntx.Commit()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &db, nil\n}", "func New(path string) (*DB, error) {\n\tdb, err := bolt.Open(path, 0600, &bolt.Options{Timeout: 1 * time.Second})\n\treturn &DB{Bolt: db}, err\n}", "func New() (database, error) {\n\tdb, err := sql.Open(driverName, databaseName)\n\tif err != nil {\n\t\treturn database{}, err\n\t}\n\n\treturn database{db}, nil\n}", "func New(dl logbook.Logbook, databaseName string) (*Database, error) {\n\tdb := &Database{\n\t\tDlog: dl,\n\t}\n\n\t// Check if data folder exists\n\t_, err := os.Stat(config.DBFolder)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\t// If folder does not exist, create it\n\t\t\terr := os.Mkdir(config.DBFolder, 0777)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.New(\"error\")\n\t\t\t}\n\t\t}\n\t}\n\n\terr = os.Chdir(config.DBFolder)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn nil, errors.New(\"error\")\n\t}\n\n\tf, err := os.Create(databaseName)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn nil, errors.New(\"error\")\n\t}\n\n\tdefer f.Close()\n\n\tdb.DatabaseName = databaseName\n\tdb.Version = 001\n\tdb.File = f\n\tdb.Data = map[string][]byte{}\n\n\twr := bufio.NewWriter(f)\n\n\t_, err = fmt.Fprintf(wr, \"DolceDB.%d\", config.DBVersion)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\twr.Flush()\n\n\terr = db.RebuildMap()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn db, nil\n}", "func New(db *sql.DB) *Database {\n\treturn &Database{\n\t\tdb: db,\n\t}\n}", "func New(db *sql.DB) *Database {\n\treturn &Database{\n\t\tdb: db,\n\t}\n}", "func New(db *sql.DB, maxGatewayCount int) DB {\n\treturn database{\n\t\tdb: db,\n\t\tmaxGatewayCount: maxGatewayCount,\n\t}\n}", "func NewDb() *Db { //s interface{}\n\td := &Db{\n\t\tarr: make(Lister, 0, 100),\n\t\tupdate: time.Now().UnixNano(),\n\t}\n\treturn d\n}", "func newWrapper(db *DB) *Wrapper 
{\n\treturn &Wrapper{executable: true, db: db, Timestamp: &Timestamp{}}\n}", "func NewDB(conf configuration.Ledger, opts *badger.Options) (*DB, error) {\n\topts = setOptions(opts)\n\tdir, err := filepath.Abs(conf.Storage.DataDirectory)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\topts.Dir = dir\n\topts.ValueDir = dir\n\n\tbdb, err := badger.Open(*opts)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"local database open failed\")\n\t}\n\n\tdb := &DB{\n\t\tdb: bdb,\n\t\ttxretiries: conf.Storage.TxRetriesOnConflict,\n\t\tidlocker: NewIDLocker(),\n\t\tnodeHistory: map[core.PulseNumber][]core.Node{},\n\t}\n\treturn db, nil\n}", "func New(t *testing.T) *sql.DB {\n\tt.Helper()\n\n\tif testing.Short() {\n\t\tt.Skip(\"skip store test because short mode\")\n\t}\n\n\tdb, err := sql.Open(\"txdb\", t.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"can't open db: %s\", err)\n\t}\n\n\tt.Cleanup(func() {\n\t\tdb.Close()\n\t})\n\n\treturn db\n}", "func New(path string) (*DB, error) {\n\tdb, err := badger.Open(badger.DefaultOptions(path))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"new db error: %w\", err)\n\t}\n\treturn &DB{db}, nil\n}", "func New(db *leveldb.DB) (Storage, error) {\n\ttx, err := db.OpenTransaction()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error opening leveldb transaction: %v\", err)\n\t}\n\n\treturn &storage{\n\t\tstore: tx,\n\t\tdb: db,\n\t\ttx: tx,\n\t}, nil\n}", "func New(url string, timeout time.Duration) (*DB, error) {\n\n\tif timeout == 0 {\n\t\ttimeout = 60 * time.Second\n\t}\n\n\t// Create a session which maintains a pool of socket connections.\n\tsession, err := mgo.DialWithTimeout(url, timeout)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"mgo.DialWithTimeout: %s,%v\", url, timeout)\n\t}\n\n\tsession.SetMode(mgo.Monotonic, true)\n\n\tdb := DB{\n\t\tdatabase: session.DB(apiDatabase),\n\t\tsession: session,\n\t}\n\n\treturn &db, nil\n}", "func NewDB(conf configuration.Ledger, opts *badger.Options) (*DB, error) {\n\topts = setOptions(opts)\n\tdir, err := filepath.Abs(conf.Storage.DataDirectory)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\topts.Dir = dir\n\topts.ValueDir = dir\n\n\tbdb, err := badger.Open(*opts)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"local database open failed\")\n\t}\n\n\tdb := &DB{\n\t\tdb: bdb,\n\t\ttxretiries: conf.Storage.TxRetriesOnConflict,\n\t\tidlocker: NewIDLocker(),\n\t}\n\treturn db, nil\n}", "func New(config *connect.Config) Db {\n\tvar db Db\n\t// first find db in dbMap by DatabaseName\n\tdb = dbMap[config.DatabaseName]\n\t// find\n\tif db != nil {\n\t\treturn db\n\t}\n\t// not find in dbMap - New\n\tswitch config.DbType {\n\tcase connect.MONGODB:\n\t\t// init mongodb\n\t\tdb = mongo.New(config)\n\t}\n\tdbMap[config.DatabaseName] = db\n\treturn db\n}", "func New(cfg config.DB) (*gorm.DB, error) {\n\tdb, err := database.New(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := db.AutoMigrate(\n\t\trepository.Client{},\n\t\trepository.Deal{},\n\t\trepository.Position{},\n\t\trepository.OHLCV{},\n\t); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn db, nil\n}", "func New(opts *Options) (*Database, error) {\n\tdb := new(Database)\n\tif err := opts.initCryptParams(&db.cparams); err != nil {\n\t\treturn nil, err\n\t}\n\tdb.init(nil, nil, opts)\n\treturn db, nil\n}", "func (b backend) New(ctx context.Context, l log.Interface, cfg *config.Config) (persist.Database, error) {\n\tusername, password, host, port, db 
:=\n\t\tcfg.Database.Postgres.Username,\n\t\tcfg.Database.Postgres.Password,\n\t\tcfg.Database.Postgres.Host,\n\t\tcfg.Database.Postgres.Port,\n\t\tcfg.Database.Postgres.DB\n\n\tconnString := fmt.Sprintf(\n\t\t\"postgres://%s:%s@%s:%d/%s\",\n\t\tusername,\n\t\tpassword,\n\t\thost,\n\t\tport,\n\t\tdb,\n\t)\n\n\tconn, err := pgxpool.Connect(ctx, connString)\n\tif err != nil {\n\t\treturn nil, errwrap.Wrap(err, \"failed to connect to postgres\")\n\t}\n\n\tq := gen.New(conn)\n\n\treturn &database{\n\t\tqueries: q,\n\t}, nil\n}", "func New(db *sql.DB) *Database {\n\treturn &Database{\n\t\tUsers: users.New(db),\n\t\tSessions: sessions.New(db),\n\t\tWorkouts: workouts.New(db),\n\t\tExercises: exercises.New(db),\n\t}\n}", "func New(path string) (*LevelDB, error) {\n\treturn NewWithOptions(path, DefaultOptions)\n}", "func NewDB(path string) (DB, error) {\n\tdb, err := leveldb.OpenFile(path, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &levelDBImpl{\n\t\tDB: db,\n\t}, nil\n}", "func newDatabase(info extraInfo, db *sql.DB) *database {\n\treturn &database{\n\t\tname: info.dbName,\n\t\tdriverName: info.driverName,\n\t\tdb: db,\n\t}\n}", "func New() (*Database, error) {\n\tvar err error\n\tvar db *memdb.MemDB\n\tonce.Do(func() {\n\t\tinstance = &Database{}\n\t\tdb, err = instance.createSchema()\n\t\tinstance.db = db\n\t\tif err == nil {\n\t\t\tinstance.loadDefaults()\n\t\t}\n\t})\n\treturn instance, err\n}", "func New(conn *sql.DB, preview bool) (w *Wrapper, err error) {\n\tw = &Wrapper{\n\t\tpreview: preview,\n\t\tprepareType: examineDB(conn),\n\t}\n\n\tif w.prepareType == dbUnknown {\n\t\treturn nil, UnknownDatabase\n\t}\n\n\terr = w.Bootstrap(conn)\n\treturn\n}", "func New(dir, name string) (mp *MapDB, err error) {\n\tvar m MapDB\n\t// Initialize map\n\tm.m = make(map[string]string)\n\n\t// Encryption middleware\n\tcmw := middleware.NewCryptyMW([]byte(\" encryption key \"), make([]byte, 16))\n\tif cmw == nil {\n\n\t}\n\n\t// Create a new instance of mrT\n\tif m.mrT, err = mrT.New(dir, name); err != nil {\n\t\treturn\n\t}\n\n\tif err = m.mrT.ForEach(m.load); err != nil {\n\t\treturn\n\t}\n\n\t// Assign pointer to our MapDB\n\tmp = &m\n\treturn\n}", "func NewDB(now string) *DB {\n\treturn &DB{\n\t\tmeasurements: make(map[string]*Measurement),\n\t\tseries: make(map[uint32]*Series),\n\t\tNow: mustParseTime(now),\n\t}\n}", "func NewDb(db *sql.DB, driverName string) *DB {\n return &DB{DB: db, driverName: driverName, Mapper: mapper()}\n}", "func New(ctx context.Context, db *sql.DB, m map[string]string) (*Store, error) {\n\tstore := &Store{db: db}\n\terr := store.InitTable(ctx, m)\n\treturn store, err\n}", "func New(collection *driver.Collection) *DB {\n\treturn &DB{\n\t\tCollection: collection,\n\t}\n}", "func New(db *pg.DB) DB {\n\td := DB{DB: db, crcTable: crc64.MakeTable(crc64.ECMA)}\n\n\treturn d\n}", "func NewDB(session *mgo.Session) *DB {\n\treturn &DB{session: session}\n}", "func (b Factory) New(ctx context.Context, name string, l log.Interface, cfg *config.Config) (Database, error) {\n\tbackend, ok := b[name]\n\tif !ok {\n\t\treturn nil, errwrap.Wrap(ErrDatabaseNotFound, name)\n\t}\n\n\tdb, err := backend.New(ctx, l, cfg)\n\n\treturn db, errwrap.Wrap(err, \"failed to create backend\")\n}", "func NewDB(filename string) (*DB, error) {\n\tdb, err := bbolt.Open(filename, 0600, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &DB{db: db}, nil\n}", "func NewDB(filename string) (*DB, error) {\n\tdb, err := bbolt.Open(filename, 0600, nil)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\treturn &DB{db: db}, nil\n}", "func New(cnf Config) (db *gorm.DB, err error) {\n\tswitch cnf.Engine {\n\tcase \"postgres\":\n\t\tdb, err = gorm.Open(cnf.Engine, fmt.Sprintf(\"host=%s port=%d user=%s dbname=%s password=%s sslmode=%s\",\n\t\t\tcnf.Host,\n\t\t\tcnf.Port,\n\t\t\tcnf.Username,\n\t\t\tcnf.DbName,\n\t\t\tcnf.Password,\n\t\t\tcnf.SslMode))\n\tcase \"mysql\":\n\tcase \"mssql\":\n\tdefault:\n\t\tdb, err = gorm.Open(cnf.Engine, fmt.Sprintf(\"host=%s port=%d user=%s dbname=%s password=%s sslmode=%s\",\n\t\t\tcnf.Host,\n\t\t\tcnf.Port,\n\t\t\tcnf.Username,\n\t\t\tcnf.DbName,\n\t\t\tcnf.Password,\n\t\t\tcnf.SslMode))\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdb.LogMode(cnf.Log)\n\n\treturn db, err\n}", "func New(dsn string) *DB {\n\tconnection, err := sql.Open(\"postgres\", dsn)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// passive attempt to create table\n\t_, _ = connection.Exec(fmt.Sprintf(`create table %s (job jsonb);`, TABLE_NAME))\n\treturn &DB{\n\t\tconn: connection,\n\t}\n}", "func New(path string) (*DB, error) {\n\tdb := &DB{path: path}\n\tif err := db.open(); err != nil {\n\t\treturn nil, errors.Wrap(err, \"Could not initialize DB\")\n\t}\n\n\treturn db, nil\n}", "func New(file string, configBytes []byte, log logging.Logger, namespace string, reg prometheus.Registerer) (database.Database, error) {\n\tparsedConfig := config{\n\t\tBlockCacheCapacity: DefaultBlockCacheSize,\n\t\tDisableSeeksCompaction: true,\n\t\tOpenFilesCacheCapacity: DefaultHandleCap,\n\t\tWriteBuffer: DefaultWriteBufferSize / 2,\n\t\tFilterBitsPerKey: DefaultBitsPerKey,\n\t\tMaxManifestFileSize: DefaultMaxManifestFileSize,\n\t\tMetricUpdateFrequency: DefaultMetricUpdateFrequency,\n\t}\n\tif len(configBytes) > 0 {\n\t\tif err := json.Unmarshal(configBytes, &parsedConfig); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"%w: %s\", ErrInvalidConfig, err)\n\t\t}\n\t}\n\n\tlog.Info(\"creating leveldb\",\n\t\tzap.Reflect(\"config\", parsedConfig),\n\t)\n\n\t// Open the db and recover any potential corruptions\n\tdb, err := leveldb.OpenFile(file, &opt.Options{\n\t\tBlockCacheCapacity: parsedConfig.BlockCacheCapacity,\n\t\tBlockSize: parsedConfig.BlockSize,\n\t\tCompactionExpandLimitFactor: parsedConfig.CompactionExpandLimitFactor,\n\t\tCompactionGPOverlapsFactor: parsedConfig.CompactionGPOverlapsFactor,\n\t\tCompactionL0Trigger: parsedConfig.CompactionL0Trigger,\n\t\tCompactionSourceLimitFactor: parsedConfig.CompactionSourceLimitFactor,\n\t\tCompactionTableSize: parsedConfig.CompactionTableSize,\n\t\tCompactionTableSizeMultiplier: parsedConfig.CompactionTableSizeMultiplier,\n\t\tCompactionTotalSize: parsedConfig.CompactionTotalSize,\n\t\tCompactionTotalSizeMultiplier: parsedConfig.CompactionTotalSizeMultiplier,\n\t\tDisableSeeksCompaction: parsedConfig.DisableSeeksCompaction,\n\t\tOpenFilesCacheCapacity: parsedConfig.OpenFilesCacheCapacity,\n\t\tWriteBuffer: parsedConfig.WriteBuffer,\n\t\tFilter: filter.NewBloomFilter(parsedConfig.FilterBitsPerKey),\n\t\tMaxManifestFileSize: parsedConfig.MaxManifestFileSize,\n\t})\n\tif _, corrupted := err.(*errors.ErrCorrupted); corrupted {\n\t\tdb, err = leveldb.RecoverFile(file, nil)\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%w: %s\", ErrCouldNotOpen, err)\n\t}\n\n\twrappedDB := &Database{\n\t\tDB: db,\n\t\tcloseCh: make(chan struct{}),\n\t}\n\tif parsedConfig.MetricUpdateFrequency > 0 {\n\t\tmetrics, err := newMetrics(namespace, reg)\n\t\tif err != nil {\n\t\t\t// Drop any close error to report the original error\n\t\t\t_ = 
db.Close()\n\t\t\treturn nil, err\n\t\t}\n\t\twrappedDB.metrics = metrics\n\t\twrappedDB.closeWg.Add(1)\n\t\tgo func() {\n\t\t\tt := time.NewTicker(parsedConfig.MetricUpdateFrequency)\n\t\t\tdefer func() {\n\t\t\t\tt.Stop()\n\t\t\t\twrappedDB.closeWg.Done()\n\t\t\t}()\n\n\t\t\tfor {\n\t\t\t\tif err := wrappedDB.updateMetrics(); err != nil {\n\t\t\t\t\tlog.Warn(\"failed to update leveldb metrics\",\n\t\t\t\t\t\tzap.Error(err),\n\t\t\t\t\t)\n\t\t\t\t}\n\n\t\t\t\tselect {\n\t\t\t\tcase <-t.C:\n\t\t\t\tcase <-wrappedDB.closeCh:\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\treturn wrappedDB, nil\n}", "func New(host string) *Database {\n\treturn &Database{\n\t\tHost: host,\n\t}\n}", "func New() (db.DB, error) {\n\tx := &MemDB{\n\t\teMap: make(map[string]*pb.Entity),\n\t\tgMap: make(map[string]*pb.Group),\n\t}\n\n\thealth.RegisterCheck(\"MemDB\", x.healthCheck)\n\treturn x, nil\n}", "func newDB(client *gorm.DB) (*DB, error) {\n\tif client == nil {\n\t\treturn nil, fmt.Errorf(\"Mysql: could not connect\")\n\t}\n\tdb := &DB{\n\t\tclient: client,\n\t}\n\treturn db, nil\n}", "func New(ctx context.Context, db database.Database, config Config) (MerkleDB, error) {\n\tmetrics, err := newMetrics(\"merkleDB\", config.Reg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn newDatabase(ctx, db, config, metrics)\n}", "func New(ctx context.Context, dirPath string, opts *Options) (*DB, error) {\n\tvar (\n\t\trepo storage.Repository\n\t\terr error\n\t\tdbCloser io.Closer\n\t)\n\tif opts == nil {\n\t\topts = defaultOptions()\n\t}\n\n\tif opts.Logger == nil {\n\t\topts.Logger = log.Noop\n\t}\n\n\tlock := multex.New()\n\tmetrics := newMetrics()\n\topts.LdbStats.CompareAndSwap(nil, &metrics.LevelDBStats)\n\n\tlocker := func(addr swarm.Address) func() {\n\t\tlock.Lock(addr.ByteString())\n\t\treturn func() {\n\t\t\tlock.Unlock(addr.ByteString())\n\t\t}\n\t}\n\n\tif dirPath == \"\" {\n\t\trepo, dbCloser, err = initInmemRepository(locker)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\t// only perform migration if not done already\n\t\tif _, err := os.Stat(path.Join(dirPath, indexPath)); err != nil {\n\t\t\terr = performEpochMigration(ctx, dirPath, opts)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\trepo, dbCloser, err = initDiskRepository(ctx, dirPath, locker, opts)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tsharkyBasePath := \"\"\n\tif dirPath != \"\" {\n\t\tsharkyBasePath = path.Join(dirPath, sharkyPath)\n\t}\n\terr = migration.Migrate(\n\t\trepo.IndexStore(),\n\t\tlocalmigration.AllSteps(sharkyBasePath, sharkyNoOfShards, repo.ChunkStore()),\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcacheObj, err := initCache(ctx, opts.CacheCapacity, repo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlogger := opts.Logger.WithName(loggerName).Register()\n\n\tdb := &DB{\n\t\tmetrics: metrics,\n\t\tlogger: logger,\n\t\tbaseAddr: opts.Address,\n\t\trepo: repo,\n\t\tlock: lock,\n\t\tcacheObj: cacheObj,\n\t\tretrieval: noopRetrieval{},\n\t\tpusherFeed: make(chan *pusher.Op),\n\t\tquit: make(chan struct{}),\n\t\tbgCacheLimiter: make(chan struct{}, 16),\n\t\tdbCloser: dbCloser,\n\t\tbatchstore: opts.Batchstore,\n\t\tvalidStamp: opts.ValidStamp,\n\t\tevents: events.NewSubscriber(),\n\t\treserveBinEvents: events.NewSubscriber(),\n\t\topts: workerOpts{\n\t\t\twarmupDuration: opts.WarmupDuration,\n\t\t\twakeupDuration: opts.ReserveWakeUpDuration,\n\t\t},\n\t\tdirectUploadLimiter: make(chan struct{}, pusher.ConcurrentPushes),\n\t\tinFlight: 
new(util.WaitingCounter),\n\t}\n\n\tif db.validStamp == nil {\n\t\tdb.validStamp = postage.ValidStamp(db.batchstore)\n\t}\n\n\tif opts.ReserveCapacity > 0 {\n\t\trs, err := reserve.New(\n\t\t\topts.Address,\n\t\t\trepo.IndexStore(),\n\t\t\topts.ReserveCapacity,\n\t\t\topts.RadiusSetter,\n\t\t\tlogger,\n\t\t\tfunc(ctx context.Context, store internal.Storage, addrs ...swarm.Address) error {\n\t\t\t\tdefer func() { db.metrics.CacheSize.Set(float64(db.cacheObj.Size())) }()\n\n\t\t\t\tdb.lock.Lock(cacheAccessLockKey)\n\t\t\t\tdefer db.lock.Unlock(cacheAccessLockKey)\n\n\t\t\t\treturn cacheObj.MoveFromReserve(ctx, store, addrs...)\n\t\t\t},\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdb.reserve = rs\n\n\t\tdb.metrics.StorageRadius.Set(float64(rs.Radius()))\n\t\tdb.metrics.ReserveSize.Set(float64(rs.Size()))\n\t}\n\tdb.metrics.CacheSize.Set(float64(db.cacheObj.Size()))\n\n\t// Cleanup any dirty state in upload and pinning stores, this could happen\n\t// in case of dirty shutdowns\n\terr = errors.Join(\n\t\tupload.CleanupDirty(db),\n\t\tpinstore.CleanupDirty(db),\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn db, nil\n}", "func newMemorySliceDB() (*sliceDB, error) {\r\n\tdb, err := leveldb.Open(storage.NewMemStorage(), nil)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\treturn &sliceDB{\r\n\t\tlvl: db,\r\n\t\tquit: make(chan struct{}),\r\n\t}, nil\r\n}", "func New(db *sql.DB) *Model {\n\treturn &Model{db}\n}", "func New(db SqlxDB) *DB {\n\treturn &DB{\n\t\tDB: db,\n\t}\n}", "func New(size int) *DB {\n\treturn &DB{\n\t\tdocs: make(map[int][]byte, size),\n\t\tall: intset.NewBitSet(0),\n\t}\n}", "func NewDb() (*DbWrapper, error) {\n\tsql.Register(\"bftdb\", &sqlite3.SQLiteDriver{})\n\tdb, err := sql.Open(\"bftdb\", \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &DbWrapper{db}, nil\n}", "func NewDB() *DB {\n\treturn &DB{}\n}", "func (m *LocalManager) New(ctx context.Context, id string) (linker.Storage, error) {\n\tdb, err := NewLocalStorage(ctx, fmt.Sprintf(\"%s/db-%s\", m.path, id))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn db, nil\n}", "func newDatabase(s *Server) *Database {\n\treturn &Database{\n\t\tserver: s,\n\t\tusers: make(map[string]*DBUser),\n\t\tpolicies: make(map[string]*RetentionPolicy),\n\t\tshards: make(map[uint64]*Shard),\n\t\tseries: make(map[string]*Series),\n\t}\n}", "func New(db *sql.DB, logger log.Logger) (todo.Repository, error) {\n\t// return repository\n\treturn &repository{\n\t\tdb: db,\n\t\tlogger: log.With(logger, \"rep\", \"cockroachdb\"),\n\t}, nil\n}", "func New(ctx context.Context, config *Configuration) (*Database, error) {\n\tdbConfig := fmt.Sprintf(\n\t\t\"host=%s port=%d user=%s password='%s' dbname=%s search_path=%s sslmode=require\",\n\t\tconfig.Host,\n\t\tconfig.Port,\n\t\tconfig.User,\n\t\tconfig.Password,\n\t\tconfig.Name,\n\t\tconfig.Schema,\n\t)\n\n\tdb, err := sql.Open(config.Driver, dbConfig)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn nil, err\n\t}\n\n\treturn &Database{\n\t\tContext: ctx,\n\t\tDatabase: db,\n\t}, nil\n}", "func New(config *Config) (Database, error) {\n\tdb, err := connectToDB(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfmt.Println(\"Successfully connected to db\")\n\n\terr = migrateDB(config, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfmt.Println(\"Successfully ran migrations\")\n\n\tsqlxDB := sqlx.NewDb(db, config.Driver)\n\n\tbaseDB := database{\n\t\tdb: sqlxDB,\n\t}\n\n\treturn 
&queries{\n\t\tauthorsQueries{baseDB},\n\t\tpostsQueries{baseDB},\n\t}, nil\n}", "func New(db *sqlx.DB) *Model {\n\treturn &Model{db: db}\n}", "func New(db *bolt.DB) *kvq.DB {\n\treturn kvq.NewDB(&DB{db})\n}", "func New() (*ledis.DB, *LedisMock, error) {\n\t// Create a mock object\n\tmock := LedisMock{}\n\n\tdriver.Register(Store{DBName: \"mock\", Mock: &mock})\n\n\tmockcfg := config.NewConfigDefault()\n\tmockcfg.DBName = \"mock\"\n\n\tconn, err := ledis.Open(mockcfg)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tdb, err := conn.Select(0)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn db, &mock, nil\n}", "func New(db *sql.DB) qbdb.DB {\n\treturn qbdb.New(Driver{}, db)\n}", "func New(db *sql.DB) qbdb.DB {\n\treturn qbdb.New(Driver{}, db)\n}", "func New(sqliteDsn string) (*sql.DB, error) {\n\tdb, err := sql.Open(\"sqlite3\", sqliteDsn)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not open a connection to %q: %v\", sqliteDsn, err)\n\t}\n\n\tdb.SetMaxOpenConns(1)\n\n\treturn db, nil\n}", "func New(file string, configBytes []byte, log logging.Logger) (database.Database, error) {\n\tfilter := grocksdb.NewBloomFilter(BitsPerKey)\n\n\tblockOptions := grocksdb.NewDefaultBlockBasedTableOptions()\n\tblockOptions.SetBlockCache(grocksdb.NewLRUCache(BlockCacheSize))\n\tblockOptions.SetBlockSize(BlockSize)\n\tblockOptions.SetFilterPolicy(filter)\n\n\toptions := grocksdb.NewDefaultOptions()\n\toptions.SetCreateIfMissing(true)\n\toptions.OptimizeUniversalStyleCompaction(MemoryBudget)\n\toptions.SetBlockBasedTableFactory(blockOptions)\n\n\tif err := os.MkdirAll(file, perms.ReadWriteExecute); err != nil {\n\t\treturn nil, err\n\t}\n\n\tdb, err := grocksdb.OpenDb(options, file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\titeratorOptions := grocksdb.NewDefaultReadOptions()\n\titeratorOptions.SetFillCache(false)\n\n\treturn &Database{\n\t\tdb: db,\n\t\treadOptions: grocksdb.NewDefaultReadOptions(),\n\t\titeratorOptions: iteratorOptions,\n\t\twriteOptions: grocksdb.NewDefaultWriteOptions(),\n\t\tlog: log,\n\t}, nil\n}", "func New() *gorm.DB {\n\tvar driver string = viper.GetString(\"database_driver\")\n\tvar user string = viper.GetString(\"database_user\")\n\tvar password string = viper.GetString(\"database_password\")\n\tvar port string = viper.GetString(\"database_port\")\n\tvar host string = viper.GetString(\"database_host\")\n\tvar sslMode string = viper.GetString(\"database_ssl\")\n\tvar databaseName string = viper.GetString(\"database_name\")\n\tvar dbConn string = fmt.Sprintf(\"host=%s port=%s user=%s password=%s dbname=%s sslmode=%s\", host, port, user, password, databaseName, sslMode)\n\n\tvar maxIdleCons int = viper.GetInt(\"database_max_idle_conns\")\n\tvar maxOpenConns int = viper.GetInt(\"database_max_open_conns\")\n\n\tdb, err := gorm.Open(driver, dbConn)\n\tif err != nil {\n\t\tlogrus.Fatalf(\"Error connecting to database %s. 
%s.\", databaseName, err)\n\t}\n\n\tdb.SetLogger(gormlog.New(logrus.StandardLogger()))\n\tdb.LogMode(true)\n\n\tdb.DB().SetMaxIdleConns(maxIdleCons)\n\tdb.DB().SetMaxOpenConns(maxOpenConns)\n\n\treturn db\n}", "func NewDB(dbfile string) (*DB, error) {\n\tdb, err := gorm.Open(\"sqlite3\", dbfile)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"could not connect to db\")\n\t}\n\n\tif err := db.AutoMigrate(&Lease{}).Error; err != nil {\n\t\treturn nil, errors.Wrap(err, \"while migrating database\")\n\t}\n\n\treturn &DB{db: db}, nil\n}", "func New(options ...func(*sqlmock) error) (*sql.DB, Sqlmock, error) {\n\tpool.Lock()\n\tdsn := fmt.Sprintf(\"sqlmock_db_%d\", pool.counter)\n\tpool.counter++\n\n\tsmock := &sqlmock{dsn: dsn, drv: pool, ordered: true}\n\tpool.conns[dsn] = smock\n\tpool.Unlock()\n\n\treturn smock.open(options)\n}", "func New(db db) *Model {\n\treturn &Model{\n\t\tdb: db,\n\t}\n}", "func New(options *pg.Options) *pg.DB {\n\n db := pg.Connect(options)\n db.AddQueryHook(dbLogger{})\n return db\n}", "func newDBStore(db *leveldbhelper.DBHandle, dbName string) *store {\n\treturn &store{db, dbName}\n}", "func New(ctx context.Context, opts *Options) (database *DB, err error) {\n\tif opts != nil && opts.DriverName != \"dexie\" {\n\t\treturn nil, fmt.Errorf(`unexpected driver name for js/wasm: %q (only \"dexie\" is supported)`, opts.DriverName)\n\t}\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr = jsutil.RecoverError(r)\n\t\t}\n\t}()\n\tnewDexieDatabase := js.Global().Get(\"__mesh_dexie_newDatabase__\")\n\tif jsutil.IsNullOrUndefined(newDexieDatabase) {\n\t\treturn nil, errors.New(\"could not detect Dexie.js\")\n\t}\n\topts = parseOptions(opts)\n\tdexie := newDexieDatabase.Invoke(opts)\n\n\t// Automatically close the database connection when the context is canceled.\n\tgo func() {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\t_ = dexie.Call(\"close\")\n\t\t}\n\t}()\n\n\treturn &DB{\n\t\tctx: ctx,\n\t\tdexie: dexie,\n\t\topts: opts,\n\t}, nil\n}", "func New(config string, w io.Writer, wErr io.Writer) (db *Storage, err error) {\n\tif w == nil {\n\t\tw = os.Stdout\n\t}\n\tif wErr == nil {\n\t\twErr = os.Stderr\n\t}\n\tdb = &Storage{\n\t\tlog: alog.New(w, \"SQL: \", 0),\n\t\tlogErr: alog.New(w, \"SQLErr: \", 0),\n\t}\n\n\tif config == \"\" {\n\t\terr = fmt.Errorf(\"Invalid configuration passed (empty)\")\n\t\treturn\n\t}\n\tif db.db, err = sqlx.Open(\"mysql\", config); err != nil {\n\t\treturn\n\t}\n\treturn\n}", "func NewLevelDB(name, dir string) (db dbm.DB, err error) {\n\tbackend := dbm.GoLevelDBBackend\n\tif DBBackend == string(dbm.CLevelDBBackend) {\n\t\tbackend = dbm.CLevelDBBackend\n\t}\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr = fmt.Errorf(\"couldn't create db: %v\", r)\n\t\t}\n\t}()\n\treturn dbm.NewDB(name, backend, dir), err\n}", "func New(config *Config) (*Database, error) {\n\tdb, err := gorm.Open(\"postgres\", config.DatabaseURI)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to connect to database\")\n\t}\n\treturn &Database{db}, nil\n}", "func New() (*Db, error) {\n\t// Don't feel like setting a password on my local db\n\tp := os.Getenv(\"CIPHER_BIN_DB_PASSWORD\")\n\tif p != \"\" {\n\t\tp = fmt.Sprintf(\"password=%s\", p)\n\t}\n\n\tconnStr := fmt.Sprintf(\n\t\t\"host=%s port=%s user=%s %s dbname=%s 
sslmode=%s\",\n\t\tos.Getenv(\"CIPHER_BIN_DB_HOST\"),\n\t\tos.Getenv(\"CIPHER_BIN_DB_PORT\"),\n\t\tos.Getenv(\"CIPHER_BIN_DB_USER\"),\n\t\tp,\n\t\tos.Getenv(\"CIPHER_BIN_DB_NAME\"),\n\t\tos.Getenv(\"CIPHER_BIN_SSL_MODE\"),\n\t)\n\n\tdb, err := sql.Open(\"postgres\", connStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check that our connection is good\n\terr = db.Ping()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Db{db}, nil\n}", "func New(opts ...Opt) (*Store, error) {\n\tbadgerOptions := getBadgerOptions(opts...)\n\n\tdb, err := badger.Open(badgerOptions)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not open database: %w\", err)\n\t}\n\n\tstore := &Store{db, newChangelog()}\n\tif err = store.setup(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn store, nil\n}", "func New() *gorm.DB {\n\tdb, err := gorm.Open(\n\t\tsqlite.Open(\"fiber_api.db\"),\n\t\t&gorm.Config{},\n\t)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdb.AutoMigrate(&transactions.Transaction{})\n\tdb.AutoMigrate(&users.User{})\n\n\treturn db\n}", "func New(c *config.Config) *DAL {\n\tdb := pg.Connect(&pg.Options{\n\t\tAddr: c.PostgresHost + \":\" + c.PostgresPort,\n\t\tUser: c.PostgresUser,\n\t\tPassword: c.PostgresPass,\n\t\tDatabase: c.PostgresDatabase,\n\t})\n\tdal := &DAL{db}\n\n\tlog.Info(\"DAL waiting for database...\")\n\terr := dal.Ping()\n\tfor err != nil {\n\t\terr = dal.Ping()\n\t}\n\n\tlog.Info(\"Starting DAL using \", dal.db.Options().Addr)\n\n\treturn dal\n}", "func New(host string, port string, user string, pass string, dbname string) (*DataBase, error) {\n\tdsn := \"host=\" + host + \" user=\" + user + \" password=\" + pass + \" dbname=\" + dbname + \" port=\" + port + \" sslmode=disable\" + \" TimeZone=America/Sao_Paulo\"\n\tdb, err := gorm.Open(postgres.Open(dsn), &gorm.Config{})\n\tif err != nil{\n\t\treturn nil, err\n\t}\t\n\tdb.AutoMigrate(&entity.SalesData{})\n\treturn &DataBase{\n\t\thost: host,\n\t\tport: port,\n\t\tuser: user,\n\t\tpass: pass,\n\t\tdbname: dbname,\n\t\tconnection: db,\n\t}, err\n}", "func NewDB(e *entities.DB) *DB {\n\treturn &DB{e: e}\n}", "func NewDatabase(store LeaseStorage) *Database {\n\treturn &Database{\n\t\tstore: store,\n\t\tl: log.Log,\n\t}\n}", "func NewDb(context Context) Db {\n\treturn &dbImpl{\n\t\tcontext: context,\n\t}\n}", "func NewDB(file string) *DB {\n\tdb, err := sqlite.Open(&sqlite.ConnectionURL{Database: file})\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn &DB{sess: db}\n}", "func NewDatabase(name string, root *doltdb.RootValue, ddb *doltdb.DoltDB, rs *env.RepoState) *Database {\n\treturn &Database{\n\t\tname: name,\n\t\troot: root,\n\t\tddb: ddb,\n\t\trs: rs,\n\t\tbatchMode: single,\n\t\ttables: make(map[string]*DoltTable),\n\t}\n}", "func newDBWrapper(settings *Settings, provider provider) *dbWrapper {\n\tw := &dbWrapper{\n\t\tSettings: settings,\n\t\tprovider: provider,\n\t}\n\tif pp, ok := w.provider.(placeholdersProvider); ok {\n\t\tw.placeholdersProvider = pp\n\t}\n\n\treturn w\n}", "func NewMockDB(t *testing.T) *MockDB {\n\treturn &MockDB{t: t}\n}", "func NewDB() *MemDB {\n\treturn &MemDB{\n\t\tbtree: btree.New(bTreeDegree),\n\t\tsaved: make(map[uint64]*btree.BTree),\n\t\tvmgr: db.NewVersionManager(nil),\n\t}\n}", "func (m *MongoDBImpl) New(isread bool) (*mgo.Database, *mgo.Session, error) {\n\tm.ml.Lock()\n\tdefer m.ml.Unlock()\n\n\t// if m.master is alive then continue else, reset as empty.\n\tif m.master != nil {\n\t\tif err := m.master.Ping(); err != nil {\n\t\t\tm.master = nil\n\t\t}\n\t}\n\n\tses, err := 
getSession(m.Config)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tm.master = ses\n\n\tif isread {\n\t\tcopy := m.master.Copy()\n\t\tdb := copy.DB(m.Config.DB)\n\t\treturn db, copy, nil\n\t}\n\n\tclone := m.master.Clone()\n\tdb := clone.DB(m.Config.DB)\n\treturn db, clone, nil\n}", "func NewLevelDB(dir string) (*LevelDB, error) {\n\tdb, err := leveldb.OpenFile(dir, nil)\n\tldb := &LevelDB{db: db}\n\treturn ldb, err\n}", "func New(dbFilename string) (*EdDb, error) {\n\n\t// First, create a hook which will attach a shared memory-only database to\n\t// each connction opened by golang's database/sql connection pool\n\tsql.Register(\"sqlite3ConnectionCatchingDriver\",\n\t\t&sqlite.SQLiteDriver{\n\t\t\tConnectHook: func(newConn *sqlite.SQLiteConn) error {\n\t\t\t\tnewConn.Exec(\"ATTACH DATABASE 'file::memory:?cache=shared&busy_timeout=60000' AS mem\", nil)\n\t\t\t\tfmt.Println(\"Attach Database to \", newConn)\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t)\n\n\t// The hook is now in place, so can create a connection to the disk Db:\n\tdbURI := fmt.Sprintf(\"file:%s?cache=shared&busy_timeout=60000\", dbFilename)\n\tdbConn, err := sqlx.Connect(\"sqlite3ConnectionCatchingDriver\", dbURI)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdbConn.SetMaxIdleConns(10)\n\tdbConn.Exec(`PRAGMA foreign_keys = ON;`)\n\n\tedDb := EdDb{\n\t\tdbConn: dbConn,\n\t\tstatements: map[string]*sqlx.NamedStmt{},\n\t}\n\n\terr = edDb.initDbSchema()\n\tif err != nil {\n\t\tdbConn.Close()\n\t\treturn nil, err\n\t}\n\n\terr = edDb.buildPreparedStatements()\n\tif err != nil {\n\t\tdbConn.Close()\n\t\treturn nil, err\n\t}\n\treturn &edDb, nil\n}", "func New(db *bolt.DB) Repository {\n\treturn Repository{\n\t\tdb: db,\n\t}\n}", "func newDatabase(count int) (*database, error) {\n\tdb, err := sql.Open(\"postgres\", fmt.Sprintf(`host=%s port=%s user=%s\n\t\tpassword=%s dbname=%s sslmode=disable`,\n\t\thost, port, user, password, dbname))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := db.Ping(); err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Printf(\"connected to psql client, host: %s\\n\", host)\n\n\treturn &database{\n\t\tdb: db,\n\t\terrChan: make(chan error, count),\n\t}, nil\n}", "func NewDB(\n\tregionName string,\n\tsnapshotter *snap.Snapshotter,\n\tproposeC chan []byte,\n\tcommitC chan []byte,\n\terrorC chan error,\n\tstableStore StableStore,\n\tcommandHander CommandHandler,\n) DB {\n\tdb := &phananxDB{\n\t\tregionName: regionName,\n\t\tproposeC: proposeC,\n\t\tstableStore: stableStore,\n\t\tcommandHander: commandHander,\n\t\tsnapshotter: snapshotter,\n\t}\n\t// replay log into key-value map\n\tdb.readCommits(commitC, errorC)\n\t// read commits from raft into kvStore map until error\n\tgo db.readCommits(commitC, errorC)\n\n\treturn db\n}", "func NewDatabase() (ret OpenDB, err error) {\n\tdb := C.DatabaseNew()\n\tif db == nil {\n\t\tcErr := C.GoString(C.LastError)\n\t\tif len(cErr) > 0 {\n\t\t\terr = fmt.Errorf(\"%v\", C.GoString(C.LastError))\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"Unknown error has occured\")\n\t\t}\n\t}\n\tret.db = db\n\treturn ret, err\n}", "func New() Lernen {\n\tvar db Lernen\n\n\td := api.New(client.HostURL).DB(\"lernen\")\n\tdb.db = d\n\n\tdb.views.GelerntVon = GelerntVon{\n\t\tView: d.View(\"kasten\", \"gelernt-von\")}\n\n\tdb.views.NachUser = NachUser{\n\t\tView: d.View(\"user\", \"nach-user\")}\n\n\tdb.views.FachNachKarte = FachNachKarte{\n\t\tView: d.View(\"karten\", \"fach-nach-karte\")}\n\n\treturn db\n}", "func New(driver string, conStr string) (*gorm.DB, error) {\n\tdb, err := 
gorm.Open(driver, conStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = db.DB().Ping()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn db, nil\n}" ]
[ "0.7543629", "0.74328727", "0.73548853", "0.7302741", "0.72663367", "0.7214843", "0.7162632", "0.7141212", "0.7136986", "0.7118748", "0.70599926", "0.70599926", "0.6947913", "0.6931758", "0.687707", "0.68690205", "0.6860632", "0.6831759", "0.6822207", "0.6798072", "0.6788559", "0.6783313", "0.6782995", "0.67709965", "0.6765662", "0.67602754", "0.6738872", "0.67236316", "0.66909415", "0.6682271", "0.66761893", "0.66755", "0.66574574", "0.665086", "0.66420203", "0.6636185", "0.66348714", "0.66280586", "0.6626863", "0.6618071", "0.6618071", "0.66116214", "0.66115636", "0.66043615", "0.66019356", "0.6600296", "0.6599908", "0.65837735", "0.6565395", "0.65602255", "0.65579593", "0.65569025", "0.6554975", "0.6552646", "0.6545293", "0.6536749", "0.6532927", "0.6530835", "0.6520878", "0.65147376", "0.6511791", "0.65057194", "0.6489183", "0.6486509", "0.64729875", "0.64729875", "0.64606756", "0.64563406", "0.6450165", "0.6439652", "0.64309496", "0.64239216", "0.6411731", "0.64111346", "0.6396697", "0.6395616", "0.6389381", "0.63878864", "0.63820636", "0.63713336", "0.63641703", "0.63575757", "0.63537925", "0.6350956", "0.6348634", "0.63455385", "0.63267595", "0.6323259", "0.6320474", "0.6315859", "0.6312316", "0.6311174", "0.630362", "0.63034105", "0.6299612", "0.62956166", "0.62883747", "0.62841094", "0.6282404", "0.6276797" ]
0.66963005
28
NewConnection creates a new PGSQLConnection by using the URL found in the DATABASE_URL environment variable
func NewConnection() (*PGSQLConnection, error) { url, ok := os.LookupEnv(databaseENV) if !ok { return nil, fmt.Errorf("missing ENV %s", databaseENV) } db, err := sqlx.Connect("postgres", url) if err != nil { return nil, err } return &PGSQLConnection{ connection: db, }, nil }
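Not part of the record: a minimal, hedged sketch of the same pattern — read DATABASE_URL, connect through sqlx, and verify the handle with a round-trip query. The lib/pq driver import is an assumption, since the record does not show its import block.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" // assumed Postgres driver; sqlx needs one registered
)

func main() {
	url, ok := os.LookupEnv("DATABASE_URL")
	if !ok {
		log.Fatal("missing ENV DATABASE_URL")
	}

	// sqlx.Connect is Open followed by Ping, so a non-nil handle is live,
	// just as in the constructor above.
	db, err := sqlx.Connect("postgres", url)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var one int
	if err := db.Get(&one, "SELECT 1"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("connected:", one == 1)
}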
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewConnection(cfg config.PostgresConfig) (client *sqlt.DB, err error) {\n\tDSN := fmt.Sprintf(\"host=%s port=5432 dbname=%s user=%s password=%s sslmode=disable\", cfg.Host, cfg.Name, cfg.User, cfg.Password)\n\n\tclient, err = sqlt.Open(\"postgres\", DSN)\n\tif err != nil {\n\t\treturn\n\t}\n\tclient.SetMaxOpenConnections(100)\n\treturn\n}", "func newPostgresConnection(cmd *cobra.Command, kind string) (*sqlx.DB, error) {\n\thost, _ := cmd.Flags().GetString(\"postgres-host\")\n\tport, _ := cmd.Flags().GetInt(\"postgres-port\")\n\tsslmode, _ := cmd.Flags().GetString(\"postgres-sslmode\")\n\n\tuser, _ := cmd.Flags().GetString(kind + \"-postgres-user\")\n\tif user == \"\" {\n\t\treturn nil, errors.Errorf(\"flag must not be empty: %s-postgres-user\", kind)\n\t}\n\n\tpassword, _ := cmd.Flags().GetString(kind + \"-postgres-password\")\n\tif password == \"\" {\n\t\treturn nil, errors.Errorf(\"flag must not be empty: %s-postgres-password\", kind)\n\t}\n\n\tdbname, _ := cmd.Flags().GetString(kind + \"-postgres-name\")\n\tif dbname == \"\" {\n\t\treturn nil, errors.Errorf(\"flag must not be empty: %s-postgres-name\", kind)\n\t}\n\n\t// use default dbname, if not provided\n\tif dbname == \"\" {\n\t\tdbname = user\n\t}\n\n\treturn pq.NewConnection(user, password, dbname, host, sslmode, port)\n}", "func newDbConnection(connStr string) *sql.DB {\n\tdb, err := sql.Open(\"postgres\", connStr)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\terr = db.Ping()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println(\"[es] new db connection established.\")\n\treturn db\n}", "func NewDBConnection(options *pg.Options) *pg.DB {\n\treturn pg.Connect(options)\n}", "func NewConnection(user, password, name, port, host string, l interfaces.Logger) *sqlx.DB {\n\tdbConnection := fmt.Sprintf(\"user=%s password=%s dbname=%s port=%s host=%s sslmode=disable\",\n\t\tuser, password, name, port, host)\n\n\tl.Info(\"DB CONN\", \"host\", host, \"port\", port)\n\n\tvar db *sqlx.DB\n\tvar err error\n\tif db, err = sqlx.Connect(\"postgres\", dbConnection); err != nil {\n\t\tl.Panic(\"DB CONN FAILED\", \"error\", err.Error())\n\t}\n\n\treturn db\n}", "func New(url string) (*Conn, error) {\n\tconn, err := pgx.Connect(context.Background(), url)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to connection to database: %v\\n\", err)\n\t}\n\n\treturn &Conn{db: conn}, nil\n}", "func NewPostgresConnection() (c *PostgresConnection, err error) {\n\n\tfmt.Println(\"Connecting to PostgreSQL database ...\")\n\n\tdriver := os.Getenv(\"DRIVER\")\n\thost := os.Getenv(\"HOST\")\n\tport := os.Getenv(\"PORT\")\n\tuser := os.Getenv(\"USER\")\n\tpassword := os.Getenv(\"PASSWORD\")\n\tdbname := os.Getenv(\"DBNAME\")\n\n\tconnectionString := fmt.Sprintf(\"host=%s port=%s user=%s \"+\n\t\t\"password=%s dbname=%s sslmode=disable\", host, port, user, password, dbname)\n\n\tfmt.Println(connectionString)\n\tdb, err := sql.Open(driver, connectionString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = db.Ping()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfmt.Println(\"Connected!\")\n\n\treturn &PostgresConnection{db}, nil\n}", "func newPostgresConnection(dsn string) (*postgresConnection, error) {\n\tconn := postgresConnection{\n\t\tDSN: dsn,\n\t\tDB: nil,\n\t}\n\tdb, err := conn.open()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not open postgres db connection, err = %w\", err)\n\t}\n\tconn.DB = db\n\tif err := conn.Check(); err != nil {\n\t\treturn nil, fmt.Errorf(\"postgres db connection check failed, err = %w\", 
err)\n\t}\n\treturn &conn, nil\n}", "func NewConnection(cfg *Config, l *zap.Logger) (db *pg.DB, err error) {\n\tif cfg == nil {\n\t\terr = ErrEmptyConfig\n\t\treturn\n\t}\n\n\tif l == nil {\n\t\terr = ErrEmptyLogger\n\t\treturn\n\t}\n\n\topts := &pg.Options{\n\t\tAddr: cfg.Hostname,\n\t\tUser: cfg.Username,\n\t\tPassword: cfg.Password,\n\t\tDatabase: cfg.Database,\n\t\tPoolSize: cfg.PoolSize,\n\t}\n\n\tif cfg.Debug {\n\t\tl.Debug(\"Connect to PostgreSQL\",\n\t\t\tzap.String(\"hostname\", cfg.Hostname),\n\t\t\tzap.String(\"username\", cfg.Username),\n\t\t\tzap.String(\"password\", cfg.Password),\n\t\t\tzap.String(\"database\", cfg.Database),\n\t\t\tzap.Int(\"pool_size\", cfg.PoolSize),\n\t\t\tzap.Any(\"options\", cfg.Options))\n\t}\n\n\tif opts.TLSConfig, err = ssl(cfg.Options); err != nil {\n\t\treturn nil, err\n\t}\n\n\tdb = pg.Connect(opts)\n\tif _, err = db.ExecOne(\"SELECT 1\"); err != nil {\n\t\treturn nil, errors.Wrap(err, \"can't connect to postgres\")\n\t}\n\n\tif cfg.Debug {\n\t\th := new(Hook)\n\t\th.After = func(ctx context.Context, e *pg.QueryEvent) error {\n\t\t\tquery, qErr := e.FormattedQuery()\n\t\t\tl.Debug(\"pg query\",\n\t\t\t\tzap.String(\"query\", string(query)),\n\t\t\t\tzap.Duration(\"query_time\", time.Since(h.StartAt)),\n\t\t\t\tzap.Any(\"params\", e.Params),\n\t\t\t\tzap.NamedError(\"format_error\", qErr),\n\t\t\t\tzap.Error(e.Err))\n\n\t\t\treturn e.Err\n\t\t}\n\t\tdb.AddQueryHook(h)\n\t}\n\n\treturn\n}", "func createConnection() *sql.DB {\n\t// load .env file\n\terr := godotenv.Load(\".env\")\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Error loading .env file\")\n\t}\n\n\t// Open the connection\n\tdb, err := sql.Open(\"postgres\", os.Getenv(\"POSTGRES_URL\"))\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// check the connection\n\terr = db.Ping()\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(\"Successfully connected!\")\n\t// return the connection\n\treturn db\n}", "func createConnection() *sql.DB {\n\t// load .env file\n\terr := godotenv.Load(\".env\")\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Error loading .env file\")\n\t}\n\n\t// Open the connection\n\tusername := os.Getenv(\"db_user\")\n\tpassword := os.Getenv(\"db_pass\")\n\tdbName := os.Getenv(\"db_name\")\n\tdbHost := os.Getenv(\"db_host\")\n\tdbURI := fmt.Sprintf(\"host=%s user=%s dbname=%s sslmode=disable password=%s\", dbHost, username, dbName, password) //Build connection string\n\n\tdb, err := sql.Open(\"postgres\", dbURI)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// check the connection\n\terr = db.Ping()\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(\"Successfully connected!\")\n\t// return the connection\n\treturn db\n}", "func NewConnection(pgsql postgres.Client) core.Connection {\n\treturn &connection{\n\t\tpg: pgsql,\n\t\tmainPg: pgsql.MainDatastore(),\n\t\tappUser: &applicationUser{\n\t\t\tpg: pgsql,\n\t\t\tmainPg: pgsql.MainDatastore(),\n\t\t},\n\t}\n}", "func PGSQLConnect() *PostgreSQLConnection {\n\tif connection == nil {\n\t\tdbHost := configuration.Database.Host\n\t\tdbPort := configuration.Database.Port\n\t\tdbUser := configuration.Database.User\n\t\tdbPassword := configuration.Database.Password\n\t\tdatabase := configuration.Database.DatabaseName\n\n\t\tconn := fmt.Sprintf(\"host=%s port=%s user=%s password=%s database=%s\"+\n\t\t\t\" sslmode=disable\", dbHost, dbPort, dbUser, dbPassword, database)\n\n\t\tdb, err := sql.Open(\"postgres\", conn)\n\n\t\t// Based on the users of the application\n\t\tdb.SetMaxOpenConns(10)\n\n\t\tif err != nil 
{\n\t\t\tlog.Printf(\"[!] Couldn't connect to the database. Reason %v\\n\", err)\n\t\t\treturn nil\n\t\t}\n\n\t\terr = db.Ping()\n\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[!] Couldn't ping to the database. Reason %v\\n\", err)\n\t\t\treturn nil\n\t\t}\n\t\tconnection = &PostgreSQLConnection{}\n\t\tconnection.db = db\n\t}\n\n\treturn connection\n}", "func New(s Settings) (*sqlx.DB, error) {\n\tconn, err := sqlx.Connect(\"pgx\", fmt.Sprintf(\"postgres://postgres:devpassword@postgres:5432/%s?sslmode=disable\", s.Database))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn conn, nil\n}", "func connectDatabase(connectionURL string) *sql.DB {\n\tdb, err := sql.Open(\"postgres\", connectionURL)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn db\n}", "func newConnection() (*gredis.Client, error) {\n\thost = os.Getenv(\"REDIS_HOST\")\n\tport = os.Getenv(\"REDIS_PORT\")\n\trdb := gredis.NewClient(&gredis.Options{\n\t\tAddr: fmt.Sprintf(\"%s:%s\", host, port),\n\t\tPassword: password,\n\t\tDB: db,\n\t})\n\n\tstatus := rdb.Ping(rdb.Context())\n\terr := status.Err()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn rdb, nil\n}", "func NewDB(connURL string) (*DB, error) {\n\tdb, err := sql.Open(\"postgres\", connURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = db.Ping(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &DB{db}, nil\n}", "func NewConnection(username, password, dsn string /*commitMode , pool, twophase bool*/) (\n\tconn Connection, err error) {\n\tconn = Connection{username: username, password: password, dsn: dsn}\n\t/*\n\t\tif pool != nil {\n\t\t\tconn.environment = pool.environment\n\t\t} else\n\t*/\n\tconn.environment, err = NewEnvironment()\n\n\treturn conn, err\n}", "func New(ctx context.Context, url string) (*DB, error) {\n\tdb, err := pgxpool.Connect(ctx, url)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to connection to database: %s\", err)\n\t}\n\n\treturn &DB{db}, err\n}", "func NewDatabase() (*gorm.DB, error) {\n\tlog.Info(\"Set up new database connection\")\n\n\tusername := os.Getenv(\"PSQL_USERNAME\")\n\tpassoword := os.Getenv(\"PSQL_PASSWORD\")\n\thostname := os.Getenv(\"PSQL_HOSTNAME\")\n\tdatabase := os.Getenv(\"PSQL_DATABASE\")\n\tport := os.Getenv(\"PSQL_PORT\")\n\n\n\tconnection := fmt.Sprintf(\"host=%s user=%s password=%s dbname=%s port=%s sslmode=disable\", hostname,\n\t\tusername, passoword, database, port)\n\n\tdb, err := gorm.Open(postgres.Open(connection), &gorm.Config{})\n\tif err != nil {\n\t\treturn db, err\n\t}\n\n\treturn db, nil\n}", "func PostgreSQLConnectionString(user, password, hostname, port, dbname, sslmode string) string {\n\ts, _ := pq.ParseURL(\n\t\tfmt.Sprintf(\n\t\t\t\"postgres://%s:%s@%s:%s/%s?sslmode=%s\",\n\t\t\tuser,\n\t\t\tpassword,\n\t\t\thostname,\n\t\t\tport,\n\t\t\tdbname,\n\t\t\tsslmode,\n\t\t),\n\t)\n\treturn s\n}", "func NewConnection(args *args.ArgumentList) (*SQLConnection, error) {\n\tdb, err := sqlx.Connect(\"mssql\", CreateConnectionURL(args))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &SQLConnection{\n\t\tConnection: db,\n\t\tHost: args.Hostname,\n\t}, nil\n}", "func OpenConnection(props *DbProperties) *sql.DB {\n\tpsqlInfo := fmt.Sprintf(\"host=%s port=%s user=%s password=%s dbname=%s sslmode=disable\",\n\t\tprops.Host, props.Port, props.User, props.Password, props.Dbname)\n\n\tdb, err := sql.Open(\"postgres\", psqlInfo)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\terr = db.Ping()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn db\n}", "func New() (*DB, error) {\n 
connStr := fmt.Sprintf(\n \"user=%s password=%s dbname=%s host=%s port=%s\",\n os.Getenv(\"USER_NAME\"),\n os.Getenv(\"USER_PASSWORD\"),\n os.Getenv(\"DB_NAME\"),\n os.Getenv(\"DB_HOST\"),\n os.Getenv(\"DB_PORT\"),\n )\n\n db, err := sql.Open(\"postgres\", connStr)\n if err != nil {\n return nil, err\n }\n if err = db.Ping(); err != nil {\n return nil, err\n }\n return &DB{db}, nil\n}", "func NewConnection(config dbConfig) (DatabaseConnection, error) {\n\tvar proto = \"tcp\"\n\treturn connectionBuilder(buildConnString(\n\t\tconfig.DBPath(),\n\t\tconfig.DBPort(),\n\t\tconfig.DBSchema(),\n\t\tconfig.DBUsername(),\n\t\tconfig.DBPassword(),\n\t\tproto,\n\t\ttrue,\n\t), config.DBSchema())\n}", "func SetupDatabaseConnection() *sql.DB {\n\tdbURL, err := pq.ParseURL(os.Getenv(\"DATABASE_URL\"))\n\tconfig.LogFatal(err)\n\n\tdb, err = sql.Open(\"postgres\", dbURL)\n\tconfig.LogFatal(err)\n\n\terr = db.Ping()\n\tconfig.LogFatal(err)\n\n\treturn db\n}", "func NewDatabaseConnection(address, password string, log *logger.Logger) Database {\n\treturn Database{\n\t\taddress: address,\n\t\tpassword: password,\n\t\tctx: context.Background(),\n\t\tlog: log,\n\t}\n}", "func newDatabase(count int) (*database, error) {\n\tdb, err := sql.Open(\"postgres\", fmt.Sprintf(`host=%s port=%s user=%s\n\t\tpassword=%s dbname=%s sslmode=disable`,\n\t\thost, port, user, password, dbname))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := db.Ping(); err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Printf(\"connected to psql client, host: %s\\n\", host)\n\n\treturn &database{\n\t\tdb: db,\n\t\terrChan: make(chan error, count),\n\t}, nil\n}", "func New(ctx context.Context, ns string, databases ...string) harbormetav1.PostgresConnectionWithParameters {\n\tk8sClient := test.GetClient(ctx)\n\n\tpgName := test.NewName(\"pg\")\n\tpgPasswordName := test.NewName(\"pg-password\")\n\tpgConfigMapName := test.NewName(\"init-db\")\n\n\tgomega.Expect(k8sClient.Create(ctx, &corev1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: pgName,\n\t\t\tNamespace: ns,\n\t\t},\n\t\tSpec: corev1.ServiceSpec{\n\t\t\tPorts: []corev1.ServicePort{{\n\t\t\t\tName: \"http\",\n\t\t\t\tPort: postgresPort,\n\t\t\t}},\n\t\t\tSelector: map[string]string{\n\t\t\t\t\"pod-selector\": pgName,\n\t\t\t},\n\t\t},\n\t})).To(gomega.Succeed())\n\n\tgomega.Expect(k8sClient.Create(ctx, &corev1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: pgPasswordName,\n\t\t\tNamespace: ns,\n\t\t},\n\t\tStringData: map[string]string{\n\t\t\tharbormetav1.PostgresqlPasswordKey: \"th3Adm1nPa55w0rd\",\n\t\t},\n\t\tType: harbormetav1.SecretTypePostgresql,\n\t})).To(gomega.Succeed())\n\n\tsql := \"\"\n\tfor _, database := range databases {\n\t\tsql += fmt.Sprintf(\"CREATE DATABASE %s WITH OWNER postgres;\", database)\n\t}\n\n\tgomega.Expect(k8sClient.Create(ctx, &corev1.ConfigMap{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: pgConfigMapName,\n\t\t\tNamespace: ns,\n\t\t},\n\t\tData: map[string]string{\n\t\t\t\"init-db.sql\": sql,\n\t\t},\n\t})).To(gomega.Succeed())\n\n\tgomega.Expect(k8sClient.Create(ctx, &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: pgName,\n\t\t\tNamespace: ns,\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\"pod-selector\": pgName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"pod-selector\": 
pgName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tVolumes: []corev1.Volume{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"data\",\n\t\t\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\t\t\tEmptyDir: &corev1.EmptyDirVolumeSource{},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"custom-init-scripts\",\n\t\t\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\t\t\t\t\tName: pgConfigMapName,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tContainers: []corev1.Container{{\n\t\t\t\t\t\tName: \"database\",\n\t\t\t\t\t\tImage: \"bitnami/postgresql:13.6.0\",\n\t\t\t\t\t\tEnv: []corev1.EnvVar{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"POSTGRESQL_PASSWORD\",\n\t\t\t\t\t\t\t\tValueFrom: &corev1.EnvVarSource{\n\t\t\t\t\t\t\t\t\tSecretKeyRef: &corev1.SecretKeySelector{\n\t\t\t\t\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\t\t\t\t\t\tName: pgPasswordName,\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\tKey: harbormetav1.PostgresqlPasswordKey,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tPorts: []corev1.ContainerPort{{\n\t\t\t\t\t\t\tContainerPort: 5432,\n\t\t\t\t\t\t}},\n\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tMountPath: \"/var/lib/postgresql/data\",\n\t\t\t\t\t\t\t\tName: \"data\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tMountPath: \"/docker-entrypoint-initdb.d\",\n\t\t\t\t\t\t\t\tName: \"custom-init-scripts\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})).To(gomega.Succeed())\n\n\treturn harbormetav1.PostgresConnectionWithParameters{\n\t\tPostgresConnection: harbormetav1.PostgresConnection{\n\t\t\tPostgresCredentials: harbormetav1.PostgresCredentials{\n\t\t\t\tPasswordRef: pgPasswordName,\n\t\t\t\tUsername: \"postgres\",\n\t\t\t},\n\t\t\tDatabase: \"postgres\",\n\t\t\tHosts: []harbormetav1.PostgresHostSpec{{\n\t\t\t\tHost: pgName,\n\t\t\t\tPort: 5432,\n\t\t\t}},\n\t\t},\n\t\tParameters: map[string]string{\n\t\t\tharbormetav1.PostgresSSLModeKey: string(harbormetav1.PostgresSSLModeDisable),\n\t\t},\n\t}\n}", "func NewConnection(ctx *pulumi.Context,\n\tname string, args *ConnectionArgs, opts ...pulumi.ResourceOption) (*Connection, error) {\n\tif args == nil || args.CloudSql == nil {\n\t\treturn nil, errors.New(\"missing required argument 'CloudSql'\")\n\t}\n\tif args == nil {\n\t\targs = &ConnectionArgs{}\n\t}\n\tvar resource Connection\n\terr := ctx.RegisterResource(\"gcp:bigquery/connection:Connection\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func newConnection(connString string) (*connection, error) {\n\n\tresult := &connection{parameters: make(map[string]string), usePreparedStmts: true}\n\n\tvar err error\n\tresult.connURL, err = url.Parse(connString)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresult.clientPID = os.Getpid()\n\tif client_label := result.connURL.Query().Get(\"client_label\"); client_label != \"\" {\n\t\tresult.sessionID = client_label\n\t} else {\n\t\tresult.sessionID = fmt.Sprintf(\"%s-%s-%d-%d\", driverName, driverVersion, result.clientPID, time.Now().Unix())\n\t}\n\n\t// Read the interpolate flag.\n\tif iFlag := result.connURL.Query().Get(\"use_prepared_statements\"); iFlag != \"\" {\n\t\tresult.usePreparedStmts 
= iFlag == \"1\"\n\t}\n\n\t// Read Autocommit flag.\n\tif iFlag := result.connURL.Query().Get(\"autocommit\"); iFlag == \"\" || iFlag == \"1\" {\n\t\tresult.autocommit = \"on\"\n\t} else {\n\t\tresult.autocommit = \"off\"\n\t}\n\n\t// Read OAuth access token flag.\n\tresult.oauthaccesstoken = result.connURL.Query().Get(\"oauth_access_token\")\n\n\t// Read connection load balance flag.\n\tloadBalanceFlag := result.connURL.Query().Get(\"connection_load_balance\")\n\n\t// Read connection failover flag.\n\tbackupHostsStr := result.connURL.Query().Get(\"backup_server_node\")\n\tif backupHostsStr == \"\" {\n\t\tresult.connHostsList = []string{result.connURL.Host}\n\t} else {\n\t\t// Parse comma-separated list of backup host-port pairs\n\t\thosts := strings.Split(backupHostsStr, \",\")\n\t\t// Push target host to front of the hosts list\n\t\tresult.connHostsList = append([]string{result.connURL.Host}, hosts...)\n\t}\n\n\t// Read SSL/TLS flag.\n\tsslFlag := strings.ToLower(result.connURL.Query().Get(\"tlsmode\"))\n\tif sslFlag == \"\" {\n\t\tsslFlag = tlsModeNone\n\t}\n\n\tresult.conn, err = result.establishSocketConnection()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Load Balancing\n\tif loadBalanceFlag == \"1\" {\n\t\tif err = result.balanceLoad(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif sslFlag != tlsModeNone {\n\t\tif err = result.initializeSSL(sslFlag); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif err = result.handshake(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = result.initializeSession(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn result, nil\n}", "func newConnection() connection {\n\tdatabase := sqlx.MustOpen(driverName, \"user=erosai dbname=erosai password=Erosai11!! sslmode=disable\")\n\tc := connection{\n\t\tdb: database,\n\t}\n\n\tregisterConnection(&c)\n\n\treturn c\n}", "func connectPG(user, password, host, port, dbName, sslmode string) (*pg.DB, error) {\n\topt, err := pg.ParseURL(\n\t\tfmt.Sprintf(\"postgres://%s:%s@%s:%s/%s?sslmode=%s\",\n\t\t\tuser, password, host, port, dbName, sslmode))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn pg.Connect(opt), nil\n}", "func NewPostgre(username, password, host, port, dbname, other string,\n\tmaxConnections, maxIdleConnection int) (db IDatabase, err error) {\n\n\tvar workDb postgresDb\n\n\t_, err = workDb.createConnection(username, password, host, port, dbname, other,\n\t\tmaxConnections, maxIdleConnection)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &workDb, nil\n}", "func InitDBConnection() *sql.DB {\n\tdb, err := sql.Open(\"postgres\", os.Getenv(\"DATABASE_URL\"))\n\t// if there is an error opening the connection, handle it\n\tif err != nil {\n\t\tfmt.Println(\"Cannot open SQL connection\")\n\t\tpanic(err.Error())\n\t}\n\n\treturn db\n}", "func NewPostgreSQL(s string) (*sql.DB, error) {\n\treturn sql.Open(\"postgres\", s)\n}", "func (c *CloudSqlConfiguration) CreateConnection() gorm.Dialector {\n\n\tsocketDir, isSet := os.LookupEnv(\"DB_SOCKET_DIR\")\n\tif !isSet {\n\t\tsocketDir = \"/cloudsql\"\n\t}\n\n\tconnectionString := fmt.Sprintf(\"user=%s password=%s database=%s host=%s/%s\", c.user, c.password, c.databaseName, socketDir, c.instanceConnectionName)\n\treturn postgres.Open(connectionString)\n}", "func NewPgSQL(config Config) *PgSQL {\n\tpg := PgSQL{db: config}\n\tvar err error\n\ttry := 0\n\tfor try < 10 {\n\t\ttime.Sleep(time.Duration(try) * time.Second) //Increasing time between tries. 
Starting with 0.\n\t\terr = pg.connect()\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\ttry++\n\t\tfmt.Printf(\"Error connecting to the database. Err: %s \\n\", err.Error())\n\t\tfmt.Println(\"Retrying database connection... Try \" + strconv.Itoa(try) + \"/10\")\n\t}\n\tif err != nil {\n\t\tfmt.Printf(\"Error connecting to the database. Err: %s \\n\", err.Error())\n\t\tos.Exit(2)\n\t}\n\treturn &pg\n}", "func New(cfg *Config) (*Conn, error) {\r\n\tg, err := gorm.Open(\r\n\t\t\"postgres\",\r\n\t\tfmt.Sprintf(\r\n\t\t\t\"host=%s port=%d dbname=%s user=%s password=%s sslmode=disable\",\r\n\t\t\tcfg.Host,\r\n\t\t\tcfg.Port,\r\n\t\t\tcfg.Name,\r\n\t\t\tcfg.User,\r\n\t\t\tcfg.Password,\r\n\t\t),\r\n\t)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\tc := &Conn{\r\n\t\tDB: g,\r\n\t\tlog: logrus.WithField(\"context\", \"db\"),\r\n\t}\r\n\treturn c, nil\r\n}", "func PgConn(dataSourceName string) *sql.DB {\n\tvar err error\n\tfmt.Println(\"connecting...\")\n\tdbDriver, err = sql.Open(\"postgres\", dataSourceName)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\t//defer db.Close()\n\n\tif err = dbDriver.Ping(); err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tfmt.Println(\"#****Successfully connected*****#\")\n\treturn dbDriver\n}", "func NewDB(host, name, user, password string) (*PsqlDB, error) {\n\tlog.Printf(\n\t\t`\n-----------------Database Environment-----------------\nDATABASE_HOST: %s\nDATABASE_NAME: %s\nDATABASE_USER: %s\nDATABASE_PASSWORD: *redacted*\n------------------------------------------------------`,\n\t\thost, name, user,\n\t)\n\tdb, err := sql.Open(\"postgres\", fmt.Sprintf(\n\t\t\"dbname=%s user=%s password=%s host=%s sslmode=disable\",\n\t\tname, user, password, host,\n\t))\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &PsqlDB{db}, db.Ping()\n}", "func New(dsn string) (*Connection, error) {\n\tcon, err := sqlx.Connect(\"postgres\", dsn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Unsafe returns a version of DB which will silently succeed to scan when\n\t// columns in the SQL result have no fields in the destination struct.\n\tcon = con.Unsafe()\n\n\treturn &Connection{\n\t\tQueryable: NewQueryable(con),\n\t\tcon: con,\n\t\tdsn: dsn,\n\t}, nil\n}", "func NewDatabase(config settings.PostgresDatabase, logger Logger) (*Database, error) {\n\tconnStr := \"postgres://\" + config.User + \":\" + config.Password +\n\t\t\"@\" + config.Address + \"/\" + config.Address + \"?sslmode=disable&connect_timeout=1\"\n\tdb, err := sql.Open(\"postgres\", connStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Database{\n\t\tsql: db,\n\t\tlogger: logger,\n\t}, nil\n}", "func Connect() (*sql.DB, error) {\n\thost := viper.GetString(\"google.cloudsql.host\")\n\tname := viper.GetString(\"google.cloudsql.name\")\n\tuser := viper.GetString(\"google.cloudsql.user\")\n\tpass := viper.GetString(\"google.cloudsql.pass\")\n\tdsn := fmt.Sprintf(\"host=%s dbname=%s user=%s password=%s sslmode=disable\",\n\t\thost, name, user, pass)\n\tdb, err := sql.Open(\"cloudsqlpostgres\", dsn)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error while connecting to cloudsql: %v\", err)\n\t}\n\tlog.Printf(\"Connected to cloudsql %q\", host)\n\treturn db, nil\n}", "func New(connString string) (*Db, error) {\n\tdb, err := sql.Open(\"postgres\", connString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check that our connection is good\n\terr = db.Ping()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Db{db}, nil\n}", "func dbConnect() *sql.DB {\n\tdb, err := 
sql.Open(\"postgres\",\n\t\tfmt.Sprintf(\"user=%s password=%s dbname=%s host=%s port=%s sslmode=%s\",\n\t\t\tPG_USER,\n\t\t\tPG_PASSWORD,\n\t\t\tPG_DB,\n\t\t\tPG_HOST,\n\t\t\tPG_PORT,\n\t\t\tPG_SSL,\n\t\t))\n\tif err != nil {\n\t\tlog.Fatalf(\"ERROR: Error connecting to Postgres => %s\", err.Error())\n\t}\n\tlog.Printf(\"PQ Database connection made to %s\", PG_DB)\n\treturn db\n}", "func createConnPostgres(desc string, maxIdle, maxConn int) (*sql.DB, error) {\n\t// val := url.Values{}\n\t// val.Add(\"TimeZone\", \"Asia/Jakarta\")\n\t// dsn := fmt.Sprintf(\"%s&%s\", desc, val.Encode())\n\tsqlDb, err := sql.Open(`postgres`, desc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = sqlDb.Ping()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsqlDb.SetMaxIdleConns(maxIdle)\n\tsqlDb.SetMaxOpenConns(maxConn)\n\n\treturn sqlDb, nil\n}", "func connectToDatabase(conf base.Configuration) *sql.DB {\n\tdataSourceName := fmt.Sprintf(\"host=%s port=%s dbname=%s sslmode=%s user=%s password=%s\",\n\t\tconf.DbHost, conf.DbPort, conf.DbName, conf.DbSSLMode, conf.DbUser, conf.DbPassword)\n\tdb, err := sql.Open(\"postgres\", dataSourceName)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = db.Ping()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn db\n}", "func NewDatabase(dsn string) (*sql.DB, error) {\n\tdb, err := sql.Open(\"pgx\", dsn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = db.Ping(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn db, nil\n}", "func NewPostgres() *sql.DB {\n\tprefix := \"\"\n\tif os.Getenv(\"ENVIRONMENT\") == \"test\" {\n\t\tprefix += \"TEST_\"\n\t}\n\tusername := os.Getenv(prefix + \"POSTGRES_USER\")\n\tpassword := os.Getenv(prefix + \"POSTGRES_PASSWORD\")\n\thost := os.Getenv(prefix + \"POSTGRES_HOST\")\n\tport := os.Getenv(prefix + \"POSTGRES_PORT\")\n\tdbName := os.Getenv(prefix + \"POSTGRES_DATABASE\")\n\n\tdb, _ := sql.Open(\"postgres\", fmt.Sprintf(\"postgres://%s:%s@%s:%s/%s?sslmode=disable\", username, password, host, port, dbName))\n\n\terr := db.Ping()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn db\n}", "func CreateConn(c *Conninfo) (conn *sql.DB, err error) {\n\t// Assemble libpq-style connection string\n\tconnstr := assembleConnstr(c)\n\t// Connect to Postgres using assembled connection string\n\tif conn, err = PQconnectdb(c, connstr); err != nil {\n\t\treturn nil, err\n\t}\n\t// Fill empty settings by normal values\n\tif err = replaceEmptySettings(c, conn); err != nil {\n\t\treturn nil, err\n\t}\n\t// Determine whether Postgres is local or not.\n\tcheckLocality(c)\n\t// Set session's safe settings.\n\tsetSafeSession(conn)\n\n\treturn conn, nil\n}", "func connect(ctx context.Context, conf *config.DatabaseConf) (*DB, error) {\n\tpgxConf, err := pgxpool.ParseConfig(conf.Url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpgxConf.MaxConns = conf.MaxConns\n\n\tpool, err := pgxpool.ConnectConfig(ctx, pgxConf)\n\treturn &DB{Pool: pool}, err\n}", "func NewConnection(s *mgo.Session, db, cname string) *Connection {\n\treturn &Connection{\n\t\tsession: s,\n\t\tDatabase: db,\n\t\tName: cname,\n\t}\n}", "func NewPGConnector() *PGConnector { return &PGConnector{} }", "func NewPGConnector() *PGConnector { return &PGConnector{} }", "func New(connectionStr string, options ...Option) (SQLExecutor, error) {\n\tdb, err := sqlx.Open(\"pgx\", connectionStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconn := &Connection{db}\n\tconn.SetMaxOpenConns(defaultMaxConns)\n\tconn.SetMaxIdleConns(defaultIdleConns)\n\tfor _, op := range options 
{\n\t\top(conn)\n\t}\n\tif err := conn.Ping(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn conn, err\n}", "func NewPostgresConnection(d DB) (*PostgresConnection, error) {\n\tc := db.ConnectionConfigFromViper()\n\treplConn, err := pgx.ReplicationConnect(\n\t\tpgx.ConnConfig{Host: c.Host, Database: c.Name, User: c.User, Password: c.Password},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &PostgresConnection{\n\t\tdb: d,\n\t\treplConn: replConn,\n\t\tlog: logutil.NewLogger(\"postgres-replication-connection\"),\n\t}, nil\n}", "func NewPostgreSQLConn(dsn string, cfg *DBConfig) (ITransactionalDB, error) {\n\tpoolConfig, err := pgxpool.ParseConfig(dsn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// the lib will handle logging\n\tpoolConfig.MaxConns = cfg.MaxConn\n\tconn, err := pgxpool.ConnectConfig(context.Background(), poolConfig)\n\treturn &PGWrapper{conn}, err\n}", "func NewDatabase(config *AppConfig) (*DB, error) {\n\tdatabaseConfigureation := &stdlib.DriverConfig{\n\t\tConnConfig: pgx.ConnConfig{\n\t\t\tHost: config.DatabaseHost,\n\t\t\tUser: config.DatabaseUser,\n\t\t\tPassword: config.DatabasePass,\n\t\t\tDatabase: config.DatabaseName,\n\t\t\tPort: config.DatabasePort,\n\t\t},\n\t}\n\tstdlib.RegisterDriverConfig(databaseConfigureation)\n\tdatabase, err := sql.Open(\"pgx\", databaseConfigureation.ConnectionString(\"\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdatabase.SetMaxOpenConns(20)\n\tdatabase.SetMaxIdleConns(10)\n\t// db.SetConnMaxLifetime(time.Second * 10)\n\treturn &DB{database}, nil\n}", "func Connect() *sql.DB {\n\tfmtStr := \"host=%s port=%s user=%s \" +\n\t\t\"password=%s dbname=%s sslmode=disable\"\n\tpsqlInfo := fmt.Sprintf(\n\t\tfmtStr,\n\t\tos.Getenv(\"PG_HOST\"),\n\t\tos.Getenv(\"PG_PORT\"),\n\t\tos.Getenv(\"PG_USER\"),\n\t\tos.Getenv(\"PG_PASSWORD\"),\n\t\tos.Getenv(\"PG_DBNAME\"),\n\t)\n\tdb, err := sql.Open(\"postgres\", psqlInfo)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn db\n}", "func NewPostgresDB() (*sql.DB, error) {\n\t// get config\n\tcfg := config.Get()\n\n\t// connect to database\n\tdb, err := sql.Open(\"postgres\", fmt.Sprintf(\"host=%s port=%s user=%s dbname=%s password=%s sslmode=%s\",\n\t\tcfg.DB.Host,\n\t\tcfg.DB.Port,\n\t\tcfg.DB.Username,\n\t\tcfg.DB.DBName,\n\t\tcfg.DB.Password,\n\t\tcfg.DB.SSLMode,\n\t),\n\t)\n\tif err != nil {\n\t\tlog.Printf(\"Database connection: %v\", err)\n\t\treturn nil, err\n\t}\n\n\terr = db.Ping()\n\tif err != nil {\n\t\tlog.Printf(\"Lost database connection: %v\", err)\n\t\treturn nil, err\n\t}\n\tlog.Println(\"Successfully connected to database.\")\n\n\treturn db, nil\n}", "func New() Config {\n\thost := os.Getenv(\"HOST\")\n\tif host == \"\" {\n\t\thost = \"0.0.0.0\"\n\t}\n\tport := os.Getenv(\"PORT\")\n\tif port == \"\" {\n\t\tport = \"3000\"\n\t}\n\n\taddr := host + \":\" + port\n\n\tsslmode := os.Getenv(\"DB_SSL_MODE\")\n\tif sslmode == \"\" {\n\t\tsslmode = \"require\"\n\t}\n\n\tpg := url.URL{\n\t\tScheme: \"postgres\",\n\t\tUser: url.UserPassword(\n\t\t\tos.Getenv(\"DB_USERNAME\"),\n\t\t\tos.Getenv(\"DB_PASSWORD\"),\n\t\t),\n\t\tHost: os.Getenv(\"DB_ADDRESS\"),\n\t\tPath: os.Getenv(\"DB_NAME\"),\n\t}\n\n\tv := url.Values{}\n\n\tv.Set(\"sslmode\", sslmode)\n\n\tpg.RawQuery = v.Encode()\n\n\treturn Config{host, port, addr, pg}\n\n}", "func ConnectPQ() (*sql.DB, error) {\n\thost := os.Getenv(\"YAGPDB_TEST_PQ_HOST\")\n\tif host == \"\" {\n\t\thost = \"localhost\"\n\t}\n\tuser := os.Getenv(\"YAGPDB_TEST_PQ_USER\")\n\tif user == \"\" {\n\t\tuser = 
\"yagpdb_test\"\n\t}\n\n\tdbPassword := os.Getenv(\"YAGPDB_TEST_PQ_PASSWORD\")\n\tsslMode := os.Getenv(\"YAGPDB_TEST_PQ_SSLMODE\")\n\tif sslMode == \"\" {\n\t\tsslMode = \"disable\"\n\t}\n\n\tdbName := os.Getenv(\"YAGPDB_TEST_PQ_DB\")\n\tif dbName == \"\" {\n\t\tdbName = \"yagpdb_test\"\n\t}\n\n\tif !strings.Contains(dbName, \"test\") {\n\t\tpanic(\"Test database name has to contain 'test'T this is a safety measure to protect against running tests on production systems.\")\n\t}\n\n\tconnStr := fmt.Sprintf(\"host=%s user=%s dbname=%s sslmode=%s password='%s'\", host, user, dbName, sslMode, dbPassword)\n\tconnStrPWCensored := fmt.Sprintf(\"host=%s user=%s dbname=%s sslmode=%s password='%s'\", host, user, dbName, sslMode, \"***\")\n\tfmt.Println(\"Postgres connection string being used: \" + connStrPWCensored)\n\n\tconn, err := sql.Open(\"postgres\", connStr)\n\treturn conn, err\n}", "func NewDBConn(masterHost string, masterPort int, dbname string) *dbconn.DBConn {\n\tcurrentUser, err := utils.System.CurrentUser()\n\tif err != nil {\n\t\tgplog.Error(\"Failed to look up current user: %s\", err)\n\t\tcurrentUser = &user.User{}\n\t}\n\tusername := tryEnv(\"PGUSER\", currentUser.Username)\n\n\tif dbname == \"\" {\n\t\tdbname = tryEnv(\"PGDATABASE\", \"\")\n\t}\n\n\thostname, err := utils.System.Hostname()\n\tif err != nil {\n\t\tgplog.Error(\"Failed to look up hostname: %s\", err)\n\t}\n\tif masterHost == \"\" {\n\t\tmasterHost = tryEnv(\"PGHOST\", hostname)\n\t}\n\n\treturn &dbconn.DBConn{\n\t\tConnPool: nil,\n\t\tNumConns: 0,\n\t\tDriver: dbconn.GPDBDriver{},\n\t\tUser: username,\n\t\tDBName: dbname,\n\t\tHost: masterHost,\n\t\tPort: masterPort,\n\t\tTx: nil,\n\t\tVersion: dbconn.GPDBVersion{},\n\t}\n}", "func NewPostgresDatabase() (*PostgresDatabase, error) {\n\tpdb := &PostgresDatabase{}\n\n\tconf, err := parsePostgresConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpdb.conf = conf\n\n\tpsqlInfo := fmt.Sprintf(\"host=%s port=%d user=%s password=%s dbname=%s sslmode=disable\", conf.host, conf.port, conf.user, conf.password, conf.dbname)\n\tpdb.connStr = psqlInfo\n\n\tdb, err := sql.Open(\"postgres\", psqlInfo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = db.Ping()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpdb.db = db\n\n\tlog.Info(\"db connection initialised\")\n\treturn pdb, nil\n}", "func NewPostgres(username, password, hostname, port, database, sslCA, sslCert, sslKey string) (*PostgresConnect, error) {\n\tif (sslCert == \"\" && sslKey != \"\") || (sslCert != \"\" && sslKey == \"\") {\n\t\treturn nil, fmt.Errorf(\"ssl-cert and ssl-key must be both set or unset.\")\n\t}\n\n\tdns, err := guessDNS(username, password, hostname, port, database, sslCA, sslCert, sslKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// db is closed in the dumper closer.\n\tdb, err := sql.Open(\"postgres\", dns)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &PostgresConnect{\n\t\tbaseDNS: dns,\n\t\tDB: db,\n\t}, nil\n}", "func NewDB() *sqlx.DB {\n\tdb, err = sqlx.Connect(\"postgres\", os.Getenv(\"DATABASE_URL\"))\n\n\t/* Used for localhost tests */\n\tif err != nil {\n\t\tdb = sqlx.MustConnect(\"postgres\", fmt.Sprintf(\"host=%s port=%d user=%s \"+\n\t\t\t\"password=%s dbname=%s sslmode=disable\",\n\t\t\thost, port, user, password, dbname))\n\t}\n\n\tschema := `CREATE TABLE IF NOT EXISTS users (\n id INTEGER PRIMARY KEY,\n username VARCHAR (255) NOT NULL);`\n\n\t// execute a query on the server\n\tdb.MustExec(schema)\n\n\treturn db\n}", "func connectDb() *sql.DB {\n\tconnStr := 
\"user=postgres dbname=postgres sslmode=disable port=5000\"\n\tdb, err := sql.Open(\"postgres\", connStr)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn db\n}", "func OpenConnection() *sql.DB {\n\terr := godotenv.Load(\"ENV.env\")\n\tif err != nil {\n\t\tlog.Fatal(\"Error loading env file \\n\", err)\n\t}\n\tvar db *sql.DB\n\n\tdsn := fmt.Sprintf(\"host=%s user=%s password=%s dbname=%s port=%s sslmode=disable\",\n\t\tos.Getenv(\"DB_HOST\"),os.Getenv(\"DB_USERNAME\"), os.Getenv(\"DB_PASSWORD\"), os.Getenv(\"DB_NAME\"), os.Getenv(\"DB_PORT\"))\n\n\tlog.Print(\"Connecting to PostgreSQL DB...\")\n\tdb, err = sql.Open(\"postgres\",dsn)\n\n\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to connect to database. \\n\", err)\n\t\tos.Exit(2)\n\n\t}\n\tlog.Println(\"connected\")\n\treturn db;\n\n}", "func NewDBConn(ctx context.Context, maxConns int) (*DBConn, error) {\n\t// TODO: this + maxConns could be viper config based instead\n\tconnUrl := fmt.Sprintf(\n\t\t\"postgres://%s:%s@%s/%s\",\n\t\tos.Getenv(\"DB_USER\"),\n\t\tos.Getenv(\"DB_PASSWORD\"),\n\t\tos.Getenv(\"DB_HOST\"),\n\t\tos.Getenv(\"DB_NAME\"),\n\t)\n\n\tconfig, err := pgxpool.ParseConfig(connUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig.MaxConns = int32(maxConns)\n\tconfig.LazyConnect = true\n\n\tpool, err := pgxpool.ConnectConfig(ctx, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &DBConn{pool}, nil\n}", "func NewDB(dsn string) (*sqlx.DB, error) {\n\tdb, err := sqlx.Connect(\"postgres\", dsn)\n\tif err != nil {\n\t\tlog.Println(\"Error connecting to Postgres\", err)\n\t\treturn nil, err\n\t}\n\n\treturn db, nil\n}", "func CreatePostgresDatabaseConnection() (*gorm.DB, error) {\n\tpostgresconnection, err := gorm.Open(\"postgres\", PostgresConnectionString)\n\tif err != nil {\n\t\treturn nil, errors.New(\"[ POSTGRES CONNECTION FAILED ] \")\n\t}\n\treturn postgresconnection, nil\n}", "func connectPostgres(dsn string) (*postgresConnection, error) {\n\treturn newPostgresConnection(dsn)\n}", "func initTCPConnectionPool() (*sql.DB, error) {\n\t// [START cloud_sql_postgres_databasesql_create_tcp]\n\tvar (\n\t\tdbUser = mustGetenv(\"DB_USER\")\n\t\tdbPwd = mustGetenv(\"DB_PASS\")\n\t\tdbTCPHost = mustGetenv(\"DB_TCP_HOST\")\n\t\tdbPort = mustGetenv(\"DB_PORT\")\n\t\tdbName = mustGetenv(\"DB_NAME\")\n\t)\n\n\tvar dbURI string\n\tdbURI = fmt.Sprintf(\"host=%s user=%s password=%s port=%s database=%s\", dbTCPHost, dbUser, dbPwd, dbPort, dbName)\n\n\t// dbPool is the pool of database connections.\n\tdbPool, err := sql.Open(\"pgx\", dbURI)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"sql.Open: %v\", err)\n\t}\n\n\t// [START_EXCLUDE]\n\tconfigureConnectionPool(dbPool)\n\t// [END_EXCLUDE]\n\n\treturn dbPool, nil\n\t// [END cloud_sql_postgres_databasesql_create_tcp]\n}", "func CreateConnection_old(host string, port string, user string, pwd string) (*sql.DB, error) {\n\t// Create dsn\n\tdsn := \"hdb://\" + user + \":\" + pwd + \"@\" + host + \":\" + port\n\t// Create connector\n\tconnector, err := driver.NewDSNConnector(dsn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Set connectior option\n\tconnector.SetFetchSize(512)\n\t// Create db object\n\tdb := sql.OpenDB(connector)\n\t// Test connection\n\terr = db.Ping()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Return\n\treturn db, nil\n}", "func NewConnection() (*Connection, error) {\n\t// Open a session to the DB\n\ts, err := gorethink.Connect(gorethink.ConnectOpts{\n\t\tDatabase: core.Config.RethinkDbDatabase,\n\t\tAddress: 
fmt.Sprintf(\n\t\t\t\"%v:%v\", core.Config.RethinkDbIP, core.Config.RethinkDbPort),\n\t\tMaxIdle: 20,\n\t\tMaxOpen: 20,\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts.SetMaxOpenConns(5)\n\n\t_, err = gorethink.Wait().Run(s)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdb := gorethink.DB(core.Config.RethinkDbDatabase)\n\n\tconnection := &Connection{\n\t\ts: s,\n\t\tdb: db,\n\t\tjobsTable: db.Table(\"jobs\"),\n\t\tworkersTable: db.Table(\"workers\"),\n\t}\n\n\treturn connection, nil\n}", "func NewDatabase(\n\to *DbOptions,\n\toa *AppOptions,\n\tl log.Logger,\n) (db.Session, error) {\n\tvar db *sqlx.DB\n\tvar err error\n\ttimeout := 15 * time.Second\n\tdeadline := time.Now().Add(timeout)\n\n\tmode := oa.Mode\n\t// register database driver proxy to log sql expresion\n\tproxyName := fmt.Sprintf(\"%s-proxy\", o.Code)\n\tonce.Do(func() {\n\t\tswitch o.Code {\n\t\tcase \"postgres\":\n\t\t\tsql.Register(proxyName, sqlhooks.Wrap(&pq.Driver{}, &hook{\n\t\t\t\tMode: mode,\n\t\t\t\tLogger: l,\n\t\t\t}))\n\t\tdefault:\n\t\t\tsql.Register(proxyName, sqlhooks.Wrap(&pq.Driver{}, &hook{\n\t\t\t\tMode: mode,\n\t\t\t\tLogger: l,\n\t\t\t}))\n\t\t}\n\t\tl.Info(context.Background(), \"SQL MODE: %s\", mode)\n\t})\n\n\t// connect to database server in 5 seconds\n\tfor {\n\t\tif time.Now().After(deadline) {\n\t\t\treturn nil, fmt.Errorf(\"database did not start up in %s (%v)\", timeout, err)\n\t\t}\n\t\t//db, err := sql.Open(\"postgres-proxy\", o.DSN)\n\t\tdb, err = sqlx.Open(proxyName, o.DSN)\n\t\tl.Debug(context.Background(), \"sqlx is openning...\")\n\t\tif err != nil {\n\t\t\ttime.Sleep(timeout / 5)\n\t\t\tcontinue\n\t\t}\n\t\terr = db.Ping()\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(timeout / 5)\n\t}\n\n\tswitch o.Code {\n\tcase \"postgres\":\n\t\treturn bpostgres.New(db, l), nil\n\tdefault:\n\t\treturn bpostgres.New(db, l), nil\n\t}\n}", "func New(l *zap.SugaredLogger, name string, opts config.Database) (*Database, error) {\n\t// set up configuration\n\tvar connConfig pgx.ConnConfig\n\tif opts.PostgresConnURL != \"\" {\n\t\tl.Info(\"parsing conn string\")\n\t\tvar err error\n\t\tconnConfig, err = pgx.ParseURI(opts.PostgresConnURL)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to read db conn url: %v\", err)\n\t\t}\n\t} else {\n\t\tl.Info(\"using provided parameters\")\n\t\tport, _ := strconv.Atoi(opts.Port)\n\t\tconnConfig = pgx.ConnConfig{\n\t\t\tHost: opts.Host,\n\t\t\tPort: uint16(port),\n\t\t\tDatabase: opts.Database,\n\n\t\t\t// authentication\n\t\t\tUser: opts.User,\n\t\t\tPassword: opts.Password,\n\t\t\tTLSConfig: opts.TLS,\n\n\t\t\t// misc metadata\n\t\t\tRuntimeParams: map[string]string{\n\t\t\t\t\"application_name\": name,\n\t\t\t},\n\t\t\tLogger: zpgx.NewLogger(l.Desugar().Named(\"px\"), zpgx.Options{\n\t\t\t\tLogInfoAsDebug: true,\n\t\t\t}),\n\t\t}\n\t}\n\tl.Infow(\"set up configuration\",\n\t\t\"host\", connConfig.Host,\n\t\t\"database\", connConfig.Database)\n\n\t// init connection pool\n\tpool, err := pgx.NewConnPool(pgx.ConnPoolConfig{\n\t\tConnConfig: connConfig,\n\t\tAcquireTimeout: 30 * time.Second,\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to init db: %v\", err)\n\t}\n\n\t// create struct\n\tvar db = &Database{\n\t\tpg: pool,\n\t\tl: l,\n\t}\n\n\t// set up statements and whatnot\n\tdb.Repos().init()\n\n\treturn db, nil\n}", "func NewDB(driver, source string) (*sqlx.DB, error) {\n\tdb, err := sqlx.Connect(driver, source)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn db, nil\n}", "func OpenConnection() *sql.DB 
{\r\n\tpsqlInfo := fmt.Sprintf(\"user=%s \"+\"password=%s dbname=%s sslmode=disable\", user, password, dbname)\r\n\tdb, err := sql.Open(\"postgres\", psqlInfo)\r\n\tcheckErr(err)\r\n\terr = db.Ping()\r\n\tcheckErr(err)\r\n\t//fmt.Println(\"Connected!\")\r\n\treturn db\r\n}", "func New(config *Config) (*Database, error) {\n\tpsqlInfo := fmt.Sprintf(\"host=%v port=%v user=%v \"+\n\t\t\"password=%v dbname=%v sslmode=disable\",\n\t\tconfig.Host, config.Port, config.User, config.Password, config.DbName)\n\n\tdb, _ := sql.Open(\"postgres\", psqlInfo)\n\terr := db.Ping()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error to connect to database\")\n\t}\n\n\treturn &Database{db}, nil\n}", "func NewDB() *DB {\n\treturn &DB{\n\t\tconnectDatabase(\"postgres://user:pass@localhost:5432/sample?sslmode=disable\"),\n\t}\n}", "func NewPostgresDB(driver, host, username, password, dbname string, timeout, port int) (*Postgres, error) {\n\tpsqlInfo := fmt.Sprintf(\"host=%s port=%d user=%s password=%s dbname=%s connect_timeout=%d sslmode=disable\", host, port, username, password, dbname, timeout)\n\tdb, err := sql.Open(driver, psqlInfo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpostgresDB := &Postgres{\n\t\tDB: db,\n\t}\n\n\treturn postgresDB, nil\n}", "func NewDB(pguri string) (*DB, error) {\n\tdb, err := sql.Open(\"postgres\", pguri)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = db.Ping(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &DB{db}, nil\n}", "func NewDBConnection() (*dbConnection, error) {\n\tdb, err := sql.Open(\"mysql\", \"nameOfTheDB:password@tcp(127.0.0.1:3306)/comments\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &dbConnection{DB: db}, nil\n}", "func NewTestDatabase(t *testing.T) *sqlx.DB {\n\tt.Helper()\n\n\tdriver := os.Getenv(\"DATABASE_DRIVER\")\n\tdsn := os.Getenv(\"DATABASE_URL\")\n\n\tdb, err := sqlx.Open(driver, dsn)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdb.SetConnMaxLifetime(-1)\n\n\treturn db\n}", "func InitDatabase() *sql.DB {\n\tlog.Println(\"connecting database.\")\n\n\tquery := url.Values{}\n\tquery.Add(\"database\", \"Company\")\n\n\tu := &url.URL{\n\t\tScheme: \"sqlserver\",\n\t\tUser: url.UserPassword(\"sa\", \"1234\"),\n\t\tHost: fmt.Sprintf(\"%s:%d\", \"localhost\", 1433),\n\t\t// Path: instance, // if connecting to an instance instead of a port\n\t\tRawQuery: query.Encode(),\n\t}\n\n\tlog.Println(u.String())\n\n\tcondb, err := sql.Open(\"sqlserver\", u.String())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tlog.Println(\"test ping database.\")\n\tif err = condb.Ping(); err != nil {\n\t\tpanic(err)\n\t}\n\treturn condb\n}", "func DBConnectionURL() string {\n\treturn fmt.Sprintf(\"postgres://%s:%s@%s:5432/%s?sslmode=disable\", appConfig.dbUsername, appConfig.dbPassword, appConfig.dbHost, appConfig.dbName)\n}", "func New(ctx context.Context, logger log.Logger, config DatabaseConfig) (*sql.DB, error) {\n\tif config.MySQL != nil {\n\t\tpreppedDb, err := mysqlConnection(logger, config.MySQL, config.DatabaseName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdb, err := preppedDb.Connect(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn ApplyConnectionsConfig(db, &config.MySQL.Connections, logger), nil\n\n\t} else if config.Spanner != nil {\n\t\treturn spannerConnection(logger, *config.Spanner, config.DatabaseName)\n\t}\n\n\treturn nil, fmt.Errorf(\"database config not defined\")\n}", "func Connect(cf *SQLConfig) (*DB, error) {\n\tconnectionString := fmt.Sprintf(\"postgres://%v:%v@%v:%v/%v?sslmode=%v\", 
cf.Username, cf.Password, cf.Host, cf.Post, cf.Database, cf.SSLMode)\n\tdb, err := sql.Open(\"postgres\", connectionString)\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tpingErr := db.Ping()\n\t\tif pingErr != nil {\n\t\t\treturn nil, errors.New(fmt.Sprintf(\"Cannot connect to database. Error: %s\", pingErr.Error()))\n\t\t} else {\n\t\t\treturn &DB{db}, nil\n\t\t}\n\t}\n}", "func (c *PGConnector) Open(cfg *config.Config) (*sql.DB, error) {\n\tsslmode := \"disable\"\n\tif cfg.DBSSLModeOption == \"enable\" {\n\t\tsslmode = \"require\"\n\t}\n\n\tdbstring := fmt.Sprintf(\"user=%s dbname=%s sslmode=%s password=%s host=%s port=%s\",\n\t\tcfg.DBUserName,\n\t\tcfg.DBName,\n\t\tsslmode,\n\t\tcfg.DBPassword,\n\t\tcfg.DBHostname,\n\t\tcfg.DBPort,\n\t)\n\n\treturn sql.Open(\"postgres\", dbstring)\n}", "func (c *PGConnector) Open(cfg *config.Config) (*sql.DB, error) {\n\tsslmode := \"disable\"\n\tif cfg.DBSSLModeOption == \"enable\" {\n\t\tsslmode = \"require\"\n\t}\n\n\tdbstring := fmt.Sprintf(\"user=%s dbname=%s sslmode=%s password=%s host=%s port=%s\",\n\t\tcfg.DBUserName,\n\t\tcfg.DBName,\n\t\tsslmode,\n\t\tcfg.DBPassword,\n\t\tcfg.DBHostname,\n\t\tcfg.DBPort,\n\t)\n\n\treturn sql.Open(\"postgres\", dbstring)\n}", "func NewDB(conf *Config) (db *DB, err error) {\n\tdb = new(DB)\n\tdb.DB, err = sql.Open(\"postgres\", conf.OpenDBURL())\n\t// TODO (kostyarin): configure db: max idle, max open, lifetime, etc\n\t// using hardcoded values, or keeping the values in\n\t// the Config\n\treturn\n}", "func NewPostgres(conn string) *Postgres {\n\tdb, err := gorm.Open(\"postgres\", conn)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tpg := &Postgres{\n\t\tInstance: db,\n\t}\n\treturn pg\n}", "func NewPostgres(p Postgres) (*pgx.Conn, error) {\r\n\r\n\tdbconf := configDB(p)\r\n\r\n\tPostgres, err := pgx.Connect(dbconf)\r\n\r\n\tif err != nil {\r\n\t\tlog.Fatalln(\"[ERROR] postgres.NewPostgres() '%s'\", err)\r\n\t}\r\n\r\n\treturn Postgres, err\r\n}", "func NewDB(conf Config) (*DB, error) {\n\tdb, err := sql.Open(\"postgres\", conf.ConnStr())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &DB{db: db, conf: conf}, nil\n}", "func getDBConnectionFromEnv() (string, error) {\n\n\tcfg := postgresConf{}\n\terr := env.Parse(&cfg)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn fmt.Sprintf(\"postgres://%s:%s@%s:%d/%s?sslmode=disable\", cfg.User, cfg.Password, cfg.Host, cfg.Port, cfg.Database), nil\n}", "func NewPostgres(config *PostgresConfig) (*Postgres, error) {\n\tdb, err := sql.Open(\"postgres\", config.URL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdb.SetMaxOpenConns(config.MaxOpenConns)\n\n\treturn &Postgres{db: db}, nil\n}", "func NewDBConn(authCfg *configs.AuthConfig) (*gorm.DB, error) {\n\t// db connection\n\tdbConn, err := gorm.Open(\"postgres\", DefaultGetDSNHandler(authCfg))\n\tif err != nil {\n\t\terr = fmt.Errorf(\"gorm.Open db connection fail : %v\", err)\n\t\treturn nil, err\n\t}\n\treturn dbConn, err\n}", "func NewDB(dbConnString string, log logger.Logger, wg *sync.WaitGroup, netParams *params.ChainParams) *Database {\n\n\tgdb, err := gorm.Open(postgres.Open(dbConnString), &gorm.Config{})\n\tif err != nil {\n\t\tlog.Fatal(\"failed to connect database\")\n\t}\n\n\tdbclient := &Database{\n\t\tlog: log,\n\t\tDB: gdb,\n\t\tcanClose: wg,\n\t\tnetParams: netParams,\n\n\t\taccountBalanceNotifiers: make(map[string]map[uuid.UUID]*AccountBalanceNotify),\n\t\ttipNotifiers: make(map[uuid.UUID]*TipNotify),\n\t}\n\n\tdbclient.log.Info(\"Database connection established\")\n\n\treturn 
dbclient\n}" ]
[ "0.7518975", "0.74835324", "0.73841846", "0.71606356", "0.7159095", "0.7137663", "0.7134973", "0.7128595", "0.70870835", "0.7032927", "0.6934409", "0.6895356", "0.6789603", "0.67831814", "0.6710152", "0.6689289", "0.6680991", "0.66804385", "0.6673196", "0.665048", "0.6578278", "0.6578195", "0.6556452", "0.65521055", "0.6542972", "0.6541578", "0.65363425", "0.65292275", "0.6521743", "0.652151", "0.6515643", "0.65041715", "0.6490695", "0.6488032", "0.64843386", "0.6483402", "0.64749026", "0.645419", "0.6450303", "0.6449055", "0.64347875", "0.6417095", "0.6405997", "0.63865256", "0.6377455", "0.63768446", "0.63694745", "0.6368793", "0.6367979", "0.63559765", "0.6351763", "0.63516563", "0.6351632", "0.6339713", "0.6339713", "0.63272727", "0.632497", "0.6324622", "0.63225025", "0.6312287", "0.6294269", "0.6293548", "0.6292863", "0.6276927", "0.6264461", "0.62641865", "0.6253737", "0.62516546", "0.6250551", "0.62453955", "0.624339", "0.6231122", "0.622987", "0.6221414", "0.6215833", "0.6202806", "0.6199432", "0.6195231", "0.61854744", "0.6158185", "0.6145077", "0.6142198", "0.6141098", "0.6140157", "0.61331576", "0.61304545", "0.6124866", "0.6121719", "0.61207986", "0.61202395", "0.6118441", "0.6118441", "0.61163366", "0.61132103", "0.6104529", "0.61040485", "0.61034584", "0.6094261", "0.60938907", "0.60836124" ]
0.7736596
0
GetCluster retrieves the cluster associated with clusterName
func (p PGSQLConnection) GetCluster(clusterName string) (*ClusterModel, error) {
	cluster := new(ClusterModel)
	// Bind clusterName as a parameter instead of interpolating it with
	// fmt.Sprintf: the interpolated value was unquoted (invalid SQL) and
	// open to SQL injection.
	if err := p.connection.Get(cluster, "SELECT * FROM clusters WHERE cluster_name = $1", clusterName); err != nil {
		return nil, err
	}
	return cluster, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *ClusterProvider) GetCluster(ctx context.Context, clusterName string) (*ekstypes.Cluster, error) {\n\tinput := &awseks.DescribeClusterInput{\n\t\tName: &clusterName,\n\t}\n\n\toutput, err := c.AWSProvider.EKS().DescribeCluster(ctx, input)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to describe control plane %q\", clusterName)\n\t}\n\tlogger.Debug(\"cluster = %#v\", output)\n\n\tif output.Cluster.Status == ekstypes.ClusterStatusActive {\n\t\tif logger.Level >= 4 {\n\t\t\tspec := &api.ClusterConfig{Metadata: &api.ClusterMeta{Name: clusterName}}\n\t\t\tstacks, err := c.NewStackManager(spec).ListStacksWithStatuses(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"listing CloudFormation stack for %q\", clusterName)\n\t\t\t}\n\t\t\tfor _, s := range stacks {\n\t\t\t\tlogger.Debug(\"stack = %#v\", *s)\n\t\t\t}\n\t\t}\n\t}\n\treturn output.Cluster, nil\n}", "func GetCluster(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *ClusterState, opts ...pulumi.ResourceOption) (*Cluster, error) {\n\tvar resource Cluster\n\terr := ctx.ReadResource(\"aws:elasticache/cluster:Cluster\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (c *ClientIMPL) GetCluster(ctx context.Context) (resp Cluster, err error) {\n\tvar systemList []Cluster\n\tcluster := Cluster{}\n\tqp := c.APIClient().QueryParamsWithFields(&cluster)\n\n\tmajorMinorVersion, err := c.GetSoftwareMajorMinorVersion(ctx)\n\tif err != nil {\n\t\tlog.Errorf(\"Couldn't find the array version %s\", err.Error())\n\t} else {\n\t\tif majorMinorVersion >= 3.0 {\n\t\t\tqp.Select(\"nvm_subsystem_nqn\")\n\t\t}\n\t}\n\t_, err = c.APIClient().Query(\n\t\tctx,\n\t\tRequestConfig{\n\t\t\tMethod: \"GET\",\n\t\t\tEndpoint: clusterURL,\n\t\t\tQueryParams: qp,\n\t\t},\n\t\t&systemList)\n\terr = WrapErr(err)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\treturn systemList[0], err\n}", "func (c *RancherClient) GetCluster(clusterName string) (*client.Cluster, error) {\n\tclusters, err := c.Client.ManagementClient.Cluster.List(clusterListOpts())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error listing clusters: %s\", err)\n\t}\n\tfor _, cluster := range clusters.Data {\n\t\tif cluster.Name == clusterName {\n\t\t\treturn &cluster, nil\n\t\t}\n\t}\n\n\treturn nil, &rancherError{fmt.Sprintf(\"Could not find cluster: %s\", clusterName), notFoundErr}\n}", "func GetCluster(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *ClusterState, opts ...pulumi.ResourceOption) (*Cluster, error) {\n\tvar resource Cluster\n\terr := ctx.ReadResource(\"aws:docdb/cluster:Cluster\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (svc ServerlessClusterService) Get(ctx context.Context,\n\tinput *models.GetServerlessClusterInput) (*models.Cluster, *Response, error) {\n\tvar cluster models.Cluster\n\tvar graphqlRequest = models.GraphqlRequest{\n\t\tName: \"cluster\",\n\t\tOperation: models.Query,\n\t\tInput: nil,\n\t\tArgs: *input,\n\t\tResponse: cluster,\n\t}\n\treq, err := svc.client.NewRequest(&graphqlRequest)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := svc.client.Do(ctx, req, &cluster)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn &cluster, resp, err\n}", "func GetCluster(ctx *pulumi.Context,\n\tname string, id pulumi.ID, state *ClusterState, opts ...pulumi.ResourceOpt) (*Cluster, error) {\n\tinputs := make(map[string]interface{})\n\tif 
state != nil {\n\t\tinputs[\"applyImmediately\"] = state.ApplyImmediately\n\t\tinputs[\"arn\"] = state.Arn\n\t\tinputs[\"availabilityZones\"] = state.AvailabilityZones\n\t\tinputs[\"backupRetentionPeriod\"] = state.BackupRetentionPeriod\n\t\tinputs[\"clusterIdentifier\"] = state.ClusterIdentifier\n\t\tinputs[\"clusterIdentifierPrefix\"] = state.ClusterIdentifierPrefix\n\t\tinputs[\"clusterMembers\"] = state.ClusterMembers\n\t\tinputs[\"clusterResourceId\"] = state.ClusterResourceId\n\t\tinputs[\"endpoint\"] = state.Endpoint\n\t\tinputs[\"engine\"] = state.Engine\n\t\tinputs[\"engineVersion\"] = state.EngineVersion\n\t\tinputs[\"finalSnapshotIdentifier\"] = state.FinalSnapshotIdentifier\n\t\tinputs[\"hostedZoneId\"] = state.HostedZoneId\n\t\tinputs[\"iamDatabaseAuthenticationEnabled\"] = state.IamDatabaseAuthenticationEnabled\n\t\tinputs[\"iamRoles\"] = state.IamRoles\n\t\tinputs[\"kmsKeyArn\"] = state.KmsKeyArn\n\t\tinputs[\"neptuneClusterParameterGroupName\"] = state.NeptuneClusterParameterGroupName\n\t\tinputs[\"neptuneSubnetGroupName\"] = state.NeptuneSubnetGroupName\n\t\tinputs[\"port\"] = state.Port\n\t\tinputs[\"preferredBackupWindow\"] = state.PreferredBackupWindow\n\t\tinputs[\"preferredMaintenanceWindow\"] = state.PreferredMaintenanceWindow\n\t\tinputs[\"readerEndpoint\"] = state.ReaderEndpoint\n\t\tinputs[\"replicationSourceIdentifier\"] = state.ReplicationSourceIdentifier\n\t\tinputs[\"skipFinalSnapshot\"] = state.SkipFinalSnapshot\n\t\tinputs[\"snapshotIdentifier\"] = state.SnapshotIdentifier\n\t\tinputs[\"storageEncrypted\"] = state.StorageEncrypted\n\t\tinputs[\"tags\"] = state.Tags\n\t\tinputs[\"vpcSecurityGroupIds\"] = state.VpcSecurityGroupIds\n\t}\n\ts, err := ctx.ReadResource(\"aws:neptune/cluster:Cluster\", name, id, inputs, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Cluster{s: s}, nil\n}", "func GetCluster(ctx *pulumi.Context,\n\tname string, id pulumi.ID, state *ClusterState, opts ...pulumi.ResourceOpt) (*Cluster, error) {\n\tinputs := make(map[string]interface{})\n\tif state != nil {\n\t\tinputs[\"arn\"] = state.Arn\n\t\tinputs[\"bootstrapBrokers\"] = state.BootstrapBrokers\n\t\tinputs[\"bootstrapBrokersTls\"] = state.BootstrapBrokersTls\n\t\tinputs[\"brokerNodeGroupInfo\"] = state.BrokerNodeGroupInfo\n\t\tinputs[\"clientAuthentication\"] = state.ClientAuthentication\n\t\tinputs[\"clusterName\"] = state.ClusterName\n\t\tinputs[\"configurationInfo\"] = state.ConfigurationInfo\n\t\tinputs[\"currentVersion\"] = state.CurrentVersion\n\t\tinputs[\"encryptionInfo\"] = state.EncryptionInfo\n\t\tinputs[\"enhancedMonitoring\"] = state.EnhancedMonitoring\n\t\tinputs[\"kafkaVersion\"] = state.KafkaVersion\n\t\tinputs[\"numberOfBrokerNodes\"] = state.NumberOfBrokerNodes\n\t\tinputs[\"tags\"] = state.Tags\n\t\tinputs[\"zookeeperConnectString\"] = state.ZookeeperConnectString\n\t}\n\ts, err := ctx.ReadResource(\"aws:msk/cluster:Cluster\", name, id, inputs, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Cluster{s: s}, nil\n}", "func (cb *clientBase) GetCluster() string {\n\treturn cb.cluster\n}", "func (p *v1Provider) GetCluster(w http.ResponseWriter, r *http.Request) {\n\thttpapi.IdentifyEndpoint(r, \"/v1/clusters/current\")\n\ttoken := p.CheckToken(r)\n\tif !token.Require(w, \"cluster:show_basic\") {\n\t\treturn\n\t}\n\tshowBasic := !token.Check(\"cluster:show\")\n\n\tfilter := reports.ReadFilter(r, p.Cluster.GetServiceTypesForArea)\n\tif showBasic {\n\t\tfilter.IsSubcapacityAllowed = func(serviceType, resourceName string) bool 
{\n\t\t\ttoken.Context.Request[\"service\"] = serviceType\n\t\t\ttoken.Context.Request[\"resource\"] = resourceName\n\t\t\treturn token.Check(\"cluster:show_subcapacity\")\n\t\t}\n\t}\n\n\tcluster, err := reports.GetClusterResources(p.Cluster, p.DB, filter)\n\tif respondwith.ErrorText(w, err) {\n\t\treturn\n\t}\n\trespondwith.JSON(w, 200, map[string]interface{}{\"cluster\": cluster})\n}", "func GetCluster(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, z *api.ZookeeperCluster) (*api.ZookeeperCluster, error) {\n\tzk := &api.ZookeeperCluster{}\n\terr := f.Client.Get(goctx.TODO(), types.NamespacedName{Namespace: z.Namespace, Name: z.Name}, zk)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to obtain created CR: %v\", err)\n\t}\n\tt.Logf(\"zk cluster has ready replicas %v\", zk.Status.ReadyReplicas)\n\treturn zk, nil\n}", "func GetCluster(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *ClusterState, opts ...pulumi.ResourceOption) (*Cluster, error) {\n\tvar resource Cluster\n\terr := ctx.ReadResource(\"gcp:container/cluster:Cluster\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func Get(name string) (clusterapi.ClusterAPI, error) {\n\ttenant, err := utils.GetCurrentTenant()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinstance, err := readDefinition(tenant, name)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get Cluster '%s': %s\", name, err.Error())\n\t}\n\tif instance == nil {\n\t\treturn nil, nil\n\t}\n\t_, err = instance.GetState()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get state of the cluster: %s\", err.Error())\n\t}\n\treturn instance, nil\n}", "func (c starterClusterServiceOp) Get(ctx context.Context, input *models.GetStarterClusterInput) (*models.Cluster, *Response, error) {\n\tvar cluster models.Cluster\n\tvar graphqlRequest = models.GraphqlRequest{\n\t\tName: \"cluster\",\n\t\tOperation: models.Query,\n\t\tInput: nil,\n\t\tArgs: *input,\n\t\tResponse: cluster,\n\t}\n\treq, err := c.client.NewRequest(&graphqlRequest)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := c.client.Do(ctx, req, &cluster)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn &cluster, resp, err\n}", "func (m *RedisProxy) GetCluster() string {\n\tif m != nil {\n\t\treturn m.Cluster\n\t}\n\treturn \"\"\n}", "func (o *PendingDeleteCluster) GetCluster() (value *Cluster, ok bool) {\n\tok = o != nil && o.bitmap_&16 != 0\n\tif ok {\n\t\tvalue = o.cluster\n\t}\n\treturn\n}", "func (s *DeploymentsService) GetCluster() api.Cluster {\n\treturn s.cluster\n}", "func (s *ocmClient) GetCluster() (*ClusterInfo, error) {\n\n\t// fetch the clusterversion, which contains the internal ID\n\tcv := &configv1.ClusterVersion{}\n\terr := s.client.Get(context.TODO(), types.NamespacedName{Name: \"version\"}, cv)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't get clusterversion: %v\", err)\n\t}\n\texternalID := cv.Spec.ClusterID\n\n\tcsUrl, err := url.Parse(s.ocmBaseUrl.String())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't parse OCM API url: %v\", err)\n\t}\n\tcsUrl.Path = path.Join(csUrl.Path, CLUSTERS_V1_PATH)\n\n\tresponse, err := s.httpClient.R().\n\t\tSetQueryParams(map[string]string{\n\t\t\t\"page\": \"1\",\n\t\t\t\"size\": \"1\",\n\t\t\t\"search\": fmt.Sprintf(\"external_id = '%s'\", externalID),\n\t\t}).\n\t\tSetResult(&ClusterList{}).\n\t\tExpectContentType(\"application/json\").\n\t\tGet(csUrl.String())\n\n\tif err != nil {\n\t\treturn 
nil, fmt.Errorf(\"can't query OCM cluster service: request to '%v' returned error '%v'\", csUrl.String(), err)\n\t}\n\n\toperationId := response.Header().Get(OPERATION_ID_HEADER)\n\tif response.IsError() {\n\t\treturn nil, fmt.Errorf(\"request to '%v' received error code %v, operation id '%v'\", csUrl.String(), response.StatusCode(), operationId)\n\t}\n\n\tlog.Info(fmt.Sprintf(\"request to '%v' received response code %v, operation id: '%v'\", csUrl.String(), response.StatusCode(), operationId))\n\n\tlistResponse := response.Result().(*ClusterList)\n\tif listResponse.Size != 1 || len(listResponse.Items) != 1 {\n\t\treturn nil, ErrClusterIdNotFound\n\t}\n\n\treturn &listResponse.Items[0], nil\n}", "func (o *ProjectDeploymentRuleResponse) GetCluster() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Cluster\n}", "func (dn *DNode) GetCluster(clusterType string) *meta.TscaleCluster {\n\treturn dn.watcher.GetCluster(clusterType)\n}", "func (c *krakenClusters) Get(name string, options v1.GetOptions) (result *v1alpha1.KrakenCluster, err error) {\n\tresult = &v1alpha1.KrakenCluster{}\n\terr = c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"krakenclusters\").\n\t\tName(name).\n\t\tVersionedParams(&options, scheme.ParameterCodec).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}", "func (p *aksClusterProvider) Get(ctx *provider.Context, clusterID string, identity provider.Identity) (*provider.Cluster, error) {\n\tif err := p.setup(ctx.ConfigurationItems(), identity); err != nil {\n\t\treturn nil, fmt.Errorf(\"setting up aks provider: %w\", err)\n\t}\n\tp.logger.Infow(\"getting AKS cluster\", \"id\", clusterID)\n\n\tresourceID, err := id.FromClusterID(clusterID)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"getting resource id: %w\", err)\n\t}\n\n\tclient := containerservice.NewManagedClustersClient(resourceID.SubscriptionID)\n\tclient.Authorizer = p.authorizer\n\n\tresult, err := client.Get(ctx.Context, resourceID.ResourceGroupName, resourceID.ResourceName)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"getting cluster: %w\", err)\n\t}\n\n\tcluster := &provider.Cluster{\n\t\tName: *result.Name,\n\t\tID: clusterID,\n\t}\n\n\treturn cluster, nil\n}", "func GetCluster(ctx context.Context, clusterURL ClusterURL) (*cluster.Cluster, error) {\n\ttmpDir, cleanTmp, err := util.TempDir()\n\tdefer cleanTmp()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create temp dir: %w\", err)\n\t}\n\tcredFilePath := path.Join(tmpDir, fmt.Sprintf(\"%s.credential\", clusterURL.ClusterName))\n\tf, err := os.Create(credFilePath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create cred file: %v\", err)\n\t}\n\tf.Close()\n\tcmd := exec.CommandContext(ctx, \"gcloud\", \"--project\", clusterURL.ProjectID, \"container\", \"clusters\", \"get-credentials\", clusterURL.ClusterName, \"--location\", clusterURL.Location)\n\tcmd.Env = append(cmd.Environ(), fmt.Sprintf(\"KUBECONFIG=%s\", credFilePath))\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to set credentials: %v; output: %s\", err, string(out))\n\t}\n\tconfigBytes, err := os.ReadFile(credFilePath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read kubectl config file: %w\", err)\n\t}\n\tkubeCfg, err := clientcmd.RESTConfigFromKubeConfig(configBytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse kubectl config file: %w\", err)\n\t}\n\tgkeCluster, err := cluster.New(kubeCfg)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to instantiate 
GKE cluster client: %w\", err)\n\t}\n\treturn gkeCluster, nil\n}", "func GetCluster(ctx context.Context, clusterClient containerservice.ManagedClustersClient, resourceName string) (c containerservice.ManagedCluster, kubeConfig string, err error) {\n\tresourceGroupName := resourceName + \"-group\"\n\n\t// get kubeconfig for environment\n\tcredentialResults, err := clusterClient.ListClusterAdminCredentials(ctx, resourceGroupName, resourceName)\n\tif err != nil {\n\t\tlog.Printf(\"err getting cluster credentails: %v\", err)\n\t}\n\tif credentialResults.Kubeconfigs != nil {\n\t\tfor _, v := range *credentialResults.Kubeconfigs {\n\t\t\tif *v.Name == \"clusterAdmin\" {\n\t\t\t\tkubeConfig = string(*v.Value)\n\t\t\t}\n\t\t}\n\t}\n\n\tc, err = clusterClient.Get(ctx, resourceGroupName, resourceName)\n\tif err != nil {\n\t\tfmt.Printf(\"Error getting cluster %v: %v\\n\", resourceName, err)\n\t}\n\n\treturn c, kubeConfig, err\n}", "func (br *Broker) GetCluster(clusterType string) *meta.TscaleCluster {\n\t// some error checking\n\tif br.metaWatcher == nil {\n\t\treturn nil\n\t}\n\n\treturn br.metaWatcher.GetCluster(clusterType)\n}", "func (ao *AppObjects) GetCluster() *models.Cluster {\n\treturn ao.Cluster\n}", "func (s *Server) GetCluster(id uint64) *api.Cluster {\n\treturn convertClusterToAPI(s.doGetCluster(id))\n}", "func (daemon *Daemon) GetCluster() Cluster {\n\treturn daemon.cluster\n}", "func (a ClustersAPI) Get(clusterID string) (httpmodels.GetResp, error) {\n\tvar clusterInfo httpmodels.GetResp\n\n\tdata := struct {\n\t\tClusterID string `json:\"cluster_id,omitempty\" url:\"cluster_id,omitempty\"`\n\t}{\n\t\tclusterID,\n\t}\n\tresp, err := a.Client.performQuery(http.MethodGet, \"/clusters/get\", data, nil)\n\tif err != nil {\n\t\treturn clusterInfo, err\n\t}\n\n\terr = json.Unmarshal(resp, &clusterInfo)\n\treturn clusterInfo, err\n}", "func GetCluster(cm *kuberlogicv1.KuberLogicService) (op interfaces.OperatorInterface, err error) {\n\top, err = serviceOperator.GetOperator(cm.Spec.Type)\n\tif err != nil {\n\t\treturn\n\t}\n\top.Init(cm, \"\")\n\treturn\n}", "func (o *VirtualizationIweHost) GetCluster() VirtualizationIweClusterRelationship {\n\tif o == nil || o.Cluster == nil {\n\t\tvar ret VirtualizationIweClusterRelationship\n\t\treturn ret\n\t}\n\treturn *o.Cluster\n}", "func (client OpenShiftManagedClustersClient) Get(ctx context.Context, resourceGroupName string, resourceName string) (result v20180930preview.OpenShiftManagedCluster, err error) {\n\treq, err := client.GetPreparer(ctx, resourceGroupName, resourceName)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"containerservice.OpenShiftManagedClustersClient\", \"Get\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.GetSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"containerservice.OpenShiftManagedClustersClient\", \"Get\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult, err = client.GetResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"containerservice.OpenShiftManagedClustersClient\", \"Get\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}", "func (o *HyperflexEncryption) GetCluster() HyperflexClusterRelationship {\n\tif o == nil || o.Cluster == nil {\n\t\tvar ret HyperflexClusterRelationship\n\t\treturn ret\n\t}\n\treturn *o.Cluster\n}", "func LookupCluster(ctx *pulumi.Context, args *LookupClusterArgs, opts ...pulumi.InvokeOption) 
(*LookupClusterResult, error) {\n\tvar rv LookupClusterResult\n\terr := ctx.Invoke(\"google-native:bigtableadmin/v2:getCluster\", args, &rv, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &rv, nil\n}", "func (a *ClusterManagerAdapter) GetCluster(ctx context.Context, id uint) (workflow.EksCluster, error) {\n\tcommonCluster, err := a.clusterManager.GetClusterByIDOnly(ctx, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\teksCluster := commonCluster.(*cluster.EKSCluster)\n\treturn &Cluster{*eksCluster}, nil\n}", "func (elementConfiguration *ElementConfiguration) GetCluster(name string) (*ClusterConfiguration, error) {\n\t// determine dependency\n\telementConfiguration.ClustersX.RLock()\n\tclusterConfiguration, ok := elementConfiguration.Clusters[name]\n\telementConfiguration.ClustersX.RUnlock()\n\n\tif !ok {\n\t\treturn nil, errors.New(\"cluster configuration not found\")\n\t}\n\n\t// success\n\treturn clusterConfiguration, nil\n}", "func (c Client) Cluster() (Cluster, error) {\n\tvar cluster Cluster\n\terr := c.get(\"/pools/default\", &cluster)\n\treturn cluster, errors.Wrap(err, \"failed to get cluster\")\n}", "func GetClusterName(self *C.PyObject, args *C.PyObject) *C.PyObject {\n\tclusterName := clustername.GetClusterName()\n\n\tcStr := C.CString(clusterName)\n\tpyStr := C.PyString_FromString(cStr)\n\tC.free(unsafe.Pointer(cStr))\n\treturn pyStr\n}", "func (a *Client) V2GetCluster(ctx context.Context, params *V2GetClusterParams) (*V2GetClusterOK, error) {\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"v2GetCluster\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/v2/clusters/{cluster_id}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &V2GetClusterReader{formats: a.formats},\n\t\tAuthInfo: a.authInfo,\n\t\tContext: ctx,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*V2GetClusterOK), nil\n\n}", "func (mcr *MiddlewareClusterRepo) GetByName(clusterName string) (metadata.MiddlewareCluster, error) {\n\tsql := `select id from t_meta_middleware_cluster_info where del_flag = 0 and cluster_name = ?;`\n\tlog.Debugf(\"metadata MiddlewareClusterRepo.GetByName() select sql: %s\", sql)\n\tresult, err := mcr.Execute(sql, clusterName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tid, err := result.GetInt(constant.ZeroInt, constant.ZeroInt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn mcr.GetByID(id)\n}", "func (o *StorageHyperFlexStorageContainer) GetCluster() HyperflexClusterRelationship {\n\tif o == nil || o.Cluster == nil {\n\t\tvar ret HyperflexClusterRelationship\n\t\treturn ret\n\t}\n\treturn *o.Cluster\n}", "func (a *Awaitility) GetToolchainCluster(t *testing.T, clusterType cluster.Type, namespace string, condition *toolchainv1alpha1.ToolchainClusterCondition) (toolchainv1alpha1.ToolchainCluster, bool, error) {\n\tclusters := &toolchainv1alpha1.ToolchainClusterList{}\n\tif err := a.Client.List(context.TODO(), clusters, client.InNamespace(a.Namespace), client.MatchingLabels{\n\t\t\"namespace\": namespace,\n\t\t\"type\": string(clusterType),\n\t}); err != nil {\n\t\treturn toolchainv1alpha1.ToolchainCluster{}, false, err\n\t}\n\tif len(clusters.Items) == 0 {\n\t\tt.Logf(\"no toolchaincluster resource with expected labels: namespace='%s', type='%s'\", namespace, string(clusterType))\n\t}\n\t// assume there is zero or 1 match only\n\tfor _, cl := 
range clusters.Items {\n\t\tif containsClusterCondition(cl.Status.Conditions, condition) {\n\t\t\treturn cl, true, nil\n\t\t}\n\t}\n\treturn toolchainv1alpha1.ToolchainCluster{}, false, nil\n}", "func (o *VirtualizationIweVirtualMachine) GetCluster() VirtualizationIweClusterRelationship {\n\tif o == nil || o.Cluster == nil {\n\t\tvar ret VirtualizationIweClusterRelationship\n\t\treturn ret\n\t}\n\treturn *o.Cluster\n}", "func (c *clusterNetwork) Get(networkName string) (result *sdnapi.ClusterNetwork, err error) {\n\tresult = &sdnapi.ClusterNetwork{}\n\terr = c.r.Get().Resource(\"clusterNetworks\").Name(networkName).Do().Into(result)\n\treturn\n}", "func (c *MultiClusterController) Get(clusterName, namespace, name string) (interface{}, error) {\n\tcluster := c.GetCluster(clusterName)\n\tif cluster == nil {\n\t\treturn nil, errors.NewClusterNotFound(clusterName)\n\t}\n\tinstance := utilscheme.Scheme.NewObject(c.objectType)\n\tdelegatingClient, err := cluster.GetDelegatingClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = delegatingClient.Get(context.TODO(), client.ObjectKey{\n\t\tNamespace: namespace,\n\t\tName: name,\n\t}, instance)\n\treturn instance, err\n}", "func LookupCluster(ctx *pulumi.Context, args *LookupClusterArgs, opts ...pulumi.InvokeOption) (*LookupClusterResult, error) {\n\tvar rv LookupClusterResult\n\terr := ctx.Invoke(\"gcp:container/getCluster:getCluster\", args, &rv, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &rv, nil\n}", "func (a *Client) GetClusterID(params *GetClusterIDParams) (*GetClusterIDOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetClusterIDParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getClusterId\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/v1/metadata/id\",\n\t\tProducesMediaTypes: []string{\"application/json; qs=0.5\", \"application/vnd.schemaregistry+json; qs=0.9\", \"application/vnd.schemaregistry.v1+json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\", \"application/octet-stream\", \"application/vnd.schemaregistry+json\", \"application/vnd.schemaregistry.v1+json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetClusterIDReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetClusterIDOK), nil\n\n}", "func (m *RollbackNodePoolUpgradeRequest) GetClusterId() string {\n\tif m != nil {\n\t\treturn m.ClusterId\n\t}\n\treturn \"\"\n}", "func (o *VirtualizationVmwareVirtualMachineAllOf) GetCluster() VirtualizationVmwareClusterRelationship {\n\tif o == nil || o.Cluster == nil {\n\t\tvar ret VirtualizationVmwareClusterRelationship\n\t\treturn ret\n\t}\n\treturn *o.Cluster\n}", "func (m *GetNodePoolRequest) GetClusterId() string {\n\tif m != nil {\n\t\treturn m.ClusterId\n\t}\n\treturn \"\"\n}", "func (s *federatedClusterLister) Get(name string) (*federation.FederatedCluster, error) {\n\tobj, exists, err := s.indexer.GetByKey(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(federation.Resource(\"federatedcluster\"), name)\n\t}\n\treturn obj.(*federation.FederatedCluster), nil\n}", "func (a *Client) GetClusterID(params *GetClusterIDParams) (*GetClusterIDOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetClusterIDParams()\n\t}\n\n\tresult, err := 
a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getClusterId\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/v1/metadata/id\",\n\t\tProducesMediaTypes: []string{\"application/json; qs=0.5\", \"application/vnd.schemaregistry+json; qs=0.9\", \"application/vnd.schemaregistry.v1+json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\", \"application/octet-stream\", \"application/vnd.schemaregistry+json\", \"application/vnd.schemaregistry.v1+json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetClusterIDReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetClusterIDOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for getClusterId: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (m *GetClusterRequest) GetClusterId() string {\n\tif m != nil {\n\t\treturn m.ClusterId\n\t}\n\treturn \"\"\n}", "func (s *Scheduler) GetClusterResource() (*commtype.BcsClusterResource, error) {\n\n\tblog.Info(\"get cluster resource from mesos master\")\n\tif s.currMesosMaster == \"\" {\n\t\tblog.Error(\"get cluster resource error: no mesos master\")\n\t\treturn nil, fmt.Errorf(\"system error: no mesos master\")\n\t}\n\n\treturn s.GetMesosResourceIn(s.operatorClient)\n}", "func (s *ClusterStorage) Get(ctx context.Context) (*types.Cluster, error) {\n\n\tlog.V(logLevel).Debug(\"storage:etcd:cluster:> get meta|status\")\n\n\tconst filter = `\\b.+` + clusterStorage + `\\/(meta|status)\\b`\n\n\tclient, destroy, err := getClient(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer destroy()\n\n\tcluster := new(types.Cluster)\n\tkey := keyCreate(clusterStorage)\n\tif err := client.Map(ctx, key, filter, cluster); err != nil {\n\t\tlog.V(logLevel).Errorf(\"storage:etcd:cluster:> get err: %s\", err.Error())\n\t\treturn nil, err\n\t}\n\n\treturn cluster, nil\n}", "func Get(c *golangsdk.ServiceClient, id, cluster_id string) (r GetResult) {\n\t_, r.Err = c.Get(resourceURL(c, id, cluster_id), &r.Body, nil)\n\treturn\n}", "func (c *FakeDaskClusters) Get(ctx context.Context, name string, options v1.GetOptions) (result *kubernetesdaskorgv1.DaskCluster, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewGetAction(daskclustersResource, c.ns, name), &kubernetesdaskorgv1.DaskCluster{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*kubernetesdaskorgv1.DaskCluster), err\n}", "func getClusterName(utils detectorUtils) (string, error) {\n\tresp, err := utils.fetchString(\"GET\", k8sSvcURL+cwConfigmapPath)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"getClusterName() error: %w\", err)\n\t}\n\n\t// parse JSON object returned from HTTP request\n\tvar respmap map[string]json.RawMessage\n\terr = json.Unmarshal([]byte(resp), &respmap)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"getClusterName() error: cannot parse JSON: %w\", err)\n\t}\n\tvar d data\n\terr = json.Unmarshal(respmap[\"data\"], &d)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"getClusterName() error: cannot parse JSON: %w\", err)\n\t}\n\n\tclusterName := d.ClusterName\n\n\treturn clusterName, nil\n}", "func (o *VirtualizationBaseHostPciDeviceAllOf) GetCluster() VirtualizationBaseClusterRelationship 
{\n\tif o == nil || o.Cluster == nil {\n\t\tvar ret VirtualizationBaseClusterRelationship\n\t\treturn ret\n\t}\n\treturn *o.Cluster\n}", "func (m *UpdateNodePoolRequest) GetClusterId() string {\n\tif m != nil {\n\t\treturn m.ClusterId\n\t}\n\treturn \"\"\n}", "func (m *CreateNodePoolRequest) GetClusterId() string {\n\tif m != nil {\n\t\treturn m.ClusterId\n\t}\n\treturn \"\"\n}", "func getClusterForTransition(c *Context, clusterID, newState string) (*model.ClusterDTO, int, func()) {\n\tclusterDTO, status, unlockOnce := lockCluster(c, clusterID)\n\tif status != 0 {\n\t\treturn nil, status, unlockOnce\n\t}\n\n\tif clusterDTO.APISecurityLock {\n\t\tunlockOnce()\n\t\tlogSecurityLockConflict(\"cluster\", c.Logger)\n\t\treturn nil, http.StatusForbidden, unlockOnce\n\t}\n\n\tif !clusterDTO.ValidTransitionState(newState) {\n\t\tunlockOnce()\n\t\tc.Logger.Warnf(\"unable to transition cluster to %q while in state %q\", newState, clusterDTO.State)\n\t\treturn nil, http.StatusBadRequest, unlockOnce\n\t}\n\n\treturn clusterDTO, 0, unlockOnce\n}", "func GetClusterInstance(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *ClusterInstanceState, opts ...pulumi.ResourceOption) (*ClusterInstance, error) {\n\tvar resource ClusterInstance\n\terr := ctx.ReadResource(\"aws:neptune/clusterInstance:ClusterInstance\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (a *Client) GetClusterInfo(params *GetClusterInfoParams, authInfo runtime.ClientAuthInfoWriter) (*GetClusterInfoOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetClusterInfoParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"GetClusterInfo\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/v1/clusters/{name}/info\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetClusterInfoReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetClusterInfoOK), nil\n\n}", "func (l *DatabaseCrendentialBackend) GetClusterByIdentifier(clusterIdentifier string) (*m.Cluster, error) {\n\tcluster := sqlstore.GetClusterByIdentifier(clusterIdentifier)\n\treturn cluster, nil\n}", "func (m *RateLimitServiceConfig) GetClusterName() string {\n\tif x, ok := m.GetServiceSpecifier().(*RateLimitServiceConfig_ClusterName); ok {\n\t\treturn x.ClusterName\n\t}\n\treturn \"\"\n}", "func (c *Controller) getDeployCluster(hr *appv1.HelmRequest) string {\n\tif hr.Spec.ClusterName != \"\" {\n\t\treturn hr.Spec.ClusterName\n\t}\n\n\treturn hr.ClusterName\n}", "func GetClusterId() string {\n\treturn axClusterId\n}", "func (o *NiatelemetryNexusDashboardsAllOf) GetClusterName() string {\n\tif o == nil || o.ClusterName == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.ClusterName\n}", "func (m *SetNodePoolManagementRequest) GetClusterId() string {\n\tif m != nil {\n\t\treturn m.ClusterId\n\t}\n\treturn \"\"\n}", "func (m *Manager) GetClusterID() string {\n\treturn m.clusterID\n}", "func GetZKCluster(t *testing.T, k8client client.Client, z *zkapi.ZookeeperCluster) (*zkapi.ZookeeperCluster, error) {\n\tzookeeper := &zkapi.ZookeeperCluster{}\n\terr := k8client.Get(goctx.TODO(), types.NamespacedName{Namespace: z.Namespace, Name: z.Name}, zookeeper)\n\tif err 
!= nil {\n\t\treturn nil, fmt.Errorf(\"failed to obtain created CR: %v\", err)\n\t}\n\treturn zookeeper, nil\n}", "func (m *UpdateClusterRequest) GetClusterId() string {\n\tif m != nil {\n\t\treturn m.ClusterId\n\t}\n\treturn \"\"\n}", "func (m *SetNodePoolSizeRequest) GetClusterId() string {\n\tif m != nil {\n\t\treturn m.ClusterId\n\t}\n\treturn \"\"\n}", "func GetManegementCluster(version, capiImage, capdImage string) ([]runtime.Object, error) {\n\tcapiObjects, err := GetCAPI(version, capiImage)\n\tif err != nil {\n\t\treturn []runtime.Object{}, err\n\t}\n\n\tnamespaceObj := GetNamespace()\n\tstatefulSet := GetStatefulSet(capdImage)\n\tclusterRole := GetClusterRole()\n\tclusterRoleBinding := GetClusterRoleBinding()\n\n\treturn append(capiObjects,\n\t\t&namespaceObj,\n\t\t&statefulSet,\n\t\t&clusterRole,\n\t\t&clusterRoleBinding,\n\t), nil\n}", "func (m *DeleteNodePoolRequest) GetClusterId() string {\n\tif m != nil {\n\t\treturn m.ClusterId\n\t}\n\treturn \"\"\n}", "func (m *UpdateMasterRequest) GetClusterId() string {\n\tif m != nil {\n\t\treturn m.ClusterId\n\t}\n\treturn \"\"\n}", "func (c *Client) GetClusterID(ctx context.Context, req *pdpb.GetClusterIDReq) (*pdpb.GetClusterIDRsp, error) {\n\trsp, err := c.proxyRPC(ctx,\n\t\treq,\n\t\tfunc() {\n\t\t\treq.From = c.name\n\t\t\treq.ID = c.seq\n\t\t},\n\t\tfunc(cc context.Context) (interface{}, error) {\n\t\t\treturn c.pd.GetClusterID(cc, req, grpc.FailFast(true))\n\t\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn rsp.(*pdpb.GetClusterIDRsp), nil\n}", "func (a *Client) ShowCluster(params *ShowClusterParams, authInfo runtime.ClientAuthInfoWriter) (*ShowClusterOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewShowClusterParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"ShowCluster\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/v1/clusters/{name}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &ShowClusterReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*ShowClusterOK), nil\n\n}", "func fetchCluster(c *gin.Context) string {\n\tconst key = \"cluster\"\n\n\tswitch {\n\tcase len(c.Param(key)) > 0:\n\t\treturn c.Param(key)\n\tcase len(c.Query(key)) > 0:\n\t\treturn c.Query(key)\n\tcase len(c.PostForm(key)) > 0:\n\t\treturn c.PostForm(key)\n\tdefault:\n\t\treturn \"\"\n\t}\n}", "func GetNatsCluster(name, namespace string, size int) *v1alpha2.NatsCluster {\n\treturn &v1alpha2.NatsCluster{\n\t\tTypeMeta: getNatsClusterTypeMeta(),\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t},\n\t\tSpec: v1alpha2.ClusterSpec{\n\t\t\tSize: size,\n\t\t},\n\t}\n}", "func LookupWorkstationCluster(ctx *pulumi.Context, args *LookupWorkstationClusterArgs, opts ...pulumi.InvokeOption) (*LookupWorkstationClusterResult, error) {\n\topts = internal.PkgInvokeDefaultOpts(opts)\n\tvar rv LookupWorkstationClusterResult\n\terr := ctx.Invoke(\"google-native:workstations/v1beta:getWorkstationCluster\", args, &rv, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &rv, nil\n}", "func (c *SiteReplicationSys) GetClusterInfo(ctx context.Context) (info madmin.SiteReplicationInfo, err error) {\n\tc.RLock()\n\tdefer c.RUnlock()\n\tif !c.enabled {\n\t\treturn 
info, nil\n\t}\n\n\tinfo.Enabled = true\n\tinfo.Name = c.state.Name\n\tinfo.Sites = make([]madmin.PeerInfo, 0, len(c.state.Peers))\n\tfor _, peer := range c.state.Peers {\n\t\tinfo.Sites = append(info.Sites, peer)\n\t}\n\tsort.SliceStable(info.Sites, func(i, j int) bool {\n\t\treturn info.Sites[i].Name < info.Sites[j].Name\n\t})\n\n\tinfo.ServiceAccountAccessKey = c.state.ServiceAccountAccessKey\n\treturn info, nil\n}", "func (m *SetNetworkPolicyRequest) GetClusterId() string {\n\tif m != nil {\n\t\treturn m.ClusterId\n\t}\n\treturn \"\"\n}", "func (m *SetLegacyAbacRequest) GetClusterId() string {\n\tif m != nil {\n\t\treturn m.ClusterId\n\t}\n\treturn \"\"\n}", "func (ch *ClusterHandler) GetCluster() app.Adapter {\n\treturn func(h http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tcontext := app.GetRequestContext(r)\n\n\t\t\tlogger := log.WithFields(log.Fields{\"package\": \"handlers\", \"event\": \"get_cluster\", \"request\": context.RequestId()})\n\n\t\t\tvars := mux.Vars(r)\n\t\t\tid := vars[\"id\"]\n\n\t\t\tif len(id) <= 0 {\n\t\t\t\terr := errors.New(\"missing required cluster id\")\n\t\t\t\tresponse := ErrorResponseAttributes{Title: \"get_cluster_error\", Detail: err.Error()}\n\t\t\t\tlogger.Error(err)\n\t\t\t\trespondWithJson(w, newErrorResponse(&response, context.RequestId()), http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlogger.Info(fmt.Sprintf(\"new request to get cluster '%v'\", id))\n\n\t\t\tcluster, err := ch.service.GetCluster(context.RequestId(), id)\n\t\t\tif err != nil {\n\t\t\t\tresponse := ErrorResponseAttributes{Title: \"get_cluster_error\", Detail: err.Error()}\n\t\t\t\tlogger.Error(err.Error())\n\t\t\t\trespondWithJson(w, newErrorResponse(&response, context.RequestId()), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Cluster does not exist\n\t\t\tif cluster == nil {\n\t\t\t\terr := errors.New(\"cluster not found\")\n\t\t\t\tresponse := ErrorResponseAttributes{Title: \"get_cluster_error\", Detail: err.Error()}\n\t\t\t\tlogger.Error(err)\n\t\t\t\trespondWithJson(w, newErrorResponse(&response, context.RequestId()), http.StatusNotFound)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlogger.Info(fmt.Sprintf(\"responding to client with cluster '%v'\", id))\n\n\t\t\trespondWithJson(w, newClusterResponse(cluster, context.RequestId()), http.StatusOK)\n\t\t})\n\t}\n}", "func (o *HyperflexHxapDvUplink) GetCluster() HyperflexHxapClusterRelationship {\n\tif o == nil || o.Cluster == nil {\n\t\tvar ret HyperflexHxapClusterRelationship\n\t\treturn ret\n\t}\n\treturn *o.Cluster\n}", "func (r *ReconcileServiceSync) getClusterName() string {\n\tif clusterName != \"\" {\n\t\treturn clusterName\n\t}\n\n\tif os.Getenv(\"CLUSTER_NAME\") != \"\" {\n\t\tclusterName = os.Getenv(\"CLUSTER_NAME\")\n\t\treturn clusterName\n\t}\n\n\tnodes, err := r.getNodes()\n\tlogOnError(err, \"Failed to get nodes for getClusterName\")\n\tclusterName = getClusterName(nodes)\n\treturn clusterName\n}", "func (c *Container) GetClusterHealthCheck(ctx echo.Context) error {\n future := make(chan helpers.HealthCheckFuture)\n go helpers.GetHealthCheckFuture(helpers.HOST, future)\n result := <-future\n if result.Error != nil {\n return ctx.String(http.StatusInternalServerError, result.Error.Error())\n }\n return ctx.JSON(http.StatusOK, models.HealthCheckResponse{\n Data: models.HealthCheckInfo{\n DeadNodes: result.HealthCheck.DeadNodes,\n MostRecentUptime: result.HealthCheck.MostRecentUptime,\n UnderReplicatedTablets: 
result.HealthCheck.UnderReplicatedTablets,\n },\n })\n}", "func gkeCluster(project, location, clusterName string) (cluster *gkev1.Cluster, err error) {\n\tctx := context.Background()\n\tvar httpClient *http.Client\n\tif httpClient, err = google.DefaultClient(ctx, gkev1.CloudPlatformScope); err != nil {\n\t\treturn\n\t}\n\tvar gkeService *gkev1.Service\n\tif gkeService, err = gkev1.New(httpClient); err != nil {\n\t\treturn\n\t}\n\tcluster, err = gkeService.Projects.Locations.Clusters.\n\t\tGet(fmt.Sprintf(\"projects/%s/locations/%s/clusters/%s\", project, location, clusterName)).\n\t\tDo()\n\treturn\n}", "func GetClusterTemplate(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *ClusterTemplateState, opts ...pulumi.ResourceOption) (*ClusterTemplate, error) {\n\tvar resource ClusterTemplate\n\terr := ctx.ReadResource(\"rancher2:index/clusterTemplate:ClusterTemplate\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (s databaseClusterNamespaceLister) Get(name string) (*v1alpha1.DatabaseCluster, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1alpha1.Resource(\"databasecluster\"), name)\n\t}\n\treturn obj.(*v1alpha1.DatabaseCluster), nil\n}", "func (config *DirectClientConfig) getCluster() (clientcmdapi.Cluster, error) {\n\tclusterInfos := config.config.Clusters\n\tclusterInfoName, required := config.getClusterName()\n\n\tmergedClusterInfo := clientcmdapi.NewCluster()\n\tif config.overrides != nil {\n\t\tmergo.Merge(mergedClusterInfo, config.overrides.ClusterDefaults, mergo.WithOverride)\n\t}\n\tif configClusterInfo, exists := clusterInfos[clusterInfoName]; exists {\n\t\tmergo.Merge(mergedClusterInfo, configClusterInfo, mergo.WithOverride)\n\t} else if required {\n\t\treturn clientcmdapi.Cluster{}, fmt.Errorf(\"cluster %q does not exist\", clusterInfoName)\n\t}\n\tif config.overrides != nil {\n\t\tmergo.Merge(mergedClusterInfo, config.overrides.ClusterInfo, mergo.WithOverride)\n\t}\n\n\t// * An override of --insecure-skip-tls-verify=true and no accompanying CA/CA data should clear already-set CA/CA data\n\t// otherwise, a kubeconfig containing a CA reference would return an error that \"CA and insecure-skip-tls-verify couldn't both be set\".\n\t// * An override of --certificate-authority should also override TLS skip settings and CA data, otherwise existing CA data will take precedence.\n\tif config.overrides != nil {\n\t\tcaLen := len(config.overrides.ClusterInfo.CertificateAuthority)\n\t\tcaDataLen := len(config.overrides.ClusterInfo.CertificateAuthorityData)\n\t\tif config.overrides.ClusterInfo.InsecureSkipTLSVerify || caLen > 0 || caDataLen > 0 {\n\t\t\tmergedClusterInfo.InsecureSkipTLSVerify = config.overrides.ClusterInfo.InsecureSkipTLSVerify\n\t\t\tmergedClusterInfo.CertificateAuthority = config.overrides.ClusterInfo.CertificateAuthority\n\t\t\tmergedClusterInfo.CertificateAuthorityData = config.overrides.ClusterInfo.CertificateAuthorityData\n\t\t}\n\n\t\t// if the --tls-server-name has been set in overrides, use that value.\n\t\t// if the --server has been set in overrides, then use the value of --tls-server-name specified on the CLI too. 
This gives the property\n\t\t// that setting a --server will effectively clear the KUBECONFIG value of tls-server-name if it is specified on the command line which is\n\t\t// usually correct.\n\t\tif config.overrides.ClusterInfo.TLSServerName != \"\" || config.overrides.ClusterInfo.Server != \"\" {\n\t\t\tmergedClusterInfo.TLSServerName = config.overrides.ClusterInfo.TLSServerName\n\t\t}\n\t}\n\n\treturn *mergedClusterInfo, nil\n}", "func (m *Info) GetClusterName() string {\n\tif m.ec2Tags != nil {\n\t\treturn m.ec2Tags.getClusterName()\n\t}\n\n\treturn \"\"\n}", "func GetDatabaseCluster(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *DatabaseClusterState, opts ...pulumi.ResourceOption) (*DatabaseCluster, error) {\n\tvar resource DatabaseCluster\n\terr := ctx.ReadResource(\"digitalocean:index/databaseCluster:DatabaseCluster\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (s daskClusterNamespaceLister) Get(name string) (*v1.DaskCluster, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1.Resource(\"daskcluster\"), name)\n\t}\n\treturn obj.(*v1.DaskCluster), nil\n}", "func (m *ListNodePoolsRequest) GetClusterId() string {\n\tif m != nil {\n\t\treturn m.ClusterId\n\t}\n\treturn \"\"\n}", "func (saIdentityRequest *ServiceAccountIdentityRequest) GetClusterID() string {\n\treturn saIdentityRequest.ClusterID\n}", "func (m *SetMonitoringServiceRequest) GetClusterId() string {\n\tif m != nil {\n\t\treturn m.ClusterId\n\t}\n\treturn \"\"\n}", "func (m *SetNodePoolAutoscalingRequest) GetClusterId() string {\n\tif m != nil {\n\t\treturn m.ClusterId\n\t}\n\treturn \"\"\n}" ]
[ "0.73472095", "0.7320279", "0.7242377", "0.72404385", "0.7170976", "0.7071498", "0.70645136", "0.70029813", "0.69894564", "0.6943833", "0.6940147", "0.6904284", "0.6854848", "0.6852869", "0.68033123", "0.6754213", "0.6730921", "0.6670034", "0.6630054", "0.66086984", "0.6603493", "0.6596473", "0.6595888", "0.6572788", "0.6570449", "0.6533729", "0.64703035", "0.6454105", "0.6433644", "0.63360393", "0.6332038", "0.63077694", "0.62798977", "0.623163", "0.6224668", "0.6212419", "0.6201011", "0.6200171", "0.6193907", "0.6181385", "0.6175221", "0.61139244", "0.6107581", "0.6103854", "0.6090185", "0.6081094", "0.60315317", "0.6028207", "0.60242325", "0.60213023", "0.60098225", "0.60055494", "0.59609616", "0.5952878", "0.59516966", "0.594718", "0.594413", "0.5933582", "0.5922687", "0.591341", "0.5888709", "0.58804274", "0.5874025", "0.58657926", "0.5864048", "0.5849587", "0.5849351", "0.58492106", "0.5829893", "0.58240783", "0.581544", "0.5814157", "0.5801643", "0.57889485", "0.57738644", "0.5769207", "0.5767883", "0.5763043", "0.57623994", "0.57576597", "0.5753822", "0.5750212", "0.57470965", "0.57323956", "0.57319427", "0.5730694", "0.5726255", "0.57240075", "0.5715778", "0.57127064", "0.5711052", "0.57080626", "0.57076424", "0.57048327", "0.5702768", "0.570158", "0.56944704", "0.5692417", "0.5691036", "0.56844264" ]
0.6456914
27
UpdateCluster updates a cluster's color in the database
func (p PGSQLConnection) UpdateCluster(cluster *ClusterModel) error {
	tx, err := p.connection.Beginx()
	if err != nil {
		return err
	}

	_, err = tx.NamedExec("UPDATE clusters SET color = :color WHERE cluster_name = :cluster_name", cluster)
	if err != nil {
		// Roll back so the failed transaction does not keep its connection open.
		_ = tx.Rollback()
		return err
	}

	return tx.Commit()
}
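A minimal usage sketch for the function above — hypothetical wiring, not part of the original row: it assumes PGSQLConnection wraps an *sqlx.DB, that ClusterModel's db tags match the named parameters :color and :cluster_name, and that the DSN and example values ("prod-eu-1", "green") are placeholders.

package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" // Postgres driver, registered for database/sql
)

// ClusterModel mirrors the two columns touched by the UPDATE; the db tags
// drive sqlx's named-parameter binding in NamedExec.
type ClusterModel struct {
	ClusterName string `db:"cluster_name"`
	Color       string `db:"color"`
}

// PGSQLConnection is the assumed receiver type for UpdateCluster above.
type PGSQLConnection struct {
	connection *sqlx.DB
}

func main() {
	db, err := sqlx.Connect("postgres", "postgres://user:pass@localhost/clusters?sslmode=disable") // hypothetical DSN
	if err != nil {
		log.Fatal(err)
	}
	conn := PGSQLConnection{connection: db}

	// Recolor one cluster: cluster_name selects the row, color is the new value.
	if err := conn.UpdateCluster(&ClusterModel{ClusterName: "prod-eu-1", Color: "green"}); err != nil {
		log.Fatal(err)
	}
}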
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func UpdateCluster(c *gin.Context) {\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagGetClusterInfo, \"Bind json into UpdateClusterRequest struct\")\n\n\t// bind request body to UpdateClusterRequest struct\n\tvar updateRequest banzaiTypes.UpdateClusterRequest\n\tif err := c.BindJSON(&updateRequest); err != nil {\n\t\t// bind failed, required field(s) empty\n\t\tbanzaiUtils.LogWarn(banzaiConstants.TagGetClusterInfo, \"Bind failed.\", err.Error())\n\t\tcloud.SetResponseBodyJson(c, http.StatusBadRequest, gin.H{\n\t\t\tcloud.JsonKeyStatus: http.StatusBadRequest,\n\t\t\tcloud.JsonKeyMessage: \"Required field is empty\",\n\t\t\tcloud.JsonKeyError: err,\n\t\t})\n\t\treturn\n\t}\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagGetClusterInfo, \"Load cluster from database\")\n\n\t// load cluster from db\n\tcl, err := cloud.GetClusterFromDB(c)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagGetClusterInfo, \"Start updating cluster:\", cl.Name)\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagGetClusterInfo, \"Update request: \", updateRequest)\n\tcloudType := cl.Cloud\n\n\tswitch cloudType {\n\tcase banzaiConstants.Amazon:\n\t\t// read amazon props from amazon_cluster_properties table\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagGetClusterInfo, \"Load amazon props from db\")\n\t\tdatabase.SelectFirstWhere(&cl.Amazon, banzaiSimpleTypes.AmazonClusterSimple{ClusterSimpleId: cl.ID})\n\tcase banzaiConstants.Azure:\n\t\t// read azure props from azure_cluster_properties table\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagGetClusterInfo, \"Load azure props from db\")\n\t\tdatabase.SelectFirstWhere(&cl.Azure, banzaiSimpleTypes.AzureClusterSimple{ClusterSimpleId: cl.ID})\n\tdefault:\n\t\t// not supported cloud type\n\t\tbanzaiUtils.LogWarn(banzaiConstants.TagGetClusterInfo, \"Not supported cloud type\")\n\t\tcloud.SendNotSupportedCloudResponse(c, banzaiConstants.TagUpdateCluster)\n\t\treturn\n\t}\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagGetClusterInfo, \"Cluster to modify: \", cl)\n\n\tif isValid, err := updateRequest.Validate(*cl); isValid && len(err) == 0 {\n\t\t// validation OK\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagGetClusterInfo, \"Validate is OK\")\n\t\tif cloud.UpdateClusterInCloud(c, &updateRequest, *cl) {\n\t\t\t// cluster updated successfully in cloud\n\t\t\t// update prometheus config..\n\t\t\tupdatePrometheus()\n\t\t}\n\t} else {\n\t\t// validation failed\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagGetClusterInfo, \"Validation failed\")\n\t\tcloud.SetResponseBodyJson(c, http.StatusBadRequest, gin.H{\n\t\t\tcloud.JsonKeyStatus: http.StatusBadRequest,\n\t\t\tcloud.JsonKeyMessage: err,\n\t\t})\n\t}\n\n}", "func UpdateCluster(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, z *api.ZookeeperCluster) error {\n\tt.Logf(\"updating zookeeper cluster: %s\", z.Name)\n\terr := f.Client.Update(goctx.TODO(), z)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to update CR: %v\", err)\n\t}\n\n\tt.Logf(\"updated zookeeper cluster: %s\", z.Name)\n\treturn nil\n}", "func (a *Client) UpdateCluster(params *UpdateClusterParams, authInfo runtime.ClientAuthInfoWriter) (*UpdateClusterOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewUpdateClusterParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"UpdateCluster\",\n\t\tMethod: \"PUT\",\n\t\tPathPattern: \"/api/v1/clusters/{name}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: 
[]string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &UpdateClusterReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*UpdateClusterOK), nil\n\n}", "func (c *HandlerComp) intClusterUpdate(params ops.ClusterUpdateParams, ai *auth.Info, oObj *models.Cluster) (*models.Cluster, error) {\n\tctx := params.HTTPRequest.Context()\n\tvar err error\n\tif ai == nil {\n\t\tai, err = c.GetAuthInfo(params.HTTPRequest)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tvar uP = [centrald.NumActionTypes][]string{\n\t\tcentrald.UpdateRemove: params.Remove,\n\t\tcentrald.UpdateAppend: params.Append,\n\t\tcentrald.UpdateSet: params.Set,\n\t}\n\tua, err := c.MakeStdUpdateArgs(emptyCluster, params.ID, params.Version, uP)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif params.Payload == nil {\n\t\terr = c.eUpdateInvalidMsg(\"missing payload\")\n\t\treturn nil, err\n\t}\n\tif ua.IsModified(\"Name\") && params.Payload.Name == \"\" {\n\t\terr := c.eUpdateInvalidMsg(\"non-empty name is required\")\n\t\treturn nil, err\n\t}\n\tif oObj == nil {\n\t\tc.RLock()\n\t\tdefer c.RUnlock()\n\t\tc.ClusterLock()\n\t\tdefer c.ClusterUnlock()\n\t\toObj, err = c.DS.OpsCluster().Fetch(ctx, params.ID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif ua.IsModified(\"ClusterUsagePolicy\") {\n\t\tif oObj.State != common.ClusterStateDeployable {\n\t\t\terr := c.eUpdateInvalidMsg(\"invalid state\")\n\t\t\treturn nil, err\n\t\t}\n\t\tif err := c.validateClusterUsagePolicy(params.Payload.ClusterUsagePolicy, common.AccountSecretScopeCluster); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif ua.Version == 0 {\n\t\tua.Version = int32(oObj.Meta.Version)\n\t} else if int32(oObj.Meta.Version) != ua.Version {\n\t\terr = centrald.ErrorIDVerNotFound\n\t\treturn nil, err\n\t}\n\tif err = c.app.AuditLog.Ready(); err != nil {\n\t\treturn nil, err\n\t}\n\tif ua.IsModified(\"ClusterVersion\") || ua.IsModified(\"Service\") || ua.IsModified(\"ClusterAttributes\") || ua.IsModified(\"ClusterIdentifier\") || ua.IsModified(\"State\") || ua.IsModified(\"Messages\") {\n\t\tif err = ai.InternalOK(); err != nil {\n\t\t\tc.app.AuditLog.Post(ctx, ai, centrald.ClusterUpdateAction, models.ObjID(params.ID), models.ObjName(oObj.Name), \"\", true, \"Update unauthorized\")\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tif err = ai.CapOK(centrald.CSPDomainManagementCap, models.ObjIDMutable(oObj.AccountID)); err != nil {\n\t\t\tc.app.AuditLog.Post(ctx, ai, centrald.ClusterUpdateAction, models.ObjID(params.ID), models.ObjName(oObj.Name), \"\", true, \"Update unauthorized\")\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif ua.IsModified(\"ClusterIdentifier\") {\n\t\tif !ua.IsModified(\"State\") {\n\t\t\terr := c.eMissingMsg(\"state must be set with clusterIdentifier\")\n\t\t\treturn nil, err\n\t\t}\n\t\t// when transitioning to DEPLOYABLE state ClusterIdentifier must be reset, e.g. 
set to empty string\n\t\tif params.Payload.State == common.ClusterStateDeployable && params.Payload.ClusterIdentifier != \"\" {\n\t\t\terr := c.eMissingMsg(\"clusterIdentifier must be cleared when transitioning to %s\", common.ClusterStateDeployable)\n\t\t\treturn nil, err\n\t\t}\n\t\t// ClusterIdentifier may be modified (set to non-empty value) only when changing state from DEPLOYABLE to MANAGED\n\t\tif !(oObj.State == common.ClusterStateDeployable && params.Payload.State == common.ClusterStateManaged) {\n\t\t\terr := c.eInvalidState(\"invalid state transition (%s ⇒ %s)\", oObj.State, params.Payload.State)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif ua.IsModified(\"State\") {\n\t\tif !c.validateClusterState(params.Payload.State) {\n\t\t\terr := c.eUpdateInvalidMsg(\"invalid cluster state\")\n\t\t\treturn nil, err\n\t\t}\n\t\t// when transitioning from DEPLOYABLE state to MANAGED ClusterIdentifier is required\n\t\tif oObj.State == common.ClusterStateDeployable && params.Payload.State == common.ClusterStateManaged && (!ua.IsModified(\"ClusterIdentifier\") || params.Payload.ClusterIdentifier == \"\") {\n\t\t\terr := c.eMissingMsg(\"clusterIdentifier must be set when transitioning to %s\", common.ClusterStateManaged)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdom, err := c.ops.intCspDomainFetch(ctx, ai, string(oObj.CspDomainID))\n\tif err != nil {\n\t\tc.Log.Errorf(\"Cluster[%s]: error looking up CSPDomain[%s]: %s\", oObj.Meta.ID, oObj.CspDomainID, err.Error())\n\t\treturn nil, err\n\t}\n\tdetail := \"\"\n\tif a := ua.FindUpdateAttr(\"AuthorizedAccounts\"); a != nil && a.IsModified() {\n\t\tdetail, err = c.authAccountValidator.validateAuthorizedAccountsUpdate(ctx, ai, centrald.ClusterUpdateAction, params.ID, models.ObjName(oObj.Name), a, oObj.AuthorizedAccounts, params.Payload.AuthorizedAccounts)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\t// TBD: validate clusterAttributes by clusterType\n\tobj, err := c.DS.OpsCluster().Update(ctx, ua, params.Payload)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.clusterApplyInheritedProperties(ctx, ai, obj, dom) // no error possible\n\tif len(detail) > 0 {\n\t\tc.app.AuditLog.Post(ctx, ai, centrald.ClusterUpdateAction, models.ObjID(params.ID), models.ObjName(oObj.Name), \"\", false, fmt.Sprintf(\"Updated authorizedAccounts %s\", detail))\n\t}\n\tc.setDefaultObjectScope(params.HTTPRequest, obj)\n\treturn obj, nil\n}", "func (a *LocalKeyAgent) UpdateCluster(cluster string) {\n\ta.siteName = cluster\n}", "func (th *transitionHandler) PostRefreshCluster(reason string) stateswitch.PostTransition {\n\tret := func(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) error {\n\t\tsCluster, ok := sw.(*stateCluster)\n\t\tif !ok {\n\t\t\treturn errors.New(\"PostRefreshCluster incompatible type of StateSwitch\")\n\t\t}\n\t\tparams, ok := args.(*TransitionArgsRefreshCluster)\n\t\tif !ok {\n\t\t\treturn errors.New(\"PostRefreshCluster invalid argument\")\n\t\t}\n\t\tvar (\n\t\t\tb []byte\n\t\t\terr error\n\t\t\tupdatedCluster *common.Cluster\n\t\t)\n\t\tb, err = json.Marshal(&params.validationResults)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tupdatedCluster, err = updateClusterStatus(logutil.FromContext(params.ctx, th.log), params.db, *sCluster.cluster.ID, sCluster.srcState, *sCluster.cluster.Status,\n\t\t\treason, \"validations_info\", string(b))\n\t\t//update hosts status to models.HostStatusResettingPendingUserAction if needed\n\t\tcluster := sCluster.cluster\n\t\tif updatedCluster != nil {\n\t\t\tcluster = 
updatedCluster\n\t\t}\n\t\tsetPendingUserResetIfNeeded(params.ctx, logutil.FromContext(params.ctx, th.log), params.db, params.hostApi, cluster)\n\t\t//if status was changed - we need to send event and metrics\n\t\tif err == nil && updatedCluster != nil && sCluster.srcState != swag.StringValue(updatedCluster.Status) {\n\t\t\tmsg := fmt.Sprintf(\"Updated status of cluster %s to %s\", updatedCluster.Name, *updatedCluster.Status)\n\t\t\tparams.eventHandler.AddEvent(params.ctx, *updatedCluster.ID, nil, models.EventSeverityInfo, msg, time.Now())\n\t\t\t//report installation finished metric if needed\n\t\t\treportInstallationCompleteStatuses := []string{models.ClusterStatusInstalled, models.ClusterStatusError}\n\t\t\tif sCluster.srcState == models.ClusterStatusInstalling &&\n\t\t\t\tfunk.ContainsString(reportInstallationCompleteStatuses, swag.StringValue(updatedCluster.Status)) {\n\t\t\t\tparams.metricApi.ClusterInstallationFinished(logutil.FromContext(params.ctx, th.log), swag.StringValue(updatedCluster.Status),\n\t\t\t\t\tupdatedCluster.OpenshiftVersion, *updatedCluster.ID, updatedCluster.InstallStartedAt)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\treturn ret\n}", "func (m *Monitor) updateCluster(managedCluster *clusterv1.ManagedCluster) {\n\tglog.V(2).Info(\"Processing Cluster Update.\")\n\n\tclusterToUpdate := managedCluster.GetName()\n\tclusterVendor, version, clusterID := GetClusterClaimInfo(managedCluster)\n\tclusterIdx, found := Find(m.ManagedClusterInfo, types.ManagedClusterInfo{\n\t\tNamespace: clusterToUpdate,\n\t\tClusterID: clusterID,\n\t})\n\tif found && clusterID != m.ManagedClusterInfo[clusterIdx].ClusterID {\n\t\t// If the cluster ID has changed update it - otherwise do nothing.\n\t\tglog.Infof(\"Updating %s from Insights cluster list\", clusterToUpdate)\n\t\tm.ManagedClusterInfo[clusterIdx] = types.ManagedClusterInfo{\n\t\t\tClusterID: clusterID,\n\t\t\tNamespace: managedCluster.GetName(),\n\t\t}\n\t\treturn\n\t}\n\n\t// Case to add a ManagedCluster to cluster list after it has been upgraded to version >= 4.X\n\tif !found && clusterVendor == \"OpenShift\" && version >= 4 {\n\t\tglog.Infof(\"Adding %s to Insights cluster list - Cluster was upgraded\", managedCluster.GetName())\n\t\tm.ManagedClusterInfo = append(m.ManagedClusterInfo, types.ManagedClusterInfo{\n\t\t\tClusterID: clusterID,\n\t\t\tNamespace: managedCluster.GetName(),\n\t\t})\n\t}\n}", "func (s *Server) updateCluster(report *healthReport) {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tif s.connectivity.startTime.Before(report.startTime) {\n\t\ts.connectivity = report\n\t}\n}", "func (c *Controller) onUpdate(oldObj, newObj interface{}) {\n\toldcluster := oldObj.(*crv1.Pgcluster)\n\tnewcluster := newObj.(*crv1.Pgcluster)\n\n\tlog.Debugf(\"pgcluster onUpdate for cluster %s (namespace %s)\", newcluster.ObjectMeta.Namespace,\n\t\tnewcluster.ObjectMeta.Name)\n\n\t// if the status of the pgcluster shows that it has been bootstrapped, then proceed with\n\t// creating the cluster (i.e. 
the cluster deployment, services, etc.)\n\tif newcluster.Status.State == crv1.PgclusterStateBootstrapped {\n\t\tclusteroperator.AddClusterBase(c.Client, newcluster, newcluster.GetNamespace())\n\t\treturn\n\t}\n\n\t// if the 'shutdown' parameter in the pgcluster update shows that the cluster should be either\n\t// shutdown or started but its current status does not properly reflect that it is, then\n\t// proceed with the logic needed to either shutdown or start the cluster\n\tif newcluster.Spec.Shutdown && newcluster.Status.State != crv1.PgclusterStateShutdown {\n\t\tclusteroperator.ShutdownCluster(c.Client, *newcluster)\n\t} else if !newcluster.Spec.Shutdown &&\n\t\tnewcluster.Status.State == crv1.PgclusterStateShutdown {\n\t\tclusteroperator.StartupCluster(c.Client, *newcluster)\n\t}\n\n\t// check to see if the \"autofail\" label on the pgcluster CR has been changed from either true to false, or from\n\t// false to true. If it has been changed to false, autofail will then be disabled in the pg cluster. If has\n\t// been changed to true, autofail will then be enabled in the pg cluster\n\tif newcluster.ObjectMeta.Labels[config.LABEL_AUTOFAIL] != \"\" {\n\t\tautofailEnabledOld, err := strconv.ParseBool(oldcluster.ObjectMeta.Labels[config.LABEL_AUTOFAIL])\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t\tautofailEnabledNew, err := strconv.ParseBool(newcluster.ObjectMeta.Labels[config.LABEL_AUTOFAIL])\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif autofailEnabledNew != autofailEnabledOld {\n\t\t\tutil.ToggleAutoFailover(c.Client, autofailEnabledNew,\n\t\t\t\tnewcluster.ObjectMeta.Labels[config.LABEL_PGHA_SCOPE],\n\t\t\t\tnewcluster.ObjectMeta.Namespace)\n\t\t}\n\n\t}\n\n\t// handle standby being enabled and disabled for the cluster\n\tif oldcluster.Spec.Standby && !newcluster.Spec.Standby {\n\t\tif err := clusteroperator.DisableStandby(c.Client, *newcluster); err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t} else if !oldcluster.Spec.Standby && newcluster.Spec.Standby {\n\t\tif err := clusteroperator.EnableStandby(c.Client, *newcluster); err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// see if any of the resource values have changed, and if so, update them\n\tif !reflect.DeepEqual(oldcluster.Spec.Resources, newcluster.Spec.Resources) ||\n\t\t!reflect.DeepEqual(oldcluster.Spec.Limits, newcluster.Spec.Limits) {\n\t\tif err := clusteroperator.UpdateResources(c.Client, c.Client.Config, newcluster); err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// see if any of the pgBackRest repository resource values have changed, and\n\t// if so, update them\n\tif !reflect.DeepEqual(oldcluster.Spec.BackrestResources, newcluster.Spec.BackrestResources) ||\n\t\t!reflect.DeepEqual(oldcluster.Spec.BackrestLimits, newcluster.Spec.BackrestLimits) {\n\t\tif err := backrestoperator.UpdateResources(c.Client, newcluster); err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// see if any of the pgBouncer values have changed, and if so, update the\n\t// pgBouncer deployment\n\tif !reflect.DeepEqual(oldcluster.Spec.PgBouncer, newcluster.Spec.PgBouncer) {\n\t\tif err := updatePgBouncer(c, oldcluster, newcluster); err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// if we are not in a standby state, check to see if the tablespaces have\n\t// differed, and if so, add the additional volumes to the primary and replicas\n\tif !reflect.DeepEqual(oldcluster.Spec.TablespaceMounts, 
newcluster.Spec.TablespaceMounts) {\n\t\tif err := updateTablespaces(c, oldcluster, newcluster); err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n}", "func (mcr *MiddlewareClusterRepo) Update(middlewareCluster metadata.MiddlewareCluster) error {\n\tsql := `update t_meta_middleware_cluster_info set cluster_name = ?, owner_id = ?, env_id = ?, del_flag = ? where id = ?;`\n\tlog.Debugf(\"metadata MiddlewareClusterRepo.Update() update sql: %s\", sql)\n\t_, err := mcr.Execute(sql,\n\t\tmiddlewareCluster.GetClusterName(),\n\t\tmiddlewareCluster.GetOwnerID(),\n\t\tmiddlewareCluster.GetEnvID(),\n\t\tmiddlewareCluster.GetDelFlag(),\n\t\tmiddlewareCluster.Identity(),\n\t)\n\n\treturn err\n}", "func (c *Cluster) Update(ctx context.Context) error {\n\tif err := c.restore(); err != nil {\n\t\treturn err\n\t}\n\n\tif c.Status == Error {\n\t\tlogrus.Errorf(\"Cluster %s previously failed to create\", c.Name)\n\t\treturn c.Create(ctx)\n\t}\n\n\tif c.Status == PreCreating || c.Status == Creating {\n\t\tlogrus.Errorf(\"Cluster %s has not been created.\", c.Name)\n\t\treturn fmt.Errorf(\"cluster %s has not been created\", c.Name)\n\t}\n\n\tdriverOpts, err := c.ConfigGetter.GetConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdriverOpts.StringOptions[\"name\"] = c.Name\n\n\tfor k, v := range c.Metadata {\n\t\tif k == \"state\" {\n\t\t\tstate := make(map[string]interface{})\n\t\t\tif err := json.Unmarshal([]byte(v), &state); err == nil {\n\t\t\t\tflattenIfNotExist(state, &driverOpts)\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\tdriverOpts.StringOptions[k] = v\n\t}\n\n\tif err := c.PersistStore.PersistStatus(*c, Updating); err != nil {\n\t\treturn err\n\t}\n\n\tinfo := toInfo(c)\n\tinfo, err = c.Driver.Update(ctx, info, &driverOpts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttransformClusterInfo(c, info)\n\n\treturn c.PostCheck(ctx)\n}", "func (m *CDatabase) Update(cluster Cluster) error {\n\terr := db.C(COLLECTION).UpdateId(cluster.ID, &cluster)\n\treturn err\n}", "func Update(setValuesFlag, valuesYamlFile, chartLocation, version string) error {\n\t_ = utils.CreateDirIfNotExist(utils.GetSpaceCloudDirectory())\n\n\tcharList, err := utils.HelmList(model.HelmSpaceCloudNamespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(charList) < 1 {\n\t\tutils.LogInfo(\"Space cloud cluster not found, setup a new cluster using the setup command\")\n\t\treturn nil\n\t}\n\n\tclusterID := charList[0].Name\n\tisOk := false\n\tprompt := &survey.Confirm{\n\t\tMessage: fmt.Sprintf(\"Space cloud cluster with id (%s) will be upgraded, Do you want to continue\", clusterID),\n\t}\n\tif err := survey.AskOne(prompt, &isOk); err != nil {\n\t\treturn err\n\t}\n\tif !isOk {\n\t\treturn nil\n\t}\n\n\tvaluesFileObj, err := utils.ExtractValuesObj(setValuesFlag, valuesYamlFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// set clusterId of existing cluster\n\tcharInfo, err := utils.HelmGet(clusterID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvaluesFileObj[\"clusterId\"] = charInfo.Config[\"clusterId\"]\n\n\t_, err = utils.HelmUpgrade(clusterID, chartLocation, utils.GetHelmChartDownloadURL(model.HelmSpaceCloudChartDownloadURL, version), \"\", valuesFileObj)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Println()\n\tutils.LogInfo(fmt.Sprintf(\"Space Cloud (cluster id: \\\"%s\\\") has been successfully upgraded! 
👍\", charList[0].Name))\n\treturn nil\n}", "func (c *AKSCluster) UpdateCluster(request *bTypes.UpdateClusterRequest) error {\n\tlog := logger.WithFields(logrus.Fields{\"action\": constants.TagUpdateCluster})\n\tclient, err := c.GetAKSClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient.With(log.Logger)\n\n\t// send separate requests because Azure not supports multiple nodepool modification\n\t// Azure not supports adding and deleting nodepools\n\tvar nodePoolAfterUpdate []*model.AzureNodePoolModel\n\tvar updatedCluster *banzaiAzureTypes.ResponseWithValue\n\tif requestNodes := request.Azure.NodePools; requestNodes != nil {\n\t\tfor name, np := range requestNodes {\n\t\t\tif existNodePool := c.getExistingNodePoolByName(name); np != nil && existNodePool != nil {\n\t\t\t\tlog.Infof(\"NodePool is exists[%s], update...\", name)\n\n\t\t\t\tcount := int32(np.Count)\n\n\t\t\t\t// create request model for aks-client\n\t\t\t\tccr := azureCluster.CreateClusterRequest{\n\t\t\t\t\tName: c.modelCluster.Name,\n\t\t\t\t\tLocation: c.modelCluster.Location,\n\t\t\t\t\tResourceGroup: c.modelCluster.Azure.ResourceGroup,\n\t\t\t\t\tKubernetesVersion: c.modelCluster.Azure.KubernetesVersion,\n\t\t\t\t\tProfiles: []containerservice.AgentPoolProfile{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: &name,\n\t\t\t\t\t\t\tCount: &count,\n\t\t\t\t\t\t\tVMSize: containerservice.VMSizeTypes(existNodePool.NodeInstanceType),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tnodePoolAfterUpdate = append(nodePoolAfterUpdate, &model.AzureNodePoolModel{\n\t\t\t\t\tID: existNodePool.ID,\n\t\t\t\t\tClusterModelId: existNodePool.ClusterModelId,\n\t\t\t\t\tName: name,\n\t\t\t\t\tCount: np.Count,\n\t\t\t\t\tNodeInstanceType: existNodePool.NodeInstanceType,\n\t\t\t\t})\n\n\t\t\t\tupdatedCluster, err = c.updateWithPolling(client, &ccr)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Infof(\"There's no nodepool with this name[%s]\", name)\n\t\t\t}\n\t\t}\n\t}\n\n\tif updatedCluster != nil {\n\t\tupdateCluster := &model.ClusterModel{\n\t\t\tModel: c.modelCluster.Model,\n\t\t\tName: c.modelCluster.Name,\n\t\t\tLocation: c.modelCluster.Location,\n\t\t\tNodeInstanceType: c.modelCluster.NodeInstanceType,\n\t\t\tCloud: c.modelCluster.Cloud,\n\t\t\tOrganizationId: c.modelCluster.OrganizationId,\n\t\t\tSecretId: c.modelCluster.SecretId,\n\t\t\tStatus: c.modelCluster.Status,\n\t\t\tAzure: model.AzureClusterModel{\n\t\t\t\tResourceGroup: c.modelCluster.Azure.ResourceGroup,\n\t\t\t\tKubernetesVersion: c.modelCluster.Azure.KubernetesVersion,\n\t\t\t\tNodePools: nodePoolAfterUpdate,\n\t\t\t},\n\t\t}\n\t\tc.modelCluster = updateCluster\n\t\tc.azureCluster = &updatedCluster.Value\n\t}\n\n\treturn nil\n}", "func (th *transitionHandler) PostRefreshCluster(reason string) stateswitch.PostTransition {\n\tret := func(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) error {\n\t\tsCluster, ok := sw.(*stateCluster)\n\t\tif !ok {\n\t\t\treturn errors.New(\"PostRefreshCluster incompatible type of StateSwitch\")\n\t\t}\n\t\tparams, ok := args.(*TransitionArgsRefreshCluster)\n\t\tif !ok {\n\t\t\treturn errors.New(\"PostRefreshCluster invalid argument\")\n\t\t}\n\n\t\tvar (\n\t\t\terr error\n\t\t\tupdatedCluster *common.Cluster\n\t\t)\n\t\t//update cluster record if the state or the reason has changed\n\t\tif sCluster.srcState != swag.StringValue(sCluster.cluster.Status) || reason != swag.StringValue(sCluster.cluster.StatusInfo) {\n\t\t\tvar extra []interface{}\n\t\t\tvar log = logutil.FromContext(params.ctx, 
th.log)\n\t\t\textra, err = addExtraParams(log, sCluster.cluster, sCluster.srcState)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tupdatedCluster, err = updateClusterStatus(params.ctx, log, params.db, th.stream, *sCluster.cluster.ID, sCluster.srcState, *sCluster.cluster.Status,\n\t\t\t\treason, params.eventHandler, extra...)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t//update hosts status to models.HostStatusResettingPendingUserAction if needed\n\t\tcluster := sCluster.cluster\n\t\tif updatedCluster != nil {\n\t\t\tcluster = updatedCluster\n\t\t\tparams.updatedCluster = updatedCluster\n\t\t}\n\t\tsetPendingUserResetIfNeeded(params.ctx, logutil.FromContext(params.ctx, th.log), params.db, params.hostApi, cluster)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t//report cluster install duration metrics in case of an installation halt. Cancel and Installed cases are\n\t\t//treated separately in CancelInstallation and CompleteInstallation respectively\n\t\tif sCluster.srcState != swag.StringValue(sCluster.cluster.Status) &&\n\t\t\tsCluster.srcState != models.ClusterStatusInstallingPendingUserAction &&\n\t\t\tfunk.ContainsString([]string{models.ClusterStatusError, models.ClusterStatusInstallingPendingUserAction}, swag.StringValue(sCluster.cluster.Status)) {\n\n\t\t\tparams.metricApi.ClusterInstallationFinished(params.ctx, *sCluster.cluster.Status, sCluster.srcState,\n\t\t\t\tsCluster.cluster.OpenshiftVersion, *sCluster.cluster.ID, sCluster.cluster.EmailDomain,\n\t\t\t\tsCluster.cluster.InstallStartedAt)\n\t\t}\n\t\treturn nil\n\t}\n\treturn ret\n}", "func (r *ProjectsInstancesClustersService) Update(name string, cluster *Cluster) *ProjectsInstancesClustersUpdateCall {\n\tc := &ProjectsInstancesClustersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\tc.cluster = cluster\n\treturn c\n}", "func cleanCluster(req *restful.Request, clusterID string) error {\n\t// 参数\n\tdata := operator.M{\n\t\tclusterIDTag: \"\",\n\t\tupdateTimeTag: time.Now(),\n\t}\n\tcondition := operator.NewLeafCondition(operator.Eq, operator.M{clusterIDTag: clusterID})\n\treturn UpdateMany(req.Request.Context(), tableName, condition, data)\n}", "func (a *ClustersApiService) UpdateClusterExecute(r ApiUpdateClusterRequest) (UpdateClusterResponse, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPut\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue UpdateClusterResponse\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"ClustersApiService.UpdateCluster\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/spaces/{space}/clusters/{cluster-id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"space\"+\"}\", _neturl.PathEscape(parameterToString(r.space, \"\")), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"cluster-id\"+\"}\", _neturl.PathEscape(parameterToString(r.clusterId, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\tif r.updateCluster == nil {\n\t\treturn localVarReturnValue, nil, reportError(\"updateCluster is required and must be specified\")\n\t}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type 
header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = r.updateCluster\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (c *Controller) processCluster(updateCtx context.Context, workerNum uint, clusterInfo *ClusterInfo) {\n\tdefer c.clusterList.ClusterProcessed(clusterInfo)\n\n\tcluster := clusterInfo.Cluster\n\tclusterLog := c.logger.WithField(\"cluster\", cluster.Alias).WithField(\"worker\", workerNum)\n\n\tclusterLog.Infof(\"Processing cluster (%s)\", cluster.LifecycleStatus)\n\n\terr := c.doProcessCluster(updateCtx, clusterLog, clusterInfo)\n\n\t// log the error and resolve the special error cases\n\tif err != nil {\n\t\tclusterLog.Errorf(\"Failed to process cluster: %s\", err)\n\n\t\t// treat \"provider not supported\" as no error\n\t\tif err == provisioner.ErrProviderNotSupported {\n\t\t\terr = nil\n\t\t}\n\t} else {\n\t\tclusterLog.Infof(\"Finished processing cluster\")\n\t}\n\n\t// update the cluster state in the registry\n\tif !c.dryRun {\n\t\tif err != nil {\n\t\t\tif cluster.Status.Problems == nil {\n\t\t\t\tcluster.Status.Problems = make([]*api.Problem, 0, 1)\n\t\t\t}\n\t\t\tcluster.Status.Problems = append(cluster.Status.Problems, &api.Problem{\n\t\t\t\tTitle: err.Error(),\n\t\t\t\tType: errTypeGeneral,\n\t\t\t})\n\n\t\t\tif len(cluster.Status.Problems) > errorLimit {\n\t\t\t\tcluster.Status.Problems = cluster.Status.Problems[len(cluster.Status.Problems)-errorLimit:]\n\t\t\t\tcluster.Status.Problems[0] = &api.Problem{\n\t\t\t\t\tType: errTypeCoalescedProblems,\n\t\t\t\t\tTitle: \"<multiple problems>\",\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tcluster.Status.Problems = []*api.Problem{}\n\t\t}\n\t\terr = c.registry.UpdateCluster(cluster)\n\t\tif err != nil {\n\t\t\tclusterLog.Errorf(\"Unable to update cluster state: %s\", 
err)\n\t\t}\n\t}\n}", "func (api *clusterAPI) Update(obj *cluster.Cluster) error {\n\tif api.ct.resolver != nil {\n\t\tapicl, err := api.ct.apiClient()\n\t\tif err != nil {\n\t\t\tapi.ct.logger.Errorf(\"Error creating API server clent. Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = apicl.ClusterV1().Cluster().Update(context.Background(), obj)\n\t\treturn err\n\t}\n\n\tapi.ct.handleClusterEvent(&kvstore.WatchEvent{Object: obj, Type: kvstore.Updated})\n\treturn nil\n}", "func UpdateColor(c *fiber.Ctx) error {\n\treturn c.JSON(\"Update color\")\n}", "func (a *Client) V2UpdateCluster(ctx context.Context, params *V2UpdateClusterParams) (*V2UpdateClusterCreated, error) {\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"V2UpdateCluster\",\n\t\tMethod: \"PATCH\",\n\t\tPathPattern: \"/v2/clusters/{cluster_id}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &V2UpdateClusterReader{formats: a.formats},\n\t\tAuthInfo: a.authInfo,\n\t\tContext: ctx,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*V2UpdateClusterCreated), nil\n\n}", "func ExampleSnowball_UpdateCluster_shared00() {\n\tsvc := snowball.New(session.New())\n\tinput := &snowball.UpdateClusterInput{\n\t\tAddressId: aws.String(\"ADID1234ab12-3eec-4eb3-9be6-9374c10eb51b\"),\n\t\tClusterId: aws.String(\"CID123e4567-e89b-12d3-a456-426655440000\"),\n\t\tDescription: aws.String(\"updated-cluster-name\"),\n\t}\n\n\tresult, err := svc.UpdateCluster(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase snowball.ErrCodeInvalidResourceException:\n\t\t\t\tfmt.Println(snowball.ErrCodeInvalidResourceException, aerr.Error())\n\t\t\tcase snowball.ErrCodeInvalidJobStateException:\n\t\t\t\tfmt.Println(snowball.ErrCodeInvalidJobStateException, aerr.Error())\n\t\t\tcase snowball.ErrCodeKMSRequestFailedException:\n\t\t\t\tfmt.Println(snowball.ErrCodeKMSRequestFailedException, aerr.Error())\n\t\t\tcase snowball.ErrCodeInvalidInputCombinationException:\n\t\t\t\tfmt.Println(snowball.ErrCodeInvalidInputCombinationException, aerr.Error())\n\t\t\tcase snowball.ErrCodeEc2RequestFailedException:\n\t\t\t\tfmt.Println(snowball.ErrCodeEc2RequestFailedException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (client RoverClusterClient) updateRoverCluster(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPut, \"/roverClusters/{roverClusterId}\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response UpdateRoverClusterResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\tapiReferenceLink := \"https://docs.oracle.com/iaas/api/#/en/rover/20201210/RoverCluster/UpdateRoverCluster\"\n\t\terr = common.PostProcessServiceError(err, \"RoverCluster\", \"UpdateRoverCluster\", apiReferenceLink)\n\t\treturn response, 
err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func UpdateK3SRKE2Cluster(client *rancher.Client, cluster *v1.SteveAPIObject, updatedCluster *apisV1.Cluster) (*v1.SteveAPIObject, error) {\n\tupdateCluster, err := client.Steve.SteveType(ProvisioningSteveResouceType).ByID(cluster.ID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tupdatedCluster.ObjectMeta.ResourceVersion = updateCluster.ObjectMeta.ResourceVersion\n\n\tlogrus.Infof(\"Applying cluster YAML hardening changes...\")\n\tcluster, err = client.Steve.SteveType(ProvisioningSteveResouceType).Update(cluster, updatedCluster)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = kwait.Poll(500*time.Millisecond, 5*time.Minute, func() (done bool, err error) {\n\t\tclient, err = client.ReLogin()\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tclusterResp, err := client.Steve.SteveType(ProvisioningSteveResouceType).ByID(cluster.ID)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tif clusterResp.ObjectMeta.State.Name == \"active\" {\n\t\t\tlogrus.Infof(\"Cluster YAML has successfully been updated!\")\n\t\t\treturn true, nil\n\t\t} else {\n\t\t\treturn false, nil\n\t\t}\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cluster, nil\n}", "func (a *HyperflexApiService) UpdateHyperflexCluster(ctx context.Context, moid string) ApiUpdateHyperflexClusterRequest {\n\treturn ApiUpdateHyperflexClusterRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func (mr *MockRdbClientMockRecorder) UpdateCluster(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{arg0, arg1}, arg2...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"UpdateCluster\", reflect.TypeOf((*MockRdbClient)(nil).UpdateCluster), varargs...)\n}", "func ExampleRDS_ModifyDBCluster_shared00() {\n\tsvc := rds.New(session.New())\n\tinput := &rds.ModifyDBClusterInput{\n\t\tApplyImmediately: aws.Bool(true),\n\t\tBackupRetentionPeriod: aws.Int64(14),\n\t\tDBClusterIdentifier: aws.String(\"cluster-2\"),\n\t\tMasterUserPassword: aws.String(\"newpassword99\"),\n\t}\n\n\tresult, err := svc.ModifyDBCluster(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase rds.ErrCodeDBClusterNotFoundFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBClusterNotFoundFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidDBClusterStateFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidDBClusterStateFault, aerr.Error())\n\t\t\tcase rds.ErrCodeStorageQuotaExceededFault:\n\t\t\t\tfmt.Println(rds.ErrCodeStorageQuotaExceededFault, aerr.Error())\n\t\t\tcase rds.ErrCodeDBSubnetGroupNotFoundFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBSubnetGroupNotFoundFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidVPCNetworkStateFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidVPCNetworkStateFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidDBSubnetGroupStateFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidDBSubnetGroupStateFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidSubnet:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidSubnet, aerr.Error())\n\t\t\tcase rds.ErrCodeDBClusterParameterGroupNotFoundFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBClusterParameterGroupNotFoundFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidDBSecurityGroupStateFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidDBSecurityGroupStateFault, aerr.Error())\n\t\t\tcase 
rds.ErrCodeInvalidDBInstanceStateFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidDBInstanceStateFault, aerr.Error())\n\t\t\tcase rds.ErrCodeDBClusterAlreadyExistsFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBClusterAlreadyExistsFault, aerr.Error())\n\t\t\tcase rds.ErrCodeDBInstanceAlreadyExistsFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBInstanceAlreadyExistsFault, aerr.Error())\n\t\t\tcase rds.ErrCodeDomainNotFoundFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDomainNotFoundFault, aerr.Error())\n\t\t\tcase rds.ErrCodeStorageTypeNotAvailableFault:\n\t\t\t\tfmt.Println(rds.ErrCodeStorageTypeNotAvailableFault, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (h *Handler) serveUpdateClusterAdmin(w http.ResponseWriter, r *http.Request) {}", "func (us *ClusterStore) Update(u *model.Cluster) error {\n\treturn us.db.Save(u).Error\n}", "func UpdatePravegaCluster(t *testing.T, k8client client.Client, p *api.PravegaCluster) error {\n\tlog.Printf(\"updating pravega cluster: %s\", p.Name)\n\tp.Spec.Pravega.RollbackTimeout = 10\n\terr := k8client.Update(goctx.TODO(), p)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to update CR: %v\", err)\n\t}\n\n\tlog.Printf(\"updated pravega cluster: %s\", p.Name)\n\treturn nil\n}", "func (m *Multicluster) ClusterUpdated(cluster *multicluster.Cluster, stop <-chan struct{}) {\n\tm.m.Lock()\n\tm.deleteCluster(cluster.ID)\n\tkubeController, kubeRegistry, options, configCluster := m.addCluster(cluster)\n\tif kubeController == nil {\n\t\t// m.closing was true, nothing to do.\n\t\tm.m.Unlock()\n\t\treturn\n\t}\n\tm.m.Unlock()\n\t// clusterStopCh is a channel that will be closed when this cluster removed.\n\tm.initializeCluster(cluster, kubeController, kubeRegistry, *options, configCluster, stop)\n}", "func UpgradeCluster(ctx context.Context, clusterClient containerservice.ManagedClustersClient, parameters ClusterParameters) (status string, err error) {\n\tresourceGroupName := parameters.Name + \"-group\"\n\n\t// Get the location from cluster properties\n\tc, err := clusterClient.Get(ctx, resourceGroupName, parameters.Name)\n\tif err != nil {\n\t\tfmt.Printf(\"Error getting location for cluster %v: %v\\n\", parameters.Name, err)\n\t}\n\n\t// check cluster status before upgrading\n\tif *c.ProvisioningState != \"Succeeded\" {\n\t\treturn \"\", fmt.Errorf(\"Unable to upgrade cluster while it is currently %v\", *c.ProvisioningState)\n\t}\n\n\tfuture, err := clusterClient.CreateOrUpdate(\n\t\tctx,\n\t\tresourceGroupName,\n\t\tparameters.Name,\n\t\tcontainerservice.ManagedCluster{\n\t\t\tLocation: c.Location,\n\t\t\tManagedClusterProperties: &containerservice.ManagedClusterProperties{\n\t\t\t\tKubernetesVersion: &parameters.KubernetesVersion,\n\t\t\t},\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"cannot upgrade cluster: %v\", err)\n\t}\n\n\tstatus = future.Status()\n\tif status != \"Upgrading\" {\n\t\treturn \"\", fmt.Errorf(\"cannot upgrade cluster: %v\", status)\n\t}\n\n\treturn status, nil\n}", "func (ctrler CtrlDefReactor) OnClusterUpdate(oldObj *Cluster, newObj *cluster.Cluster) error {\n\tlog.Info(\"OnClusterUpdate is not implemented\")\n\treturn nil\n}", "func (c *TkgClient) UpdateCredentialsCluster(options *UpdateCredentialsOptions) error {\n\tcurrentRegion, err := c.GetCurrentRegionContext()\n\tif err != nil {\n\t\treturn 
errors.Wrap(err, \"cannot get current management cluster context\")\n\t}\n\toptions.Kubeconfig = currentRegion.SourceFilePath\n\n\tlog.V(4).Info(\"Creating management cluster client...\")\n\tregionalClusterClient, err := clusterclient.NewClient(currentRegion.SourceFilePath, currentRegion.ContextName, clusterclient.Options{OperationTimeout: c.timeout})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to get cluster client while upgrading cluster\")\n\t}\n\n\tisPacific, err := regionalClusterClient.IsPacificRegionalCluster()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error determining 'Tanzu Kubernetes Cluster service for vSphere' management cluster\")\n\t}\n\tif isPacific {\n\t\treturn errors.New(\"update operation not supported for 'Tanzu Kubernetes Service' clusters\")\n\t}\n\n\tinfraProvider, err := regionalClusterClient.GetRegionalClusterDefaultProviderName(clusterctlv1.InfrastructureProviderType)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to get cluster provider information.\")\n\t}\n\tinfraProviderName, _, err := ParseProviderName(infraProvider)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to parse provider name\")\n\t}\n\n\tlog.Infof(\"Updating credentials for workload cluster %q\", options.ClusterName)\n\tif infraProviderName == VSphereProviderName {\n\t\tif err := c.UpdateVSphereClusterCredentials(regionalClusterClient, options); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// update operation is supported only on vsphere clusters for now\n\tif infraProviderName != VSphereProviderName {\n\t\treturn errors.New(\"Updating '\" + infraProviderName + \"' cluster is not yet supported\")\n\t}\n\n\tlog.Infof(\"Updating credentials for workload cluster successful!\")\n\treturn nil\n}", "func (mcs *MySQLClusterService) Update(id int, fields map[string]interface{}) error {\n\terr := mcs.GetByID(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = mcs.MySQLClusters[constant.ZeroInt].Set(fields)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn mcs.MySQLClusterRepo.Update(mcs.MySQLClusters[constant.ZeroInt])\n}", "func (u *osdUpgrader) UpgradeCluster(ctx context.Context, upgradeConfig *upgradev1alpha1.UpgradeConfig, logger logr.Logger) (upgradev1alpha1.UpgradePhase, error) {\n\tu.upgradeConfig = upgradeConfig\n\n\t// OSD upgrader enforces a 'failure' policy if the upgrade does not commence within a time period\n\tif cancelUpgrade, _ := shouldFailUpgrade(u.cvClient, u.config, u.upgradeConfig); cancelUpgrade {\n\t\treturn performUpgradeFailure(u.client, u.metrics, u.scaler, u.notifier, u.upgradeConfig, logger)\n\t}\n\n\treturn u.runSteps(ctx, logger, u.steps)\n}", "func (c *Controller) OnUpdate(old, new common.Cluster) {\n\tblog.Infof(\"cluster old %+v new %+v\", old, new)\n\tif _, ok := c.reconcilerMap[new.ClusterID]; !ok {\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\tnewReconciler, err := reconciler.NewReconciler(new, c.storageClient, c.cmdbClient, c.ops.FullSyncInterval)\n\t\tif err != nil {\n\t\t\tblog.Errorf(\"failed, to create new reconciler, err %s\", err.Error())\n\t\t\tcancel()\n\t\t\treturn\n\t\t}\n\t\tblog.Infof(\"add reconciler for cluster %+v\", new)\n\t\tc.reconcilerMap[new.ClusterID] = newReconciler\n\t\tc.cancelFuncMap[new.ClusterID] = cancel\n\t\tgo newReconciler.Run(ctx)\n\t} else {\n\t\tblog.Infof(\"delete old reconciler for %+v\", old)\n\t\t// call cancel function\n\t\tc.cancelFuncMap[old.ClusterID]()\n\t\tdelete(c.cancelFuncMap, old.ClusterID)\n\t\tdelete(c.reconcilerMap, old.ClusterID)\n\n\t\tblog.Infof(\"add 
new reconciler for %+v\", new)\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\tnewReconciler, err := reconciler.NewReconciler(new, c.storageClient, c.cmdbClient, c.ops.FullSyncInterval)\n\t\tif err != nil {\n\t\t\tblog.Errorf(\"failed, to create new reconciler, err %s\", err.Error())\n\t\t\tcancel()\n\t\t\treturn\n\t\t}\n\t\tc.reconcilerMap[new.ClusterID] = newReconciler\n\t\tc.cancelFuncMap[new.ClusterID] = cancel\n\t\tgo newReconciler.Run(ctx)\n\t}\n}", "func (r *ClusteringHandler) HandleUpdatedClusters(ctx context.Context, updates *clustering.Update, commitTime time.Time) error {\n\trowUpdates := prepareInserts(updates, commitTime)\n\tif err := r.cfClient.Insert(ctx, rowUpdates); err != nil {\n\t\treturn errors.Annotate(err, \"inserting %d clustered failure rows\", len(rowUpdates)).Err()\n\t}\n\treturn nil\n}", "func (i ClusterInstance) UpdateClusterMembers(log *logging.Logger, members ClusterMemberList) error {\n\tif _, err := i.runRemoteCommand(log, \"sudo /usr/bin/mkdir -p /etc/pulcy\", \"\", false); err != nil {\n\t\treturn maskAny(err)\n\t}\n\tdata := members.Render()\n\tif _, err := i.runRemoteCommand(log, \"sudo tee /etc/pulcy/cluster-members\", data, false); err != nil {\n\t\treturn maskAny(err)\n\t}\n\n\tlog.Infof(\"Restarting gluon on %s\", i)\n\tif _, err := i.runRemoteCommand(log, fmt.Sprintf(\"sudo systemctl restart gluon.service\"), \"\", false); err != nil {\n\t\treturn maskAny(err)\n\t}\n\n\tlog.Infof(\"Enabling services on %s\", i)\n\tservices := []string{\"etcd2.service\", \"fleet.service\", \"fleet.socket\", \"ip4tables.service\", \"ip6tables.service\"}\n\tfor _, service := range services {\n\t\tif err := i.EnableService(log, service); err != nil {\n\t\t\treturn maskAny(err)\n\t\t}\n\t}\n\treturn nil\n}", "func (a *Server) updateRemoteClusterStatus(ctx context.Context, netConfig types.ClusterNetworkingConfig, remoteCluster types.RemoteCluster) (updated bool, err error) {\n\tkeepAliveCountMax := netConfig.GetKeepAliveCountMax()\n\tkeepAliveInterval := netConfig.GetKeepAliveInterval()\n\n\t// fetch tunnel connections for the cluster to update runtime status\n\tconnections, err := a.GetTunnelConnections(remoteCluster.GetName())\n\tif err != nil {\n\t\treturn false, trace.Wrap(err)\n\t}\n\tlastConn, err := services.LatestTunnelConnection(connections)\n\tif err != nil {\n\t\tif !trace.IsNotFound(err) {\n\t\t\treturn false, trace.Wrap(err)\n\t\t}\n\t\t// No tunnel connections are known, mark the cluster offline (if it\n\t\t// wasn't already).\n\t\tif remoteCluster.GetConnectionStatus() != teleport.RemoteClusterStatusOffline {\n\t\t\tremoteCluster.SetConnectionStatus(teleport.RemoteClusterStatusOffline)\n\t\t\tif err := a.UpdateRemoteCluster(ctx, remoteCluster); err != nil {\n\t\t\t\t// if the cluster was concurrently updated, ignore the update. either\n\t\t\t\t// the update was consistent with our view of the world, in which case\n\t\t\t\t// retrying would be pointless, or the update was not consistent, in which\n\t\t\t\t// case we should prioritize presenting our view in an internally-consistent\n\t\t\t\t// manner rather than competing with another task.\n\t\t\t\tif !trace.IsCompareFailed(err) {\n\t\t\t\t\treturn false, trace.Wrap(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t}\n\n\tofflineThreshold := time.Duration(keepAliveCountMax) * keepAliveInterval\n\ttunnelStatus := services.TunnelConnectionStatus(a.clock, lastConn, offlineThreshold)\n\n\t// Update remoteCluster based on lastConn. 
If anything changed, update it\n\t// in the backend too.\n\tprevConnectionStatus := remoteCluster.GetConnectionStatus()\n\tprevLastHeartbeat := remoteCluster.GetLastHeartbeat()\n\tremoteCluster.SetConnectionStatus(tunnelStatus)\n\t// Only bump LastHeartbeat if it's newer.\n\tif lastConn.GetLastHeartbeat().After(prevLastHeartbeat) {\n\t\tremoteCluster.SetLastHeartbeat(lastConn.GetLastHeartbeat().UTC())\n\t}\n\tif prevConnectionStatus != remoteCluster.GetConnectionStatus() || !prevLastHeartbeat.Equal(remoteCluster.GetLastHeartbeat()) {\n\t\tif err := a.UpdateRemoteCluster(ctx, remoteCluster); err != nil {\n\t\t\t// if the cluster was concurrently updated, ignore the update. either\n\t\t\t// the update was consistent with our view of the world, in which case\n\t\t\t// retrying would be pointless, or the update was not consistent, in which\n\t\t\t// case we should prioritize presenting our view in an internally-consistent\n\t\t\t// manner rather than competing with another task.\n\t\t\tif !trace.IsCompareFailed(err) {\n\t\t\t\treturn false, trace.Wrap(err)\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\t}\n\n\treturn false, nil\n}", "func (c *clusterNetwork) Update(cn *sdnapi.ClusterNetwork) (result *sdnapi.ClusterNetwork, err error) {\n\tresult = &sdnapi.ClusterNetwork{}\n\terr = c.r.Put().Resource(\"clusterNetworks\").Name(cn.Name).Body(cn).Do().Into(result)\n\treturn\n}", "func (s *ResourceCollectorProtocolServer) UpdateClusterStatus(ctx context.Context, in *pb.ClusterState) (*pb.ReturnMessageClusterState, error) {\n\tklog.Infof(\"Received Request - Update Status: %v\", in)\n\tns := in.NameSpace\n\tname := in.Name\n\tstate := in.State\n\tklog.Infof(\"received state: %v, %v, %v\", ns, name, state)\n\treturn &pb.ReturnMessageClusterState{NameSpace: ns, Name: name, ReturnCode: 1}, nil\n}", "func (c *krakenClusters) Update(krakenCluster *v1alpha1.KrakenCluster) (result *v1alpha1.KrakenCluster, err error) {\n\tresult = &v1alpha1.KrakenCluster{}\n\terr = c.client.Put().\n\t\tNamespace(c.ns).\n\t\tResource(\"krakenclusters\").\n\t\tName(krakenCluster.Name).\n\t\tBody(krakenCluster).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}", "func Update(ctx context.Context, client *v1.ServiceClient, clusterID, nodegroupID string, opts *UpdateOpts) (*v1.ResponseResult, error) {\n\tupdateNodegroupOpts := struct {\n\t\tNodegroup *UpdateOpts `json:\"nodegroup\"`\n\t}{\n\t\tNodegroup: opts,\n\t}\n\trequestBody, err := json.Marshal(updateNodegroupOpts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\turl := strings.Join([]string{client.Endpoint, v1.ResourceURLCluster, clusterID, v1.ResourceURLNodegroup, nodegroupID}, \"/\")\n\tresponseResult, err := client.DoRequest(ctx, http.MethodPut, url, bytes.NewReader(requestBody))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif responseResult.Err != nil {\n\t\terr = responseResult.Err\n\t}\n\n\treturn responseResult, err\n}", "func (a *ClustersApiService) UpdateCluster(ctx _context.Context, space string, clusterId string) ApiUpdateClusterRequest {\n\treturn ApiUpdateClusterRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tspace: space,\n\t\tclusterId: clusterId,\n\t}\n}", "func (us *ClusterStore) UpdateClusterDetails(u *model.Cluster, details *model.ClusterDetails) (err error) {\n\treturn us.db.Model(&u).Association(\"ClusterDetails\").Append(details)\n}", "func (s *ClusterStorage) Update(ctx context.Context, cluster *types.Cluster) error {\n\n\tif cluster == nil {\n\t\treturn errors.New(store.ErrStructArgIsNil)\n\t}\n\n\tlog.V(logLevel).Debugf(\"storage:etcd:cluster:> update: 
%v\", cluster)\n\n\tcluster.Meta.Updated = time.Now()\n\n\tclient, destroy, err := getClient(ctx)\n\tif err != nil {\n\t\tlog.V(logLevel).Errorf(\"storage:etcd:cluster:> update err: %s\", err.Error())\n\t\treturn err\n\t}\n\tdefer destroy()\n\n\tkeyMeta := keyCreate(clusterStorage, \"meta\")\n\tif err := client.Update(ctx, keyMeta, cluster.Meta, nil, 0); err != nil {\n\t\tlog.V(logLevel).Errorf(\"storage:etcd:cluster:> update err: %s\", err.Error())\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (adm Admin) AddCluster(cluster string) bool {\n\tconn := newConnection(adm.ZkSvr)\n\terr := conn.Connect()\n\tif err != nil {\n\t\treturn false\n\t}\n\tdefer conn.Disconnect()\n\n\tkb := KeyBuilder{cluster}\n\t// c = \"/<cluster>\"\n\tc := kb.cluster()\n\n\t// check if cluster already exists\n\texists, err := conn.Exists(c)\n\tmust(err)\n\tif exists {\n\t\treturn false\n\t}\n\n\tconn.CreateEmptyNode(c)\n\n\t// PROPERTYSTORE is an empty node\n\tpropertyStore := fmt.Sprintf(\"/%s/PROPERTYSTORE\", cluster)\n\tconn.CreateEmptyNode(propertyStore)\n\n\t// STATEMODELDEFS has 6 children\n\tstateModelDefs := fmt.Sprintf(\"/%s/STATEMODELDEFS\", cluster)\n\tconn.CreateEmptyNode(stateModelDefs)\n\tconn.CreateRecordWithData(stateModelDefs+\"/LeaderStandby\", HelixDefaultNodes[\"LeaderStandby\"])\n\tconn.CreateRecordWithData(stateModelDefs+\"/MasterSlave\", HelixDefaultNodes[\"MasterSlave\"])\n\tconn.CreateRecordWithData(stateModelDefs+\"/OnlineOffline\", HelixDefaultNodes[\"OnlineOffline\"])\n\tconn.CreateRecordWithData(stateModelDefs+\"/STORAGE_DEFAULT_SM_SCHEMATA\", HelixDefaultNodes[\"STORAGE_DEFAULT_SM_SCHEMATA\"])\n\tconn.CreateRecordWithData(stateModelDefs+\"/SchedulerTaskQueue\", HelixDefaultNodes[\"SchedulerTaskQueue\"])\n\tconn.CreateRecordWithData(stateModelDefs+\"/Task\", HelixDefaultNodes[\"Task\"])\n\n\t// INSTANCES is initailly an empty node\n\tinstances := fmt.Sprintf(\"/%s/INSTANCES\", cluster)\n\tconn.CreateEmptyNode(instances)\n\n\t// CONFIGS has 3 children: CLUSTER, RESOURCE, PARTICIPANT\n\tconfigs := fmt.Sprintf(\"/%s/CONFIGS\", cluster)\n\tconn.CreateEmptyNode(configs)\n\tconn.CreateEmptyNode(configs + \"/PARTICIPANT\")\n\tconn.CreateEmptyNode(configs + \"/RESOURCE\")\n\tconn.CreateEmptyNode(configs + \"/CLUSTER\")\n\n\tclusterNode := NewRecord(cluster)\n\tconn.CreateRecordWithPath(configs+\"/CLUSTER/\"+cluster, clusterNode)\n\n\t// empty ideal states\n\tidealStates := fmt.Sprintf(\"/%s/IDEALSTATES\", cluster)\n\tconn.CreateEmptyNode(idealStates)\n\n\t// empty external view\n\texternalView := fmt.Sprintf(\"/%s/EXTERNALVIEW\", cluster)\n\tconn.CreateEmptyNode(externalView)\n\n\t// empty live instances\n\tliveInstances := fmt.Sprintf(\"/%s/LIVEINSTANCES\", cluster)\n\tconn.CreateEmptyNode(liveInstances)\n\n\t// CONTROLLER has four childrens: [ERRORS, HISTORY, MESSAGES, STATUSUPDATES]\n\tcontroller := fmt.Sprintf(\"/%s/CONTROLLER\", cluster)\n\tconn.CreateEmptyNode(controller)\n\tconn.CreateEmptyNode(controller + \"/ERRORS\")\n\tconn.CreateEmptyNode(controller + \"/HISTORY\")\n\tconn.CreateEmptyNode(controller + \"/MESSAGES\")\n\tconn.CreateEmptyNode(controller + \"/STATUSUPDATES\")\n\n\treturn true\n}", "func (cu osdClusterUpgrader) UpgradeCluster(upgradeConfig *upgradev1alpha1.UpgradeConfig, logger logr.Logger) (upgradev1alpha1.UpgradePhase, *upgradev1alpha1.UpgradeCondition, error) {\n\tlogger.Info(\"Upgrading cluster\")\n\n\t// Determine if the upgrade has reached conditions warranting failure\n\tcancelUpgrade, _ := shouldFailUpgrade(cu.cvClient, cu.cfg, upgradeConfig)\n\tif cancelUpgrade 
{\n\n\t\t// Perform whatever actions are needed in the event of an upgrade failure\n\t\terr := performUpgradeFailure(cu.client, cu.metrics, cu.scaler, cu.notifier, upgradeConfig, logger)\n\n\t\t// If we couldn't notify of failure - do nothing, return the existing phase, try again next time\n\t\tif err != nil {\n\t\t\th := upgradeConfig.Status.History.GetHistory(upgradeConfig.Spec.Desired.Version)\n\t\t\tcondition := newUpgradeCondition(\"Upgrade failed\", \"FailedUpgrade notification sent\", \"FailedUpgrade\", corev1.ConditionFalse)\n\t\t\treturn h.Phase, condition, nil\n\t\t}\n\n\t\tlogger.Info(\"Failing upgrade\")\n\t\tcondition := newUpgradeCondition(\"Upgrade failed\", \"FailedUpgrade notification sent\", \"FailedUpgrade\", corev1.ConditionTrue)\n\t\treturn upgradev1alpha1.UpgradePhaseFailed, condition, nil\n\t}\n\n\tfor _, key := range cu.Ordering {\n\n\t\tlogger.Info(fmt.Sprintf(\"Performing %s\", key))\n\t\tresult, err := cu.Steps[key](cu.client, cu.cfg, cu.scaler, cu.drainstrategyBuilder, cu.metrics, cu.maintenance, cu.cvClient, cu.notifier, upgradeConfig, cu.machinery, cu.availabilityCheckers, logger)\n\n\t\tif err != nil {\n\t\t\tlogger.Error(err, fmt.Sprintf(\"Error when %s\", key))\n\t\t\tcondition := newUpgradeCondition(fmt.Sprintf(\"%s not done\", key), err.Error(), key, corev1.ConditionFalse)\n\t\t\treturn upgradev1alpha1.UpgradePhaseUpgrading, condition, err\n\t\t}\n\t\tif !result {\n\t\t\tlogger.Info(fmt.Sprintf(\"%s not done, skip following steps\", key))\n\t\t\tcondition := newUpgradeCondition(fmt.Sprintf(\"%s not done\", key), fmt.Sprintf(\"%s still in progress\", key), key, corev1.ConditionFalse)\n\t\t\treturn upgradev1alpha1.UpgradePhaseUpgrading, condition, nil\n\t\t}\n\t}\n\n\tkey := cu.Ordering[len(cu.Ordering)-1]\n\tcondition := newUpgradeCondition(fmt.Sprintf(\"%s done\", key), fmt.Sprintf(\"%s is completed\", key), key, corev1.ConditionTrue)\n\treturn upgradev1alpha1.UpgradePhaseUpgraded, condition, nil\n}", "func (m *MockRdbClient) UpdateCluster(arg0 context.Context, arg1 *v1alpha.UpdateClusterRequest, arg2 ...grpc.CallOption) (*v1alpha.Cluster, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"UpdateCluster\", varargs...)\n\tret0, _ := ret[0].(*v1alpha.Cluster)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (dn *Daemon) executeUpdateFromCluster() error {\n\treturn dn.executeUpdateFromClusterWithMachineConfig(nil)\n}", "func CreateCluster(request *restful.Request, response *restful.Response) {\n\tstart := time.Now()\n\n\tform := CreateClusterForm{}\n\t_ = request.ReadEntity(&form)\n\n\terr := utils.Validate.Struct(&form)\n\tif err != nil {\n\t\tmetrics.ReportRequestAPIMetrics(\"CreateCluster\", request.Request.Method, metrics.ErrStatus, start)\n\t\t_ = response.WriteHeaderAndEntity(400, utils.FormatValidationError(err))\n\t\treturn\n\t}\n\n\tuser := auth.GetUser(request)\n\tcluster := &models.BcsCluster{\n\t\tID: form.ClusterID,\n\t\tCreatorId: user.ID,\n\t}\n\tswitch form.ClusterType {\n\tcase \"k8s\":\n\t\tcluster.ClusterType = BcsK8sCluster\n\tcase \"mesos\":\n\t\tcluster.ClusterType = BcsMesosCluster\n\tcase \"tke\":\n\t\tcluster.ClusterType = BcsTkeCluster\n\t\tif form.TkeClusterID == \"\" || form.TkeClusterRegion == \"\" {\n\t\t\tmetrics.ReportRequestAPIMetrics(\"CreateCluster\", request.Request.Method, metrics.ErrStatus, start)\n\t\t\tblog.Warnf(\"create tke cluster failed, empty tke clusterid or region\")\n\t\t\tmessage := 
fmt.Sprintf(\"errcode: %d, create tke cluster failed, empty tke clusterid or region\", common.BcsErrApiBadRequest)\n\t\t\tutils.WriteClientError(response, common.BcsErrApiBadRequest, message)\n\t\t\treturn\n\t\t}\n\t\tcluster.TkeClusterId = form.TkeClusterID\n\t\tcluster.TkeClusterRegion = form.TkeClusterRegion\n\tdefault:\n\t\tmetrics.ReportRequestAPIMetrics(\"CreateCluster\", request.Request.Method, metrics.ErrStatus, start)\n\t\tblog.Warnf(\"create failed, cluster type invalid\")\n\t\tmessage := fmt.Sprintf(\"errcode: %d, create failed, cluster type invalid\", common.BcsErrApiBadRequest)\n\t\tutils.WriteClientError(response, common.BcsErrApiBadRequest, message)\n\t\treturn\n\t}\n\n\tclusterInDb := sqlstore.GetCluster(cluster.ID)\n\tif clusterInDb != nil {\n\t\tmetrics.ReportRequestAPIMetrics(\"CreateCluster\", request.Request.Method, metrics.ErrStatus, start)\n\t\tblog.Warnf(\"create cluster failed, cluster [%s] already exist\", cluster.ID)\n\t\tmessage := fmt.Sprintf(\"errcode: %d, create cluster failed, cluster [%s] already exist\", common.BcsErrApiBadRequest, cluster.ID)\n\t\tutils.WriteClientError(response, common.BcsErrApiBadRequest, message)\n\t\treturn\n\t}\n\n\terr = sqlstore.CreateCluster(cluster)\n\tif err != nil {\n\t\tmetrics.ReportRequestAPIMetrics(\"CreateCluster\", request.Request.Method, metrics.ErrStatus, start)\n\t\tblog.Errorf(\"failed to create cluster [%s]: %s\", cluster.ID, err.Error())\n\t\tmessage := fmt.Sprintf(\"errcode: %d, create cluster [%s] failed, error: %s\", common.BcsErrApiInternalDbError, cluster.ID, err.Error())\n\t\tutils.WriteServerError(response, common.BcsErrApiInternalDbError, message)\n\t\treturn\n\t}\n\n\tdata := utils.CreateResponseData(nil, \"success\", *cluster)\n\t_, _ = response.Write([]byte(data))\n\n\tmetrics.ReportRequestAPIMetrics(\"CreateCluster\", request.Request.Method, metrics.SucStatus, start)\n}", "func (d *Dao) pingESCluster(ctx context.Context) (err error) {\n\t//for name, client := range d.ESPool {\n\t//\tif _, _, err = client.Ping(d.c.Es[name].Addr[0]).Do(ctx); err != nil {\n\t//\t\td.PromError(\"Es:Ping\", \"%s:Ping error(%v)\", name, err)\n\t//\t\treturn\n\t//\t}\n\t//}\n\treturn\n}", "func (clusterRequest *KibanaRequest) Update(object client.Object) error {\n\treturn clusterRequest.client.Update(context.TODO(), object)\n}", "func (client RoverClusterClient) UpdateRoverCluster(ctx context.Context, request UpdateRoverClusterRequest) (response UpdateRoverClusterResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.DefaultRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.updateRoverCluster, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = UpdateRoverClusterResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = UpdateRoverClusterResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(UpdateRoverClusterResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into UpdateRoverClusterResponse\")\n\t}\n\treturn\n}", "func PostClusterHealthCheck(c client.Client, cfg *osdUpgradeConfig, scaler scaler.Scaler, dsb 
drain.NodeDrainStrategyBuilder, metricsClient metrics.Metrics, m maintenance.Maintenance, cvClient cv.ClusterVersion, nc eventmanager.EventManager, upgradeConfig *upgradev1alpha1.UpgradeConfig, machinery machinery.Machinery, availabilityCheckers ac.AvailabilityCheckers, logger logr.Logger) (bool, error) {\n\tok, err := performClusterHealthCheck(c, metricsClient, cvClient, cfg, logger)\n\tif err != nil || !ok {\n\t\tmetricsClient.UpdateMetricClusterCheckFailed(upgradeConfig.Name)\n\t\treturn false, err\n\t}\n\n\tmetricsClient.UpdateMetricClusterCheckSucceeded(upgradeConfig.Name)\n\treturn true, nil\n}", "func (a *Client) UpgradeDatalakeCluster(params *UpgradeDatalakeClusterParams) (*UpgradeDatalakeClusterOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewUpgradeDatalakeClusterParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"upgradeDatalakeCluster\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/sdx/{name}/upgrade\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &UpgradeDatalakeClusterReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*UpgradeDatalakeClusterOK), nil\n\n}", "func (a ClustersAPI) Edit(editReq httpmodels.EditReq) error {\n\t_, err := a.Client.performQuery(http.MethodPost, \"/clusters/edit\", editReq, nil)\n\treturn err\n}", "func editColor(w http.ResponseWriter, req *http.Request) {\n\t// grabs the data from the request\n\tcolorHash := req.FormValue(\"color\")\n\tnewColorName := req.FormValue(\"name\")\n\t// creates a new query string\n\tq := fmt.Sprintf(`UPDATE colors SET color = '%v' WHERE hex = '%v' `, newColorName, colorHash)\n\n\tstmt, err := db.Prepare(q)\n\t// checks the error\n\thtmlCheck(err, w, fmt.Sprint(\"There was an error \", err))\n\n\tr, err := stmt.Exec()\n\t// checks the error\n\thtmlCheck(err, w, fmt.Sprint(\"There was an error \", err))\n\n\tn, err := r.RowsAffected()\n\t// checks the error\n\thtmlCheck(err, w, fmt.Sprint(\"There was an error \", err))\n\n\t// writes the correct status to respond successful\n\tw.WriteHeader(http.StatusCreated)\n\tfmt.Fprintln(w, n)\n}", "func (c *Config) UpdateCustomCloudClusterDefinition(ccc *CustomCloudConfig) error {\n\tclusterDefinitionFullPath := fmt.Sprintf(\"%s/%s\", c.CurrentWorkingDir, c.ClusterDefinition)\n\tcs := parseVlabsContainerSerice(clusterDefinitionFullPath)\n\n\tcs.Location = c.Location\n\tcs.Properties.CustomCloudProfile.PortalURL = ccc.PortalURL\n\tcs.Properties.ServicePrincipalProfile.ClientID = ccc.CustomCloudClientID\n\tcs.Properties.ServicePrincipalProfile.Secret = ccc.CustomCloudSecret\n\tcs.Properties.CustomCloudProfile.AuthenticationMethod = ccc.AuthenticationMethod\n\tcs.Properties.CustomCloudProfile.IdentitySystem = ccc.IdentitySystem\n\n\tif ccc.AuthenticationMethod == \"client_certificate\" {\n\t\tcs.Properties.ServicePrincipalProfile.Secret = \"\"\n\t\tcs.Properties.ServicePrincipalProfile.KeyvaultSecretRef = &vlabs.KeyvaultSecretRef{\n\t\t\tVaultID: ccc.VaultID,\n\t\t\tSecretName: ccc.SecretName,\n\t\t}\n\t}\n\n\tcsBytes, err := json.Marshal(cs)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal containerService object: %v\", err)\n\t}\n\terr = os.WriteFile(clusterDefinitionFullPath, csBytes, 0644)\n\tif err != nil {\n\t\treturn 
fmt.Errorf(\"Error fail to write file object %p\", err)\n\t}\n\treturn nil\n}", "func CreateCluster(c *gin.Context) {\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateCluster, \"Cluster creation is stared\")\n\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateCluster, \"Bind json into CreateClusterRequest struct\")\n\n\t// bind request body to struct\n\tvar createClusterBaseRequest banzaiTypes.CreateClusterRequest\n\tif err := c.BindJSON(&createClusterBaseRequest); err != nil {\n\t\t// bind failed\n\t\tbanzaiUtils.LogError(banzaiConstants.TagCreateCluster, \"Required field is empty: \"+err.Error())\n\t\tcloud.SetResponseBodyJson(c, http.StatusBadRequest, gin.H{\n\t\t\tcloud.JsonKeyStatus: http.StatusBadRequest,\n\t\t\tcloud.JsonKeyMessage: \"Required field is empty\",\n\t\t\tcloud.JsonKeyError: err,\n\t\t})\n\t\treturn\n\t} else {\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateCluster, \"Bind succeeded\")\n\t}\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateCluster, \"Searching entry with name:\", createClusterBaseRequest.Name)\n\tvar savedCluster banzaiSimpleTypes.ClusterSimple\n\n\tdatabase.Query(\"SELECT * FROM \"+banzaiSimpleTypes.ClusterSimple.TableName(savedCluster)+\" WHERE name = ?;\",\n\t\tcreateClusterBaseRequest.Name,\n\t\t&savedCluster)\n\n\tif savedCluster.ID != 0 {\n\t\t// duplicated entry\n\t\tmsg := \"Duplicate entry '\" + savedCluster.Name + \"' for key 'name'\"\n\t\tbanzaiUtils.LogError(banzaiConstants.TagCreateCluster, msg)\n\t\tcloud.SetResponseBodyJson(c, http.StatusBadRequest, gin.H{\n\t\t\tcloud.JsonKeyStatus: http.StatusBadRequest,\n\t\t\tcloud.JsonKeyMessage: msg,\n\t\t})\n\t\treturn\n\t}\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateCluster, \"No entity with this name exists. The creation is possible.\")\n\n\tcloudType := createClusterBaseRequest.Cloud\n\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateCluster, \"Cloud type is \", cloudType)\n\n\tswitch cloudType {\n\tcase banzaiConstants.Amazon:\n\t\t// validate and create Amazon cluster\n\t\tawsData := createClusterBaseRequest.Properties.CreateClusterAmazon\n\t\tif isValid, err := awsData.Validate(); isValid && len(err) == 0 {\n\t\t\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateCluster, \"Validation is OK\")\n\t\t\tif isOk, createdCluster := cloud.CreateClusterAmazon(&createClusterBaseRequest, c); isOk {\n\t\t\t\t// update prometheus config..\n\t\t\t\tgo updatePrometheusWithRetryConf(createdCluster)\n\t\t\t}\n\t\t} else {\n\t\t\t// not valid request\n\t\t\tcloud.SetResponseBodyJson(c, http.StatusBadRequest, gin.H{\n\t\t\t\tcloud.JsonKeyStatus: http.StatusBadRequest,\n\t\t\t\tcloud.JsonKeyMessage: err,\n\t\t\t})\n\t\t}\n\tcase banzaiConstants.Azure:\n\t\t// validate and create Azure cluster\n\t\taksData := createClusterBaseRequest.Properties.CreateClusterAzure\n\t\tif isValid, err := aksData.Validate(); isValid && len(err) == 0 {\n\t\t\tif cloud.CreateClusterAzure(&createClusterBaseRequest, c) {\n\t\t\t\t// update prometheus config..\n\t\t\t\tupdatePrometheus()\n\t\t\t}\n\t\t} else {\n\t\t\t// not valid request\n\t\t\tcloud.SetResponseBodyJson(c, http.StatusBadRequest, gin.H{\n\t\t\t\tcloud.JsonKeyStatus: http.StatusBadRequest,\n\t\t\t\tcloud.JsonKeyMessage: err,\n\t\t\t})\n\t\t}\n\tdefault:\n\t\t// wrong cloud type\n\t\tcloud.SendNotSupportedCloudResponse(c, banzaiConstants.TagCreateCluster)\n\t}\n\n}", "func (c *FakeDaskClusters) Update(ctx context.Context, daskCluster *kubernetesdaskorgv1.DaskCluster, opts v1.UpdateOptions) (result *kubernetesdaskorgv1.DaskCluster, err error) {\n\tobj, err := 
c.Fake.\n\t\tInvokes(testing.NewUpdateAction(daskclustersResource, c.ns, daskCluster), &kubernetesdaskorgv1.DaskCluster{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*kubernetesdaskorgv1.DaskCluster), err\n}", "func ReconfigureTincCluster(log *logging.Logger, info ClusterInfo, provider CloudProvider) error {\n\t// Load all instances\n\tinstances, err := provider.GetInstances(info)\n\tif err != nil {\n\t\treturn maskAny(err)\n\t}\n\n\t// Call reconfigure-tinc-host on all instances\n\tif err := instances.ReconfigureTincCluster(log); err != nil {\n\t\treturn maskAny(err)\n\t}\n\n\treturn nil\n}", "func (a *HyperflexApiService) UpdateHyperflexClusterExecute(r ApiUpdateHyperflexClusterRequest) (*HyperflexCluster, *http.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = http.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tformFiles []formFile\n\t\tlocalVarReturnValue *HyperflexCluster\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"HyperflexApiService.UpdateHyperflexCluster\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/api/v1/hyperflex/Clusters/{Moid}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"Moid\"+\"}\", url.PathEscape(parameterToString(r.moid, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\tif r.hyperflexCluster == nil {\n\t\treturn localVarReturnValue, nil, reportError(\"hyperflexCluster is required and must be specified\")\n\t}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\", \"application/json-patch+json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.ifMatch != nil {\n\t\tlocalVarHeaderParams[\"If-Match\"] = parameterToString(*r.ifMatch, \"\")\n\t}\n\t// body params\n\tlocalVarPostBody = r.hyperflexCluster\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := &GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn 
localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tvar v Error\n\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\tif err != nil {\n\t\t\tnewErr.error = err.Error()\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tnewErr.model = v\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := &GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (a *Client) UpgradeDatalakeClusterByCrn(params *UpgradeDatalakeClusterByCrnParams) (*UpgradeDatalakeClusterByCrnOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewUpgradeDatalakeClusterByCrnParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"upgradeDatalakeClusterByCrn\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/sdx/crn/{crn}/upgrade\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &UpgradeDatalakeClusterByCrnReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*UpgradeDatalakeClusterByCrnOK), nil\n\n}", "func UpdateClusterAzureInCloud(r *banzaiTypes.UpdateClusterRequest, c *gin.Context, preCluster banzaiSimpleTypes.ClusterSimple) bool {\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagUpdateCluster, \"Start updating cluster (azure)\")\n\n\tif r == nil {\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagUpdateCluster, \"<nil> update cluster\")\n\t\treturn false\n\t}\n\n\tcluster2Db := banzaiSimpleTypes.ClusterSimple{\n\t\tModel: preCluster.Model,\n\t\tName: preCluster.Name,\n\t\tLocation: preCluster.Location,\n\t\tNodeInstanceType: preCluster.NodeInstanceType,\n\t\tCloud: r.Cloud,\n\t\tAzure: banzaiSimpleTypes.AzureClusterSimple{\n\t\t\tResourceGroup: 
preCluster.Azure.ResourceGroup,\n\t\t\tAgentCount: r.UpdateClusterAzure.AgentCount,\n\t\t\tAgentName: preCluster.Azure.AgentName,\n\t\t\tKubernetesVersion: preCluster.Azure.KubernetesVersion,\n\t\t},\n\t}\n\n\tccr := azureCluster.CreateClusterRequest{\n\t\tName: cluster2Db.Name,\n\t\tLocation: cluster2Db.Location,\n\t\tVMSize: cluster2Db.NodeInstanceType,\n\t\tResourceGroup: cluster2Db.Azure.ResourceGroup,\n\t\tAgentCount: cluster2Db.Azure.AgentCount,\n\t\tAgentName: cluster2Db.Azure.AgentName,\n\t\tKubernetesVersion: cluster2Db.Azure.KubernetesVersion,\n\t}\n\n\tres, err := azureClient.CreateUpdateCluster(ccr)\n\tif err != nil {\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagUpdateCluster, \"Cluster update failed!\", err.Message)\n\t\tSetResponseBodyJson(c, err.StatusCode, gin.H{\n\t\t\tJsonKeyStatus: err.StatusCode,\n\t\t\tJsonKeyMessage: err.Message,\n\t\t})\n\t\treturn false\n\t} else {\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagUpdateCluster, \"Cluster update succeeded\")\n\t\t// updateDb\n\t\tif updateClusterInDb(c, cluster2Db) {\n\t\t\t// success update\n\t\t\tSetResponseBodyJson(c, res.StatusCode, gin.H{\n\t\t\t\tJsonKeyResourceId: cluster2Db.ID,\n\t\t\t\tJsonKeyData: res.Value,\n\t\t\t})\n\t\t\treturn true\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t}\n}", "func (a *HyperflexApiService) UpdateHyperflexClusterProfile(ctx context.Context, moid string) ApiUpdateHyperflexClusterProfileRequest {\n\treturn ApiUpdateHyperflexClusterProfileRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func (w *worker) reconcileCluster(cluster *chop.ChiCluster) error {\n\tw.a.V(2).M(cluster).S().P()\n\tdefer w.a.V(2).M(cluster).E().P()\n\n\t// Add Cluster's Service\n\tservice := w.creator.CreateServiceCluster(cluster)\n\tif service == nil {\n\t\t// TODO\n\t\t// For somewhat reason Service is not created, this is an error, but not clear what to do about it\n\t\treturn nil\n\t}\n\treturn w.reconcileService(cluster.CHI, service)\n}", "func SyncClusterInfoToPassCC(taskID string, cluster *proto.Cluster) {\n\terr := passcc.GetCCClient().CreatePassCCCluster(cluster)\n\tif err != nil {\n\t\tblog.Errorf(\"UpdateCreateClusterDBInfoTask[%s] syncClusterInfoToPassCC CreatePassCCCluster[%s] failed: %v\",\n\t\t\ttaskID, cluster.ClusterID, err)\n\t} else {\n\t\tblog.Infof(\"UpdateCreateClusterDBInfoTask[%s] syncClusterInfoToPassCC CreatePassCCCluster[%s] successful\",\n\t\t\ttaskID, cluster.ClusterID)\n\t}\n\n\terr = passcc.GetCCClient().CreatePassCCClusterSnapshoot(cluster)\n\tif err != nil {\n\t\tblog.Errorf(\"UpdateCreateClusterDBInfoTask[%s] syncClusterInfoToPassCC CreatePassCCClusterSnapshoot[%s] failed: %v\",\n\t\t\ttaskID, cluster.ClusterID, err)\n\t} else {\n\t\tblog.Infof(\"UpdateCreateClusterDBInfoTask[%s] syncClusterInfoToPassCC CreatePassCCClusterSnapshoot[%s] successful\",\n\t\t\ttaskID, cluster.ClusterID)\n\t}\n}", "func (throttler *Throttler) updateMySQLClusterProbes(ctx context.Context, clusterProbes *mysql.ClusterProbes) error {\n\tthrottler.mysqlInventory.ClustersProbes[clusterProbes.ClusterName] = clusterProbes.InstanceProbes\n\tthrottler.mysqlInventory.IgnoreHostsCount[clusterProbes.ClusterName] = clusterProbes.IgnoreHostsCount\n\tthrottler.mysqlInventory.IgnoreHostsThreshold[clusterProbes.ClusterName] = clusterProbes.IgnoreHostsThreshold\n\treturn nil\n}", "func (p PGSQLConnection) CreateCluster(cluster *ClusterModel) error {\n\ttx, err := p.connection.Beginx()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = tx.NamedExec(\"INSERT INTO clusters (cluster_name, color) VALUES 
(:cluster_name, :color)\", cluster)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn tx.Commit()\n}", "func (s *StatusReconciler) calculateClusterStatus(stat *vapi.VerticaDBStatus) {\n\tstat.SubclusterCount = 0\n\tstat.InstallCount = 0\n\tstat.AddedToDBCount = 0\n\tstat.UpNodeCount = 0\n\tfor _, sc := range stat.Subclusters {\n\t\tstat.SubclusterCount++\n\t\tstat.InstallCount += sc.InstallCount\n\t\tstat.AddedToDBCount += sc.AddedToDBCount\n\t\tstat.UpNodeCount += sc.UpNodeCount\n\t}\n}", "func (a *Server) UpsertTrustedCluster(ctx context.Context, trustedCluster types.TrustedCluster) (newTrustedCluster types.TrustedCluster, returnErr error) {\n\t// It is recommended to omit trusted cluster name because the trusted cluster name\n\t// is updated to the roots cluster name during the handshake with the root cluster.\n\tvar existingCluster types.TrustedCluster\n\tif trustedCluster.GetName() != \"\" {\n\t\tvar err error\n\t\texistingCluster, err = a.GetTrustedCluster(ctx, trustedCluster.GetName())\n\t\tif err != nil && !trace.IsNotFound(err) {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t}\n\n\tenable := trustedCluster.GetEnabled()\n\n\t// If the trusted cluster already exists in the backend, make sure it's a\n\t// valid state change client is trying to make.\n\tif existingCluster != nil {\n\t\tif err := existingCluster.CanChangeStateTo(trustedCluster); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t}\n\n\tlogger := log.WithField(\"trusted_cluster\", trustedCluster.GetName())\n\n\t// change state\n\tif err := a.checkLocalRoles(ctx, trustedCluster.GetRoleMap()); err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\t// Update role map\n\tif existingCluster != nil && !cmp.Equal(existingCluster.GetRoleMap(), trustedCluster.GetRoleMap()) {\n\t\tif err := a.UpdateUserCARoleMap(ctx, existingCluster.GetName(), trustedCluster.GetRoleMap(),\n\t\t\texistingCluster.GetEnabled()); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\n\t\t// Reset previous UserCA role map if this func fails later on\n\t\tdefer func() {\n\t\t\tif returnErr != nil {\n\t\t\t\tif err := a.UpdateUserCARoleMap(ctx, trustedCluster.GetName(), existingCluster.GetRoleMap(),\n\t\t\t\t\ttrustedCluster.GetEnabled()); err != nil {\n\t\t\t\t\treturnErr = trace.NewAggregate(err, returnErr)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\t// Create or update state\n\tswitch {\n\tcase existingCluster != nil && enable == true:\n\t\tif existingCluster.GetEnabled() {\n\t\t\tbreak\n\t\t}\n\t\tlog.Debugf(\"Enabling existing Trusted Cluster relationship.\")\n\n\t\tif err := a.activateCertAuthority(trustedCluster); err != nil {\n\t\t\tif trace.IsNotFound(err) {\n\t\t\t\treturn nil, trace.BadParameter(\"enable only supported for Trusted Clusters created with Teleport 2.3 and above\")\n\t\t\t}\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\n\t\tif err := a.createReverseTunnel(trustedCluster); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\tcase existingCluster != nil && enable == false:\n\t\tif !existingCluster.GetEnabled() {\n\t\t\tbreak\n\t\t}\n\t\tlog.Debugf(\"Disabling existing Trusted Cluster relationship.\")\n\n\t\tif err := a.deactivateCertAuthority(trustedCluster); err != nil {\n\t\t\tif trace.IsNotFound(err) {\n\t\t\t\treturn nil, trace.BadParameter(\"enable only supported for Trusted Clusters created with Teleport 2.3 and above\")\n\t\t\t}\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\n\t\tif err := a.DeleteReverseTunnel(trustedCluster.GetName()); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\tcase existingCluster == 
nil && enable == true:\n\t\tlogger.Info(\"Creating enabled Trusted Cluster relationship.\")\n\n\t\tremoteCAs, err := a.establishTrust(ctx, trustedCluster)\n\t\tif err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\n\t\t// Force name of the trusted cluster resource\n\t\t// to be equal to the name of the remote cluster it is connecting to.\n\t\ttrustedCluster.SetName(remoteCAs[0].GetClusterName())\n\n\t\tif err := a.addCertAuthorities(ctx, trustedCluster, remoteCAs); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\n\t\tif err := a.createReverseTunnel(trustedCluster); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\n\tcase existingCluster == nil && enable == false:\n\t\tlogger.Info(\"Creating disabled Trusted Cluster relationship.\")\n\n\t\tremoteCAs, err := a.establishTrust(ctx, trustedCluster)\n\t\tif err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\n\t\t// Force name to the name of the trusted cluster.\n\t\ttrustedCluster.SetName(remoteCAs[0].GetClusterName())\n\n\t\tif err := a.addCertAuthorities(ctx, trustedCluster, remoteCAs); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\n\t\tif err := a.deactivateCertAuthority(trustedCluster); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t}\n\n\ttc, err := a.Services.UpsertTrustedCluster(ctx, trustedCluster)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\tif err := a.emitter.EmitAuditEvent(ctx, &apievents.TrustedClusterCreate{\n\t\tMetadata: apievents.Metadata{\n\t\t\tType: events.TrustedClusterCreateEvent,\n\t\t\tCode: events.TrustedClusterCreateCode,\n\t\t},\n\t\tUserMetadata: authz.ClientUserMetadata(ctx),\n\t\tResourceMetadata: apievents.ResourceMetadata{\n\t\t\tName: trustedCluster.GetName(),\n\t\t},\n\t}); err != nil {\n\t\tlogger.WithError(err).Warn(\"Failed to emit trusted cluster create event.\")\n\t}\n\n\treturn tc, nil\n}", "func (c *ClientWithResponses) UpdateSksClusterWithBodyWithResponse(ctx context.Context, id string, contentType string, body io.Reader) (*UpdateSksClusterResponse, error) {\n\trsp, err := c.UpdateSksClusterWithBody(ctx, id, contentType, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseUpdateSksClusterResponse(rsp)\n}", "func (coc *CoClustering) clusterMean(dst []float64, clusters []int, ratings []*base.SparseVector) {\n\tbase.FillZeroVector(dst)\n\tcount := make([]float64, len(dst))\n\tfor id, cluster := range clusters {\n\t\tratings[id].ForEach(func(_, index int, value float64) {\n\t\t\tdst[cluster] += value\n\t\t\tcount[cluster]++\n\t\t})\n\t}\n\tfor i := range dst {\n\t\tif count[i] > 0 {\n\t\t\tdst[i] /= count[i]\n\t\t} else {\n\t\t\tdst[i] = coc.GlobalMean\n\t\t}\n\t}\n}", "func (u *User) SetColor(asset, hexcode string) error {\n\tpath := fmt.Sprintf(\"users/%d/colors/%s\", u.ID, asset)\n\tif hexcode[0] == '#' {\n\t\thexcode = hexcode[1:]\n\t}\n\n\tresp, err := put(u.client, path, params{\"hexcode\": {hexcode}})\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn resp.Body.Close()\n}", "func (s *BasecluListener) ExitCluster(ctx *ClusterContext) {}", "func MutateCluster(cluster *clusterapis.Cluster) {\n\tmutateClusterTaints(cluster.Spec.Taints)\n\tmigrateZoneToZones(cluster)\n}", "func UpdateClusterState(csr *clusterstate.ClusterStateRegistry) {\n\tif csr == nil || reflect.ValueOf(csr).IsNil() {\n\t\treturn\n\t}\n\tif csr.IsClusterHealthy() {\n\t\tclusterSafeToAutoscale.Set(1)\n\t} else {\n\t\tclusterSafeToAutoscale.Set(0)\n\t}\n\treadiness := 
csr.GetClusterReadiness()\n\tnodesCount.WithLabelValues(readyLabel).Set(float64(readiness.Ready))\n\tnodesCount.WithLabelValues(unreadyLabel).Set(float64(readiness.Unready + readiness.LongNotStarted))\n\tnodesCount.WithLabelValues(startingLabel).Set(float64(readiness.NotStarted))\n}", "func ExampleConnectedClusterClient_Update() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armhybridkubernetes.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewConnectedClusterClient().Update(ctx, \"k8sc-rg\", \"testCluster\", armhybridkubernetes.ConnectedClusterPatch{\n\t\tTags: map[string]*string{\n\t\t\t\"tag1\": to.Ptr(\"value1\"),\n\t\t\t\"tag2\": to.Ptr(\"value2\"),\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.ConnectedCluster = armhybridkubernetes.ConnectedCluster{\n\t// \tName: to.Ptr(\"connectedCluster1\"),\n\t// \tType: to.Ptr(\"Microsoft.Kubernetes/connectedClusters\"),\n\t// \tID: to.Ptr(\"/subscriptions/1bfbb5d0-917e-4346-9026-1d3b344417f5/resourceGroups/akkeshar/providers/Microsoft.Kubernetes/connectedClusters/connectedCluster1\"),\n\t// \tLocation: to.Ptr(\"East US\"),\n\t// \tTags: map[string]*string{\n\t// \t},\n\t// \tIdentity: &armhybridkubernetes.ConnectedClusterIdentity{\n\t// \t\tType: to.Ptr(armhybridkubernetes.ResourceIdentityTypeSystemAssigned),\n\t// \t},\n\t// \tProperties: &armhybridkubernetes.ConnectedClusterProperties{\n\t// \t\tAgentPublicKeyCertificate: to.Ptr(\"MIICYzCCAcygAwIBAgIBADANBgkqhkiG9w0BAQUFADAuMQswCQYDVQQGEwJVUzEMMAoGA1UEChMDSUJNMREwDwYDVQQLEwhMb2NhbCBDQTAeFw05OTEyMjIwNTAwMDBaFw0wMDEyMjMwNDU5NTlaMC4xCzAJBgNVBAYTAlVTMQwwCgYDVQQKEwNJQk0xETAPBgNVBAsTCExvY2FsIENBMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQD2bZEo7xGaX2/0GHkrNFZvlxBou9v1Jmt/PDiTMPve8r9FeJAQ0QdvFST/0JPQYD20rH0bimdDLgNdNynmyRoS2S/IInfpmf69iyc2G0TPyRvmHIiOZbdCd+YBHQi1adkj17NDcWj6S14tVurFX73zx0sNoMS79q3tuXKrDsxeuwIDAQABo4GQMIGNMEsGCVUdDwGG+EIBDQQ+EzxHZW5lcmF0ZWQgYnkgdGhlIFNlY3VyZVdheSBTZWN1cml0eSBTZXJ2ZXIgZm9yIE9TLzM5MCAoUkFDRikwDgYDVR0PAQH/BAQDAgAGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFJ3+ocRyCTJw067dLSwr/nalx6YMMA0GCSqGSIb3DQEBBQUAA4GBAMaQzt+zaj1GU77yzlr8iiMBXgdQrwsZZWJo5exnAucJAEYQZmOfyLiM D6oYq+ZnfvM0n8G/Y79q8nhwvuxpYOnRSAXFp6xSkrIOeZtJMY1h00LKp/JX3Ng1svZ2agE126JHsQ0bhzN5TKsYfbwfTwfjdWAGy6Vf1nYi/rO+ryMO\"),\n\t// \t\tAgentVersion: to.Ptr(\"0.1.0\"),\n\t// \t\tKubernetesVersion: to.Ptr(\"1.17.0\"),\n\t// \t\tProvisioningState: to.Ptr(armhybridkubernetes.ProvisioningStateSucceeded),\n\t// \t\tTotalNodeCount: to.Ptr[int32](2),\n\t// \t},\n\t// \tSystemData: &armhybridkubernetes.SystemData{\n\t// \t\tCreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2020-12-17T07:06:33.9173186Z\"); return t}()),\n\t// \t\tCreatedBy: to.Ptr(\"[email protected]\"),\n\t// \t\tCreatedByType: to.Ptr(armhybridkubernetes.CreatedByTypeUser),\n\t// \t\tLastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2020-12-17T07:14:58.865041Z\"); return t}()),\n\t// 
\t\tLastModifiedBy: to.Ptr(\"2d2a754c-cade-4935-83d4-ce413c5a3910\"),\n\t// \t\tLastModifiedByType: to.Ptr(armhybridkubernetes.LastModifiedByTypeApplication),\n\t// \t},\n\t// }\n}", "func (coc *CoClustering) clusterMean(dst []float64, clusters []int, ratings []*base.MarginalSubSet) {\n\tbase.FillZeroVector(dst)\n\tcount := make([]float64, len(dst))\n\tfor index, cluster := range clusters {\n\t\tratings[index].ForEachIndex(func(_, index int, value float64) {\n\t\t\tdst[cluster] += value\n\t\t\tcount[cluster]++\n\t\t})\n\t}\n\tfor i := range dst {\n\t\tif count[i] > 0 {\n\t\t\tdst[i] /= count[i]\n\t\t} else {\n\t\t\tdst[i] = coc.GlobalMean\n\t\t}\n\t}\n}", "func ExampleConnectedClusterClient_Update() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclient, err := armhybridkubernetes.NewConnectedClusterClient(\"1bfbb5d0-917e-4346-9026-1d3b344417f5\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := client.Update(ctx,\n\t\t\"k8sc-rg\",\n\t\t\"testCluster\",\n\t\tarmhybridkubernetes.ConnectedClusterPatch{\n\t\t\tTags: map[string]*string{\n\t\t\t\t\"tag1\": to.Ptr(\"value1\"),\n\t\t\t\t\"tag2\": to.Ptr(\"value2\"),\n\t\t\t},\n\t\t},\n\t\tnil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// TODO: use response item\n\t_ = res\n}", "func (s *IngestStep) Cluster(schemaFile string, dataset string,\n\trootDataPath string, outputFolder string, hasHeader bool) error {\n\toutputSchemaPath := path.Join(outputFolder, D3MSchemaPathRelative)\n\toutputDataPath := path.Join(outputFolder, D3MDataPathRelative)\n\tsourceFolder := path.Dir(dataset)\n\n\t// copy the source folder to have all the linked files for merging\n\tos.MkdirAll(outputFolder, os.ModePerm)\n\terr := copy.Copy(sourceFolder, outputFolder)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to copy source data\")\n\t}\n\n\t// delete the existing files that will be overwritten\n\tos.Remove(outputSchemaPath)\n\tos.Remove(outputDataPath)\n\n\t// load metadata from original schema\n\tmeta, err := metadata.LoadMetadataFromOriginalSchema(schemaFile)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to load original schema file\")\n\t}\n\tmainDR := meta.GetMainDataResource()\n\n\t// add feature variables\n\tfeatures, err := getClusterVariables(meta, \"_cluster_\")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to get cluster variables\")\n\t}\n\n\td3mIndexField := getD3MIndexField(mainDR)\n\n\t// open the input file\n\tdataPath := path.Join(rootDataPath, mainDR.ResPath)\n\tlines, err := s.readCSVFile(dataPath, hasHeader)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error reading raw data\")\n\t}\n\n\t// add the cluster data to the raw data\n\tfor _, f := range features {\n\t\tmainDR.Variables = append(mainDR.Variables, f.Variable)\n\n\t\tlines, err = s.appendFeature(sourceFolder, d3mIndexField, false, f, lines)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"error appending clustered data\")\n\t\t}\n\t}\n\n\t// initialize csv writer\n\toutput := &bytes.Buffer{}\n\twriter := csv.NewWriter(output)\n\n\t// output the header\n\theader := make([]string, len(mainDR.Variables))\n\tfor _, v := range mainDR.Variables {\n\t\theader[v.Index] = v.Name\n\t}\n\terr = writer.Write(header)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error storing clustered header\")\n\t}\n\n\tfor _, line := range lines 
{\n\t\terr = writer.Write(line)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"error storing clustered output\")\n\t\t}\n\t}\n\n\t// output the data with the new feature\n\twriter.Flush()\n\n\terr = util.WriteFileWithDirs(outputDataPath, output.Bytes(), os.ModePerm)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error writing clustered output\")\n\t}\n\n\trelativePath := getRelativePath(path.Dir(outputSchemaPath), outputDataPath)\n\tmainDR.ResPath = relativePath\n\n\t// write the new schema to file\n\terr = metadata.WriteSchema(meta, outputSchemaPath)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to store cluster schema\")\n\t}\n\n\treturn nil\n}", "func (adm Admin) AddCluster(cluster string, recreateIfExists bool) bool {\n\tkb := &KeyBuilder{cluster}\n\t// c = \"/<cluster>\"\n\tc := kb.cluster()\n\n\t// check if cluster already exists\n\texists, _, err := adm.zkClient.Exists(c)\n\tif err != nil || (exists && !recreateIfExists) {\n\t\treturn false\n\t}\n\n\tif recreateIfExists {\n\t\tif err := adm.zkClient.DeleteTree(c); err != nil {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tadm.zkClient.CreateEmptyNode(c)\n\n\t// PROPERTYSTORE is an empty node\n\tpropertyStore := fmt.Sprintf(\"/%s/PROPERTYSTORE\", cluster)\n\tadm.zkClient.CreateEmptyNode(propertyStore)\n\n\t// STATEMODELDEFS has 6 children\n\tstateModelDefs := fmt.Sprintf(\"/%s/STATEMODELDEFS\", cluster)\n\tadm.zkClient.CreateEmptyNode(stateModelDefs)\n\tadm.zkClient.CreateDataWithPath(\n\t\tstateModelDefs+\"/LeaderStandby\", []byte(_helixDefaultNodes[\"LeaderStandby\"]))\n\tadm.zkClient.CreateDataWithPath(\n\t\tstateModelDefs+\"/MasterSlave\", []byte(_helixDefaultNodes[\"MasterSlave\"]))\n\tadm.zkClient.CreateDataWithPath(\n\t\tstateModelDefs+\"/OnlineOffline\", []byte(_helixDefaultNodes[StateModelNameOnlineOffline]))\n\tadm.zkClient.CreateDataWithPath(\n\t\tstateModelDefs+\"/STORAGE_DEFAULT_SM_SCHEMATA\",\n\t\t[]byte(_helixDefaultNodes[\"STORAGE_DEFAULT_SM_SCHEMATA\"]))\n\tadm.zkClient.CreateDataWithPath(\n\t\tstateModelDefs+\"/SchedulerTaskQueue\", []byte(_helixDefaultNodes[\"SchedulerTaskQueue\"]))\n\tadm.zkClient.CreateDataWithPath(\n\t\tstateModelDefs+\"/Task\", []byte(_helixDefaultNodes[\"Task\"]))\n\n\t// INSTANCES is initially an empty node\n\tinstances := fmt.Sprintf(\"/%s/INSTANCES\", cluster)\n\tadm.zkClient.CreateEmptyNode(instances)\n\n\t// CONFIGS has 3 children: CLUSTER, RESOURCE, PARTICIPANT\n\tconfigs := fmt.Sprintf(\"/%s/CONFIGS\", cluster)\n\tadm.zkClient.CreateEmptyNode(configs)\n\tadm.zkClient.CreateEmptyNode(configs + \"/PARTICIPANT\")\n\tadm.zkClient.CreateEmptyNode(configs + \"/RESOURCE\")\n\tadm.zkClient.CreateEmptyNode(configs + \"/CLUSTER\")\n\n\tclusterNode := model.NewMsg(cluster)\n\taccessor := newDataAccessor(adm.zkClient, kb)\n\taccessor.createMsg(configs+\"/CLUSTER/\"+cluster, clusterNode)\n\n\t// empty ideal states\n\tidealStates := fmt.Sprintf(\"/%s/IDEALSTATES\", cluster)\n\tadm.zkClient.CreateEmptyNode(idealStates)\n\n\t// empty external view\n\texternalView := fmt.Sprintf(\"/%s/EXTERNALVIEW\", cluster)\n\tadm.zkClient.CreateEmptyNode(externalView)\n\n\t// empty live instances\n\tliveInstances := fmt.Sprintf(\"/%s/LIVEINSTANCES\", cluster)\n\tadm.zkClient.CreateEmptyNode(liveInstances)\n\n\t// CONTROLLER has four children: [ERRORS, HISTORY, MESSAGES, STATUSUPDATES]\n\tcontroller := fmt.Sprintf(\"/%s/CONTROLLER\", cluster)\n\tadm.zkClient.CreateEmptyNode(controller)\n\tadm.zkClient.CreateEmptyNode(controller + \"/ERRORS\")\n\tadm.zkClient.CreateEmptyNode(controller + 
\"/HISTORY\")\n\tadm.zkClient.CreateEmptyNode(controller + \"/MESSAGES\")\n\tadm.zkClient.CreateEmptyNode(controller + \"/STATUSUPDATES\")\n\n\treturn true\n}", "func (r *Cluster) ApplyTo(m *model.Cluster) {\n\tm.Name = r.Name\n\tm.Description = r.Description\n\tm.DataCenter = r.DataCenter.ID\n\tm.HaReservation = r.bool(r.HaReservation)\n\tm.KsmEnabled = r.bool(r.KSM.Enabled)\n}", "func (r *MysqlUserReconciler) reconcileUserInCluster(ctx context.Context, mysqlUser *mysqluser.MysqlUser) (err error) {\n\t// Catch the error and set the failed status.\n\tdefer setFailedStatus(&err, mysqlUser)\n\n\t// Reconcile the mysqlUser into mysql.\n\tif err = r.reconcileUserInDB(ctx, mysqlUser); err != nil {\n\t\treturn\n\t}\n\n\t// Add finalizer if is not added on the resource.\n\tif !meta.HasFinalizer(&mysqlUser.ObjectMeta, userFinalizer) {\n\t\tmeta.AddFinalizer(&mysqlUser.ObjectMeta, userFinalizer)\n\t\tif err = r.Update(ctx, mysqlUser.Unwrap()); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Update status for allowedHosts if needed, mark that status need to be updated.\n\tif !reflect.DeepEqual(mysqlUser.Status.AllowedHosts, mysqlUser.Spec.Hosts) {\n\t\tmysqlUser.Status.AllowedHosts = mysqlUser.Spec.Hosts\n\t}\n\n\t// Update the status according to the result.\n\tmysqlUser.UpdateStatusCondition(\n\t\tapiv1alpha1.MySQLUserReady, corev1.ConditionTrue,\n\t\tmysqluser.ProvisionSucceededReason, \"The user provisioning has succeeded.\",\n\t)\n\n\treturn\n}", "func (c *AKSCluster) updateWithPolling(client *azureClient.AKSClient, ccr *azureCluster.CreateClusterRequest) (*banzaiAzureTypes.ResponseWithValue, error) {\n\n\tlog.Info(\"Send update request to azure\")\n\t_, err := azureClient.CreateUpdateCluster(client, ccr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.Info(\"Polling to check update\")\n\t// polling to check cluster updated\n\tupdatedCluster, err := azureClient.PollingCluster(client, c.modelCluster.Name, c.modelCluster.Azure.ResourceGroup)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.Info(\"Cluster updated successfully\")\n\treturn updatedCluster, nil\n}", "func getClusterMetric(result model.Value, metric string) {\n\t//Validate there is data in the results.\n\tif result == nil {\n\t\treturn\n\t}\n\n\tif mat, ok := result.(model.Matrix); ok {\n\t\tfor i := 0; i < mat.Len(); i++ {\n\t\t\t//validates that the value of the entity is set and if not will default to 0\n\t\t\tvar value float64\n\t\t\tif len(mat[i].Values) == 0 {\n\t\t\t\tvalue = 0\n\t\t\t} else {\n\t\t\t\tvalue = float64(mat[i].Values[len(mat[i].Values)-1].Value)\n\t\t\t}\n\n\t\t\t//Check which metric this is for and update the corresponding variable for this container in the system data structure\n\n\t\t\tswitch metric {\n\t\t\tcase \"limits\":\n\t\t\t\tswitch mat[i].Metric[\"resource\"] {\n\t\t\t\tcase \"memory\":\n\t\t\t\t\tclusterEntity.memLimit = int(value / 1024 / 1024)\n\t\t\t\tcase \"cpu\":\n\t\t\t\t\tclusterEntity.cpuLimit = int(value * 1000)\n\t\t\t\t}\n\t\t\tcase \"requests\":\n\t\t\t\tswitch mat[i].Metric[\"resource\"] {\n\t\t\t\tcase \"memory\":\n\t\t\t\t\tclusterEntity.memRequest = int(value / 1024 / 1024)\n\t\t\t\tcase \"cpu\":\n\t\t\t\t\tclusterEntity.cpuRequest = int(value * 1000)\n\t\t\t\t}\n\t\t\tcase \"cpuLimit\":\n\t\t\t\tclusterEntity.cpuLimit = int(value)\n\t\t\tcase \"cpuRequest\":\n\t\t\t\tclusterEntity.cpuRequest = int(value)\n\t\t\tcase \"memLimit\":\n\t\t\t\tclusterEntity.memLimit = int(value)\n\t\t\tcase \"memRequest\":\n\t\t\t\tclusterEntity.memRequest = 
int(value)\n\t\t\t}\n\t\t}\n\t}\n\n}", "func (r *ReconcileHumioCluster) setState(ctx context.Context, state string, hc *corev1alpha1.HumioCluster) error {\n\tr.logger.Infof(\"setting cluster state to %s\", state)\n\thc.Status.State = state\n\terr := r.client.Status().Update(ctx, hc)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func fetchCluster(c *gin.Context) string {\n\tconst key = \"cluster\"\n\n\tswitch {\n\tcase len(c.Param(key)) > 0:\n\t\treturn c.Param(key)\n\tcase len(c.Query(key)) > 0:\n\t\treturn c.Query(key)\n\tcase len(c.PostForm(key)) > 0:\n\t\treturn c.PostForm(key)\n\tdefault:\n\t\treturn \"\"\n\t}\n}", "func (cc *ClusterReconciler) sync(c *scyllav1.ScyllaCluster) error {\n\tctx := log.WithNewTraceID(context.Background())\n\tlogger := cc.Logger.With(\"cluster\", c.Namespace+\"/\"+c.Name, \"resourceVersion\", c.ResourceVersion)\n\tlogger.Info(ctx, \"Starting reconciliation...\")\n\tlogger.Debug(ctx, \"Cluster State\", \"object\", c)\n\n\t// Before syncing, ensure that all StatefulSets are up-to-date\n\tstale, err := util.AreStatefulSetStatusesStale(ctx, c, cc.Client)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to check sts staleness\")\n\t}\n\tif stale {\n\t\tlogger.Debug(ctx, \"StatefulSets are not ready!\")\n\t\treturn nil\n\t}\n\tlogger.Debug(ctx, \"All StatefulSets are up-to-date!\")\n\n\t// Cleanup Cluster resources\n\tif err := cc.cleanup(ctx, c); err != nil {\n\t\tcc.Recorder.Event(c, corev1.EventTypeWarning, naming.ErrSyncFailed, MessageCleanupFailed)\n\t}\n\n\t// Sync Headless Service for Cluster\n\tif err := cc.syncClusterHeadlessService(ctx, c); err != nil {\n\t\tcc.Recorder.Event(c, corev1.EventTypeWarning, naming.ErrSyncFailed, MessageHeadlessServiceSyncFailed)\n\t\treturn errors.Wrap(err, \"failed to sync headless service\")\n\t}\n\n\t// Sync Cluster Pod Disruption Budget\n\tif err := cc.syncPodDisruptionBudget(ctx, c); err != nil {\n\t\tcc.Recorder.Event(c, corev1.EventTypeWarning, naming.ErrSyncFailed, MessagePodDisruptionBudgetSyncFailed)\n\t\treturn errors.Wrap(err, \"failed to sync pod disruption budget\")\n\t}\n\n\t// Sync Agent auth token\n\tif err := cc.syncAgentAuthToken(ctx, c); err != nil {\n\t\tcc.Recorder.Event(c, corev1.EventTypeWarning, naming.ErrSyncFailed, MessageAgentTokenSyncFailed)\n\t\treturn errors.Wrap(err, \"failed to sync agent auth token\")\n\t}\n\n\t// Sync Cluster Member Services\n\tif err := cc.syncMemberServices(ctx, c); err != nil {\n\t\tcc.Recorder.Event(c, corev1.EventTypeWarning, naming.ErrSyncFailed, MessageMemberServicesSyncFailed)\n\t\treturn errors.Wrap(err, \"failed to sync member service\")\n\t}\n\n\t// Update Status\n\tlogger.Info(ctx, \"Calculating cluster status...\")\n\tif err := cc.updateStatus(ctx, c); err != nil {\n\t\tcc.Recorder.Event(c, corev1.EventTypeWarning, naming.ErrSyncFailed, fmt.Sprintf(MessageUpdateStatusFailed, err))\n\t\treturn errors.Wrap(err, \"failed to update status\")\n\t}\n\n\t// Calculate and execute next action\n\tif act, err := cc.nextAction(ctx, c); err != nil {\n\t\tcc.Recorder.Event(c, corev1.EventTypeWarning, naming.ErrSyncFailed, fmt.Sprintf(MessageUpdateStatusFailed, err))\n\t\treturn errors.Wrap(err, \"failed to determine next action\")\n\t} else if act != nil {\n\t\ts := actions.NewState(cc.Client, cc.KubeClient, cc.Recorder)\n\t\tlogger.Debug(ctx, \"New action\", \"name\", act.Name())\n\t\tif err := act.Execute(ctx, s); err != nil {\n\t\t\tcc.Recorder.Event(c, corev1.EventTypeWarning, naming.ErrSyncFailed, fmt.Sprintf(MessageClusterSyncFailed, 
errors.Cause(err)))\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (d *DBGenerator) setSubclusterDetail(ctx context.Context) error {\n\tq := Queries[SubclusterQueryKey]\n\trows, err := d.Conn.QueryContext(ctx, q)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed running '%s': %w\", q, err)\n\t}\n\tdefer rows.Close()\n\n\t// Map to have fast lookup of subcluster name to index in the\n\t// d.Objs.Vdb.Spec.Subclusters array\n\tsubclusterInxMap := map[string]int{}\n\n\tfor rows.Next() {\n\t\tif rows.Err() != nil {\n\t\t\treturn fmt.Errorf(\"failed running '%s': %w\", q, rows.Err())\n\t\t}\n\t\tvar name string\n\t\tvar isPrimary bool\n\t\tif err := rows.Scan(&name, &isPrimary); err != nil {\n\t\t\treturn fmt.Errorf(\"failed running '%s': %w\", q, err)\n\t\t}\n\n\t\tif !vapi.IsValidSubclusterName(name) {\n\t\t\treturn fmt.Errorf(\"subcluster names are included in the name of statefulsets, but the name \"+\n\t\t\t\t\"'%s' cannot be used as it will violate Kubernetes naming. Please rename the subcluster and \"+\n\t\t\t\t\"retry this command again\", name)\n\t\t}\n\n\t\tinx, ok := subclusterInxMap[name]\n\t\tif !ok {\n\t\t\tinx = len(d.Objs.Vdb.Spec.Subclusters)\n\t\t\t// Add an empty subcluster. We increment the count a few lines down.\n\t\t\td.Objs.Vdb.Spec.Subclusters = append(d.Objs.Vdb.Spec.Subclusters,\n\t\t\t\tvapi.Subcluster{Name: name, Size: 0, IsPrimary: isPrimary})\n\t\t\tsubclusterInxMap[name] = inx\n\t\t}\n\t\td.Objs.Vdb.Spec.Subclusters[inx].Size++\n\n\t\t// Maintain the ReviveOrder. Update the count of the prior unless the\n\t\t// previous node was for a different subcluster.\n\t\trevSz := len(d.Objs.Vdb.Spec.ReviveOrder)\n\t\tif revSz == 0 || d.Objs.Vdb.Spec.ReviveOrder[revSz-1].SubclusterIndex != inx {\n\t\t\td.Objs.Vdb.Spec.ReviveOrder = append(d.Objs.Vdb.Spec.ReviveOrder, vapi.SubclusterPodCount{SubclusterIndex: inx, PodCount: 1})\n\t\t} else {\n\t\t\td.Objs.Vdb.Spec.ReviveOrder[revSz-1].PodCount++\n\t\t}\n\t}\n\n\tif len(subclusterInxMap) == 0 {\n\t\treturn errors.New(\"no subclusters found\")\n\t}\n\treturn nil\n}", "func (c *Client) UpdateCluster(ctx context.Context, params *UpdateClusterInput, optFns ...func(*Options)) (*UpdateClusterOutput, error) {\n\tif params == nil {\n\t\tparams = &UpdateClusterInput{}\n\t}\n\n\tresult, metadata, err := c.invokeOperation(ctx, \"UpdateCluster\", params, optFns, c.addOperationUpdateClusterMiddlewares)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := result.(*UpdateClusterOutput)\n\tout.ResultMetadata = metadata\n\treturn out, nil\n}", "func (c *ClusterStatusManager) Update(cc *v1alpha1.CassandraCluster) error {\n\tcurrentStatus, err := c.getClusterStatus(cc)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcurrentStatus.DeepCopyInto(&cc.Status)\n\treturn c.listerUpdater.Update(cc)\n}", "func PutColor(c *gin.Context) {\n\tid, err := strconv.Atoi(c.Param(\"id\"))\n\tif err != nil {\n\t\tc.AbortWithStatusJSON(http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\n\tvar color RgbColor\n\tif err := c.ShouldBindJSON(&color); err != nil {\n\t\tc.AbortWithStatusJSON(http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\tcolor.ID = id\n\n\tDb.Save(color)\n\n\tc.JSON(200, color)\n}", "func (a *Client) V2ResetCluster(ctx context.Context, params *V2ResetClusterParams) (*V2ResetClusterAccepted, error) {\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID:                 \"v2ResetCluster\",\n\t\tMethod:             \"POST\",\n\t\tPathPattern:        \"/v2/clusters/{cluster_id}/actions/reset\",\n\t\tProducesMediaTypes: 
[]string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &V2ResetClusterReader{formats: a.formats},\n\t\tAuthInfo: a.authInfo,\n\t\tContext: ctx,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*V2ResetClusterAccepted), nil\n\n}", "func ExampleClustersClient_BeginUpdate() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armservicefabric.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpoller, err := clientFactory.NewClustersClient().BeginUpdate(ctx, \"resRg\", \"myCluster\", armservicefabric.ClusterUpdateParameters{\n\t\tProperties: &armservicefabric.ClusterPropertiesUpdateParameters{\n\t\t\tEventStoreServiceEnabled: to.Ptr(true),\n\t\t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t\t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t\t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t\t\t\t\t},\n\t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t\t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t\t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t\t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t\t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t\t\t\t\t},\n\t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t\t\t\t\tIsPrimary: to.Ptr(true),\n\t\t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"testnt1\"),\n\t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t\t\t\t\t\tEndPort: to.Ptr[int32](2000),\n\t\t\t\t\t\tStartPort: to.Ptr[int32](1000),\n\t\t\t\t\t},\n\t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](0),\n\t\t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t\t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t\t\t\t\t\tEndPort: to.Ptr[int32](4000),\n\t\t\t\t\t\tStartPort: to.Ptr[int32](3000),\n\t\t\t\t\t},\n\t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](0),\n\t\t\t\t\tIsPrimary: to.Ptr(false),\n\t\t\t\t\tVMInstanceCount: to.Ptr[int32](3),\n\t\t\t\t}},\n\t\t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelBronze),\n\t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeAutomatic),\n\t\t\tUpgradePauseEndTimestampUTC: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2021-06-25T22:00:00Z\"); return t }()),\n\t\t\tUpgradePauseStartTimestampUTC: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2021-06-21T22:00:00Z\"); return t }()),\n\t\t\tUpgradeWave: to.Ptr(armservicefabric.ClusterUpgradeCadence(\"Wave\")),\n\t\t},\n\t\tTags: map[string]*string{\n\t\t\t\"a\": to.Ptr(\"b\"),\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\tres, err := poller.PollUntilDone(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.Cluster = armservicefabric.Cluster{\n\t// \tName: to.Ptr(\"myCluster\"),\n\t// \tType: to.Ptr(\"Microsoft.ServiceFabric/clusters\"),\n\t// \tEtag: to.Ptr(\"W/\\\"636462502169240744\\\"\"),\n\t// \tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resRg/providers/Microsoft.ServiceFabric/clusters/myCluster\"),\n\t// \tLocation: to.Ptr(\"eastus\"),\n\t// \tTags: map[string]*string{\n\t// \t\t\"a\": to.Ptr(\"b\"),\n\t// \t},\n\t// \tProperties: &armservicefabric.ClusterProperties{\n\t// \t\tAvailableClusterVersions: []*armservicefabric.ClusterVersionDetails{\n\t// \t\t\t{\n\t// \t\t\t\tCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\t\t\tEnvironment: to.Ptr(armservicefabric.ClusterEnvironmentWindows),\n\t// \t\t\t\tSupportExpiryUTC: to.Ptr(\"2018-06-15T23:59:59.9999999\"),\n\t// \t\t}},\n\t// \t\tCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t}},\n\t// \t\t\tX509StoreName: to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t},\n\t// \t\tClientCertificateCommonNames: []*armservicefabric.ClientCertificateCommonName{\n\t// \t\t},\n\t// \t\tClientCertificateThumbprints: []*armservicefabric.ClientCertificateThumbprint{\n\t// \t\t},\n\t// \t\tClusterCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\tClusterEndpoint: to.Ptr(\"https://eastus.servicefabric.azure.com\"),\n\t// \t\tClusterID: to.Ptr(\"92584666-9889-4ae8-8d02-91902923d37f\"),\n\t// \t\tClusterState: to.Ptr(armservicefabric.ClusterStateWaitingForNodes),\n\t// \t\tDiagnosticsStorageAccountConfig: &armservicefabric.DiagnosticsStorageAccountConfig{\n\t// \t\t\tBlobEndpoint: to.Ptr(\"https://diag.blob.core.windows.net/\"),\n\t// \t\t\tProtectedAccountKeyName: to.Ptr(\"StorageAccountKey1\"),\n\t// \t\t\tQueueEndpoint: to.Ptr(\"https://diag.queue.core.windows.net/\"),\n\t// \t\t\tStorageAccountName: to.Ptr(\"diag\"),\n\t// \t\t\tTableEndpoint: to.Ptr(\"https://diag.table.core.windows.net/\"),\n\t// \t\t},\n\t// \t\tEventStoreServiceEnabled: to.Ptr(true),\n\t// \t\tFabricSettings: []*armservicefabric.SettingsSectionDescription{\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"UpgradeService\"),\n\t// \t\t\t\tParameters: []*armservicefabric.SettingsParameterDescription{\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tName: to.Ptr(\"AppPollIntervalInSeconds\"),\n\t// \t\t\t\t\t\tValue: to.Ptr(\"60\"),\n\t// \t\t\t\t}},\n\t// \t\t}},\n\t// \t\tManagementEndpoint: to.Ptr(\"http://myCluster.eastus.cloudapp.azure.com:19080\"),\n\t// \t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t// \t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t// \t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t// \t\t\t\t},\n\t// \t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t// \t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t// \t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t// \t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t// \t\t\t\t},\n\t// \t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t// \t\t\t\tIsPrimary: to.Ptr(true),\n\t// 
\t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t// \t\t\t},\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"testnt1\"),\n\t// \t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\tEndPort: to.Ptr[int32](2000),\n\t// \t\t\t\t\tStartPort: to.Ptr[int32](1000),\n\t// \t\t\t\t},\n\t// \t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](0),\n\t// \t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t// \t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\tEndPort: to.Ptr[int32](4000),\n\t// \t\t\t\t\tStartPort: to.Ptr[int32](3000),\n\t// \t\t\t\t},\n\t// \t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](0),\n\t// \t\t\t\tIsPrimary: to.Ptr(false),\n\t// \t\t\t\tVMInstanceCount: to.Ptr[int32](3),\n\t// \t\t}},\n\t// \t\tNotifications: []*armservicefabric.Notification{\n\t// \t\t\t{\n\t// \t\t\t\tIsEnabled: to.Ptr(true),\n\t// \t\t\t\tNotificationCategory: to.Ptr(armservicefabric.NotificationCategoryWaveProgress),\n\t// \t\t\t\tNotificationLevel: to.Ptr(armservicefabric.NotificationLevelCritical),\n\t// \t\t\t\tNotificationTargets: []*armservicefabric.NotificationTarget{\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tNotificationChannel: to.Ptr(armservicefabric.NotificationChannelEmailUser),\n\t// \t\t\t\t\t\tReceivers: []*string{\n\t// \t\t\t\t\t\t\tto.Ptr(\"****@microsoft.com\"),\n\t// \t\t\t\t\t\t\tto.Ptr(\"****@microsoft.com\")},\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tNotificationChannel: to.Ptr(armservicefabric.NotificationChannelEmailSubscription),\n\t// \t\t\t\t\t\t\tReceivers: []*string{\n\t// \t\t\t\t\t\t\t\tto.Ptr(\"Owner\"),\n\t// \t\t\t\t\t\t\t\tto.Ptr(\"AccountAdmin\")},\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tIsEnabled: to.Ptr(true),\n\t// \t\t\t\t\t\tNotificationCategory: to.Ptr(armservicefabric.NotificationCategoryWaveProgress),\n\t// \t\t\t\t\t\tNotificationLevel: to.Ptr(armservicefabric.NotificationLevelAll),\n\t// \t\t\t\t\t\tNotificationTargets: []*armservicefabric.NotificationTarget{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tNotificationChannel: to.Ptr(armservicefabric.NotificationChannelEmailUser),\n\t// \t\t\t\t\t\t\t\tReceivers: []*string{\n\t// \t\t\t\t\t\t\t\t\tto.Ptr(\"****@microsoft.com\"),\n\t// \t\t\t\t\t\t\t\t\tto.Ptr(\"****@microsoft.com\")},\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\t\tNotificationChannel: to.Ptr(armservicefabric.NotificationChannelEmailSubscription),\n\t// \t\t\t\t\t\t\t\t\tReceivers: []*string{\n\t// \t\t\t\t\t\t\t\t\t\tto.Ptr(\"Owner\"),\n\t// \t\t\t\t\t\t\t\t\t\tto.Ptr(\"AccountAdmin\")},\n\t// \t\t\t\t\t\t\t\t}},\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tProvisioningState: to.Ptr(armservicefabric.ProvisioningStateSucceeded),\n\t// \t\t\t\t\t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelBronze),\n\t// \t\t\t\t\t\tUpgradeDescription: &armservicefabric.ClusterUpgradePolicy{\n\t// \t\t\t\t\t\t\tDeltaHealthPolicy: &armservicefabric.ClusterUpgradeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\tMaxPercentUpgradeDomainDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tForceRestart: to.Ptr(false),\n\t// \t\t\t\t\t\t\tHealthCheckRetryTimeout: to.Ptr(\"00:05:00\"),\n\t// \t\t\t\t\t\t\tHealthCheckStableDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\t\tHealthCheckWaitDuration: to.Ptr(\"00:00:30\"),\n\t// 
\t\t\t\t\t\t\tHealthPolicy: &armservicefabric.ClusterHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\tMaxPercentUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tUpgradeDomainTimeout: to.Ptr(\"00:15:00\"),\n\t// \t\t\t\t\t\t\tUpgradeReplicaSetCheckTimeout: to.Ptr(\"00:10:00\"),\n\t// \t\t\t\t\t\t\tUpgradeTimeout: to.Ptr(\"01:00:00\"),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeAutomatic),\n\t// \t\t\t\t\t\tUpgradePauseEndTimestampUTC: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2021-06-25T22:00:00Z\"); return t}()),\n\t// \t\t\t\t\t\tUpgradePauseStartTimestampUTC: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2021-06-21T22:00:00Z\"); return t}()),\n\t// \t\t\t\t\t\tUpgradeWave: to.Ptr(armservicefabric.ClusterUpgradeCadenceWave2),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t}\n}", "func SetClusterInitializedStatus(restclient *rest.RESTClient, clusterName,\n\tnamespace string) error {\n\n\tcluster := crv1.Pgcluster{}\n\tif _, err := kubeapi.Getpgcluster(restclient, &cluster, clusterName,\n\t\tnamespace); err != nil {\n\t\tlog.Error(err)\n\t\treturn err\n\t}\n\tmessage := \"Cluster has been initialized\"\n\tif err := kubeapi.PatchpgclusterStatus(restclient, crv1.PgclusterStateInitialized, message,\n\t\t&cluster, namespace); err != nil {\n\t\tlog.Error(err)\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (a *Actuator) updateClusterObjectEndpoint(c *clusterv1.Cluster, m *clusterv1.Machine) error {\n\tif len(c.Status.APIEndpoints) == 0 {\n\t\tmasterIP, err := a.GetIP(c, m)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.Status.APIEndpoints = append(c.Status.APIEndpoints,\n\t\t\tclusterv1.APIEndpoint{\n\t\t\t\tHost: masterIP,\n\t\t\t\tPort: apiServerPort,\n\t\t\t})\n\n\t\t_, err = a.v1Alpha1Client.Clusters(c.Namespace).UpdateStatus(c)\n\t\treturn err\n\t}\n\treturn nil\n}" ]
[ "0.7014352", "0.6270054", "0.62487537", "0.6182928", "0.60382164", "0.6028397", "0.60236835", "0.5977827", "0.5935069", "0.5872368", "0.58574164", "0.5826638", "0.5802612", "0.58015263", "0.57943857", "0.5768504", "0.57651097", "0.5699339", "0.5636026", "0.56122804", "0.5585891", "0.5566929", "0.5554513", "0.5525385", "0.5511016", "0.54916066", "0.5491442", "0.5467561", "0.54362226", "0.5433573", "0.5426142", "0.53953", "0.5388331", "0.5380473", "0.53514624", "0.5348105", "0.5316834", "0.529164", "0.52842695", "0.52694964", "0.5264386", "0.5248977", "0.52272034", "0.52248144", "0.52119976", "0.5198351", "0.51953954", "0.51885045", "0.51805437", "0.51559573", "0.51468575", "0.5142615", "0.5139324", "0.5119051", "0.5089856", "0.5084379", "0.50716347", "0.50689566", "0.505345", "0.5041749", "0.50362176", "0.5024971", "0.5022979", "0.502295", "0.50214046", "0.50137776", "0.50124973", "0.5009503", "0.49939138", "0.49900857", "0.4959476", "0.49519816", "0.4947644", "0.49447003", "0.49339536", "0.49305868", "0.49296072", "0.49295768", "0.49281073", "0.49280164", "0.49257642", "0.49247286", "0.4907814", "0.48993322", "0.48840952", "0.48768157", "0.48751882", "0.48719588", "0.48707286", "0.4847664", "0.48423758", "0.4838125", "0.48375875", "0.482652", "0.48065388", "0.48014134", "0.4793903", "0.47938484", "0.47860023", "0.47721624" ]
0.7731305
0
GetAllClusters retrieves all clusters in the database
func (p PGSQLConnection) GetAllClusters() ([]ClusterModel, error) {\n\tclusters := []ClusterModel{}\n\tif err := p.connection.Select(&clusters, "SELECT * FROM clusters"); err != nil {\n\t\treturn nil, err\n\t}\n\treturn clusters, nil\n}
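The document above relies on sqlx: Select runs the query and scans every returned row into the destination slice in one call, which is why GetAllClusters needs no manual rows.Next loop. A minimal, self-contained usage sketch follows; the ClusterModel fields, the PGSQLConnection wiring, and the Postgres connection string are illustrative assumptions, not part of the dataset record.

package main

import (
	"fmt"
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" // registers the "postgres" driver with database/sql
)

// ClusterModel mirrors one row of the clusters table; the fields here are
// illustrative placeholders, since the real struct is not shown in the record.
type ClusterModel struct {
	ID   int    `db:"id"`
	Name string `db:"name"`
}

// PGSQLConnection wraps an sqlx handle, matching the receiver used by
// GetAllClusters in the document.
type PGSQLConnection struct {
	connection *sqlx.DB
}

// GetAllClusters retrieves all clusters in the database.
func (p PGSQLConnection) GetAllClusters() ([]ClusterModel, error) {
	clusters := []ClusterModel{}
	// Select executes the query and appends one ClusterModel per returned row.
	if err := p.connection.Select(&clusters, "SELECT * FROM clusters"); err != nil {
		return nil, err
	}
	return clusters, nil
}

func main() {
	// Placeholder DSN; point it at a reachable Postgres instance.
	db, err := sqlx.Connect("postgres", "postgres://user:pass@localhost:5432/db?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	conn := PGSQLConnection{connection: db}
	clusters, err := conn.GetAllClusters()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("fetched %d clusters\n", len(clusters))
}

Under these assumptions the sketch compiles and runs against any Postgres database that has a clusters table whose columns match the struct tags.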
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (us *ClusterStore) GetAll() ([]model.Cluster, error) {\n\tvar cs []model.Cluster\n\tif err := us.db.Preload(clause.Associations).Find(&cs).Error; err != nil {\n\t\tif errors.Is(err, gorm.ErrRecordNotFound) {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn cs, nil\n}", "func ListAllClusters(response *JsonListClustersMap) *JsonListClustersMap {\n\tvar SIDCluster int\n\tvar SName string\n\tvar SAWSAccount int64\n\tvar SAWSRegion string\n\tvar SAWSEnvironment string\n\tvar SK8sVersion string\n\n\tvar SNodeType string\n\tvar SNodeInstance string\n\tvar STotalInstances int\n\n\tvar totalInstances int\n\n\tdescription := make(DescriptionMap)\n\n\tdb, err := sql.Open(\"mysql\", UserDB+\":\"+PassDB+\"@tcp(\"+HostDB+\":\"+PortDB+\")/\"+DatabaseDB+\"?charset=utf8\")\n\tcheckErr(err)\n\n\tdefer db.Close()\n\n\trows, err := db.Query(\"SELECT id_cluster, nome, aws_account, aws_region, aws_env, k8s_version FROM clusters ORDER BY nome\")\n\tcheckErr(err)\n\n\tfor rows.Next() {\n\t\terr = rows.Scan(&SIDCluster, &SName, &SAWSAccount, &SAWSRegion, &SAWSEnvironment, &SK8sVersion)\n\t\tcheckErr(err)\n\n\t\tdescription = DescriptionMap{}\n\t\ttotalInstances = 0\n\n\t\trows1, err := db.Query(\"SELECT node_type, node_instance, total_instances FROM nodes WHERE id_cluster=?\", SIDCluster)\n\t\tcheckErr(err)\n\n\t\tfor rows1.Next() {\n\t\t\terr = rows1.Scan(&SNodeType, &SNodeInstance, &STotalInstances)\n\t\t\tcheckErr(err)\n\n\t\t\tdescription[SNodeType] = append(\n\t\t\t\tdescription[SNodeType],\n\t\t\t\tDescriptionStruct{\n\t\t\t\t\tDescription{\n\t\t\t\t\t\tType: SNodeInstance,\n\t\t\t\t\t\tTotalTypeInstances: STotalInstances,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t)\n\n\t\t\ttotalInstances = totalInstances + STotalInstances\n\t\t}\n\n\t\t*response = append(\n\t\t\t*response,\n\t\t\tjsonListClusters{\n\t\t\t\tClusterName: SName,\n\t\t\t\tAws: AWS{\n\t\t\t\t\tAccount: SAWSAccount,\n\t\t\t\t\tRegion: SAWSRegion,\n\t\t\t\t\tEnvironment: SAWSEnvironment,\n\t\t\t\t},\n\t\t\t\tK8SVersion: SK8sVersion,\n\t\t\t\tInstances: Instances{\n\t\t\t\t\tTotalInstances: totalInstances,\n\t\t\t\t\tDescription: description,\n\t\t\t\t},\n\t\t\t},\n\t\t)\n\t}\n\n\treturn response\n}", "func (mcs *MySQLClusterService) GetAll() error {\n\tvar err error\n\tmcs.MySQLClusters, err = mcs.MySQLClusterRepo.GetAll()\n\n\treturn err\n}", "func ListAllCluster(c echo.Context) error {\n\tcblog.Info(\"call ListAllCluster()\")\n\n\tvar req struct {\n\t\tNameSpace string\n\t\tConnectionName string\n\t}\n\n\tif err := c.Bind(&req); err != nil {\n\t\treturn echo.NewHTTPError(http.StatusInternalServerError, err.Error())\n\t}\n\n\t// To support for Get-Query Param Type API\n\tif req.ConnectionName == \"\" {\n\t\treq.ConnectionName = c.QueryParam(\"ConnectionName\")\n\t}\n\n\t// Call common-runtime API\n\tallResourceList, err := cmrt.ListAllResource(req.ConnectionName, rsCluster)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusInternalServerError, err.Error())\n\t}\n\n\t// To support for Get-Query Param Type API\n\tif req.NameSpace == \"\" {\n\t\treq.NameSpace = c.QueryParam(\"NameSpace\")\n\t}\n\n\t// Resource Name has namespace prefix when from Tumblebug\n\tif req.NameSpace != \"\" {\n\t\tnameSpace := req.NameSpace + \"-\"\n\t\tfor idx, IID := range allResourceList.AllList.MappedList {\n\t\t\tif IID.NameId != \"\" {\n\t\t\t\tallResourceList.AllList.MappedList[idx].NameId = strings.Replace(IID.NameId, nameSpace, \"\", 1)\n\t\t\t}\n\t\t}\n\t\tfor idx, IID := range allResourceList.AllList.OnlySpiderList {\n\t\t\tif 
IID.NameId != \"\" {\n\t\t\t\tallResourceList.AllList.OnlySpiderList[idx].NameId = strings.Replace(IID.NameId, nameSpace, \"\", 1)\n\t\t\t}\n\t\t}\n\t\tfor idx, IID := range allResourceList.AllList.OnlyCSPList {\n\t\t\tif IID.NameId != \"\" {\n\t\t\t\tallResourceList.AllList.OnlyCSPList[idx].NameId = strings.Replace(IID.NameId, nameSpace, \"\", 1)\n\t\t\t}\n\t\t}\n\t}\n\n\tvar jsonResult struct {\n\t\tConnection string\n\t\tAllResourceList *cmrt.AllResourceList\n\t}\n\tjsonResult.Connection = req.ConnectionName\n\tjsonResult.AllResourceList = &allResourceList\n\n\treturn c.JSON(http.StatusOK, &jsonResult)\n}", "func (mcr *MiddlewareClusterRepo) GetAll() ([]metadata.MiddlewareCluster, error) {\n\tsql := `\n\t\tselect id, cluster_name, owner_id, env_id, del_flag, create_time, last_update_time\n\t\tfrom t_meta_middleware_cluster_info\n\t\twhere del_flag = 0\n\t\torder by id;\n\t`\n\tlog.Debugf(\"metadata MiddlewareClusterRepo.GetAll() sql: \\n%s\", sql)\n\n\tresult, err := mcr.Execute(sql)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// init []*MiddlewareClusterInfo\n\tmiddlewareClusterInfoList := make([]*MiddlewareClusterInfo, result.RowNumber())\n\tfor i := range middlewareClusterInfoList {\n\t\tmiddlewareClusterInfoList[i] = NewEmptyMiddlewareClusterInfoWithGlobal()\n\t}\n\t// map to struct\n\terr = result.MapToStructSlice(middlewareClusterInfoList, constant.DefaultMiddlewareTag)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// init []dependency.Entity\n\tentityList := make([]metadata.MiddlewareCluster, result.RowNumber())\n\tfor i := range entityList {\n\t\tentityList[i] = middlewareClusterInfoList[i]\n\t}\n\n\treturn entityList, nil\n}", "func (e *BcsDataManager) GetAllClusterList(ctx context.Context, req *bcsdatamanager.GetClusterListRequest,\n\trsp *bcsdatamanager.GetClusterListResponse) error {\n\tblog.Infof(\"Received GetAllClusterList.Call request. 
Dimension:%s, page:%s, size:%s, startTime=%s, endTime=%s\",\n\t\treq.GetDimension(), req.GetPage(), req.GetSize(), time.Unix(req.GetStartTime(), 0),\n\t\ttime.Unix(req.GetEndTime(), 0))\n\tstart := time.Now()\n\tresult, total, err := e.model.GetClusterInfoList(ctx, req)\n\tif err != nil {\n\t\trsp.Message = fmt.Sprintf(\"get cluster list info error: %v\", err)\n\t\trsp.Code = bcsCommon.AdditionErrorCode + 500\n\t\tblog.Errorf(rsp.Message)\n\t\tprom.ReportAPIRequestMetric(\"GetAllClusterList\", \"grpc\", prom.StatusErr, start)\n\t\treturn nil\n\t}\n\trsp.Data = result\n\trsp.Message = bcsCommon.BcsSuccessStr\n\trsp.Code = bcsCommon.BcsSuccess\n\trsp.Total = uint32(total)\n\tprom.ReportAPIRequestMetric(\"GetAllClusterList\", \"grpc\", prom.StatusOK, start)\n\treturn nil\n}", "func (a ClustersAPI) List() ([]httpmodels.GetResp, error) {\n\tvar clusterList = struct {\n\t\tClusters []httpmodels.GetResp `json:\"clusters,omitempty\" url:\"clusters,omitempty\"`\n\t}{}\n\n\tresp, err := a.Client.performQuery(http.MethodGet, \"/clusters/list\", nil, nil)\n\tif err != nil {\n\t\treturn clusterList.Clusters, err\n\t}\n\n\terr = json.Unmarshal(resp, &clusterList)\n\treturn clusterList.Clusters, err\n}", "func (c *Controller) findAllClusters() (map[string]spec.Cluster, error) {\n\tservices, err := c.config.Client.ListEtcdServices(\"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclusters := make(map[string]spec.Cluster)\n\tfor _, s := range services {\n\t\tclient := c.config.Client.Env(s.AccountId)\n\n\t\t// we need to update each service proactively to work around\n\t\t// bugs/limitation of rancher ui service creation\n\t\tif s.Scale > 0 {\n\t\t\ts2 := s\n\t\t\ts2.SelectorContainer = fmt.Sprintf(\"app=etcd,cluster=%s\", s.Id)\n\t\t\ts2.Scale = 0\n\t\t\ts2.StartOnCreate = false\n\t\t\t// we have to adjust the context here from global -> environment to make changes\n\t\t\tranchutil.SetResourceContext(&s.Resource, s.AccountId)\n\t\t\tif _, err := client.Service.Update(&s, &s2); err != nil {\n\t\t\t\tlog.Warnf(\"couldn't update service: %s\", err)\n\t\t\t}\n\t\t}\n\n\t\t// we also need to fetch the stack this service belongs to so we can name\n\t\t// containers appropriately...\n\t\tstackName := \"unknown\"\n\t\tif st, err := client.Stack.ById(s.StackId); err == nil {\n\t\t\tstackName = st.Name\n\t\t}\n\n\t\tcluster := ranchutil.ClusterFromService(s, stackName)\n\t\tclusters[cluster.Metadata.Name] = cluster\n\t}\n\treturn clusters, nil\n}", "func listClusters(w http.ResponseWriter, r *http.Request, t auth.Token) (err error) {\n\tctx := r.Context()\n\tallowed := permission.Check(t, permission.PermClusterRead)\n\tif !allowed {\n\t\treturn permission.ErrUnauthorized\n\t}\n\tclusters, err := servicemanager.Cluster.List(ctx)\n\tif err != nil {\n\t\tif err == provTypes.ErrNoCluster {\n\t\t\tw.WriteHeader(http.StatusNoContent)\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tadmin := permission.Check(t, permission.PermClusterAdmin)\n\tif !admin {\n\t\tfor i := range clusters {\n\t\t\tclusters[i].CleanUpSensitive()\n\t\t}\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\treturn json.NewEncoder(w).Encode(clusters)\n}", "func (svc ServerlessClusterService) List(ctx context.Context) (*[]models.Cluster, *Response, error) {\n\tvar clusterList []models.Cluster\n\tgraphqlRequest := models.GraphqlRequest{\n\t\tName: \"clusters\",\n\t\tOperation: models.Query,\n\t\tInput: nil,\n\t\tArgs: models.ClusterListInput{\n\t\t\tProductType: models.Starter,\n\t\t},\n\t\tResponse: clusterList,\n\t}\n\treq, err := 
svc.client.NewRequest(&graphqlRequest)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := svc.client.Do(ctx, req, &clusterList)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn &clusterList, resp, err\n}", "func FetchClusters(c *gin.Context) {\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, \"Start listing clusters\")\n\n\tvar clusters []banzaiSimpleTypes.ClusterSimple\n\tvar response []*cloud.ClusterRepresentation\n\tdatabase.Find(&clusters)\n\n\tif len(clusters) <= 0 {\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, \"No clusters found\")\n\t\tcloud.SetResponseBodyJson(c, http.StatusNotFound, gin.H{\n\t\t\tcloud.JsonKeyStatus: http.StatusNotFound,\n\t\t\tcloud.JsonKeyMessage: \"No clusters found!\",\n\t\t})\n\t\treturn\n\t}\n\n\tfor _, cl := range clusters {\n\t\tclust := cloud.GetClusterRepresentation(&cl)\n\t\tif clust != nil {\n\t\t\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, fmt.Sprintf(\"Append %#v cluster representation to response\", clust))\n\t\t\tresponse = append(response, clust)\n\t\t}\n\n\t}\n\tcloud.SetResponseBodyJson(c, http.StatusOK, gin.H{\n\t\tcloud.JsonKeyStatus: http.StatusOK,\n\t\tcloud.JsonKeyData: response,\n\t})\n}", "func (c starterClusterServiceOp) List(ctx context.Context) (*[]models.Cluster, *Response, error) {\n\tvar clusterList []models.Cluster\n\tgraphqlRequest := models.GraphqlRequest{\n\t\tName: \"clusters\",\n\t\tOperation: models.Query,\n\t\tInput: clusterList,\n\t\tArgs: models.ClusterListInput{\n\t\t\tProductType: models.Starter,\n\t\t},\n\t\tResponse: clusterList,\n\t}\n\treq, err := c.client.NewRequest(&graphqlRequest)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := c.client.Do(ctx, req, &clusterList)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn &clusterList, resp, err\n}", "func (ds *DiscoveryService) ListClusters(request *restful.Request, response *restful.Response) {\n\tkey := request.Request.URL.String()\n\tout, cached := ds.cdsCache.cachedDiscoveryResponse(key)\n\tif !cached {\n\t\tif sc := request.PathParameter(ServiceCluster); sc != ds.mesh.IstioServiceCluster {\n\t\t\terrorResponse(response, http.StatusNotFound,\n\t\t\t\tfmt.Sprintf(\"Unexpected %s %q\", ServiceCluster, sc))\n\t\t\treturn\n\t\t}\n\n\t\t// service-node holds the IP address\n\t\tip := request.PathParameter(ServiceNode)\n\t\t// CDS computes clusters that are referenced by RDS routes for a particular proxy node\n\t\t// TODO: this implementation is inefficient as it is recomputing all the routes for all proxies\n\t\t// There is a lot of potential to cache and reuse cluster definitions across proxies and also\n\t\t// skip computing the actual HTTP routes\n\t\tinstances := ds.services.HostInstances(map[string]bool{ip: true})\n\t\tservices := ds.services.Services()\n\t\thttpRouteConfigs := buildOutboundHTTPRoutes(instances, services, &ProxyContext{\n\t\t\tDiscovery: ds.services,\n\t\t\tConfig: ds.config,\n\t\t\tMeshConfig: ds.mesh,\n\t\t\tIPAddress: ip,\n\t\t})\n\n\t\t// de-duplicate and canonicalize clusters\n\t\tclusters := httpRouteConfigs.clusters().normalize()\n\n\t\t// apply custom policies for HTTP clusters\n\t\tfor _, cluster := range clusters {\n\t\t\tinsertDestinationPolicy(ds.config, cluster)\n\t\t}\n\n\t\tvar err error\n\t\tif out, err = json.MarshalIndent(ClusterManager{Clusters: clusters}, \" \", \" \"); err != nil {\n\t\t\terrorResponse(response, http.StatusInternalServerError, err.Error())\n\t\t\treturn\n\t\t}\n\t\tds.cdsCache.updateCachedDiscoveryResponse(key, 
out)\n\t}\n\twriteResponse(response, out)\n}", "func (c *ClientImpl) GetClusters(ctx context.Context, hcpHostURL string) (models.ClusterResp, error) {\n\tspan, _ := opentracing.StartSpanFromContext(ctx, \"Get Clusters\")\n\tdefer span.Finish()\n\n\tsession, err := c.getSession(ctx, hcpHostURL, hcpUserName, hcpPassword)\n\tif err != nil {\n\t\treturn models.ClusterResp{}, err\n\t}\n\n\tstatus = Failure\n\tmonitor := metrics.StartExternalCall(externalSvcName, \"Fetch Clusters\")\n\tdefer func() { monitor.RecordWithStatus(status) }()\n\n\tresp, err := mlopsHttp.ExecuteHTTPRequest(\n\t\tctx,\n\t\tc.client,\n\t\thcpHostURL+clusterPathV2,\n\t\thttp.MethodGet,\n\t\tmap[string]string{sessionHeader: session},\n\t\tbytes.NewReader(nil),\n\t)\n\tif err != nil {\n\t\treturn models.ClusterResp{}, errors.Wrapf(err, \"while fetching clusters in MLOps controller platform.\")\n\t}\n\tresp.Body.Close()\n\n\tstatus = Success\n\n\terr = c.deleteSession(ctx, hcpHostURL, session)\n\tif err != nil {\n\t\treturn models.ClusterResp{}, err\n\t}\n\n\tclustersResp := models.ClusterResp{}\n\tjson.NewDecoder(resp.Body).Decode(&clustersResp)\n\n\treturn clustersResp, nil\n}", "func (m *CDatabase) FindAll() ([]Cluster, error) {\n\tvar clusters []Cluster\n\terr := db.C(COLLECTION).Find(bson.M{}).All(&clusters)\n\treturn clusters, err\n}", "func (a *Client) ListClusters(params *ListClustersParams, authInfo runtime.ClientAuthInfoWriter) (*ListClustersOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewListClustersParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"ListClusters\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/v1/clusters\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &ListClustersReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*ListClustersOK), nil\n\n}", "func (e *ECS) ListClusters(req *ListClustersReq) (\n\t*ListClustersResp, error) {\n\tif req == nil {\n\t\treturn nil, fmt.Errorf(\"The req params cannot be nil\")\n\t}\n\n\tparams := makeParams(\"ListClusters\")\n\tif req.MaxResults > 0 {\n\t\tparams[\"maxResults\"] = strconv.Itoa(int(req.MaxResults))\n\t}\n\tif req.NextToken != \"\" {\n\t\tparams[\"nextToken\"] = req.NextToken\n\t}\n\n\tresp := new(ListClustersResp)\n\tif err := e.query(params, resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}", "func (a *DefaultApiService) ListClusters(ctx _context.Context, localVarOptionals *ListClustersOpts) (Clusters, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue Clusters\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/clusters\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif localVarOptionals != nil && localVarOptionals.Id.IsSet() {\n\t\tt:=localVarOptionals.Id.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"id[]\", parameterToString(s.Index(i), 
\"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"id[]\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.NotId.IsSet() {\n\t\tt:=localVarOptionals.NotId.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"!id[]\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"!id[]\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.StoryCountMin.IsSet() {\n\t\tlocalVarQueryParams.Add(\"story_count.min\", parameterToString(localVarOptionals.StoryCountMin.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.StoryCountMax.IsSet() {\n\t\tlocalVarQueryParams.Add(\"story_count.max\", parameterToString(localVarOptionals.StoryCountMax.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.TimeStart.IsSet() {\n\t\tlocalVarQueryParams.Add(\"time.start\", parameterToString(localVarOptionals.TimeStart.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.TimeEnd.IsSet() {\n\t\tlocalVarQueryParams.Add(\"time.end\", parameterToString(localVarOptionals.TimeEnd.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.EarliestStoryStart.IsSet() {\n\t\tlocalVarQueryParams.Add(\"earliest_story.start\", parameterToString(localVarOptionals.EarliestStoryStart.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.EarliestStoryEnd.IsSet() {\n\t\tlocalVarQueryParams.Add(\"earliest_story.end\", parameterToString(localVarOptionals.EarliestStoryEnd.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.LatestStoryStart.IsSet() {\n\t\tlocalVarQueryParams.Add(\"latest_story.start\", parameterToString(localVarOptionals.LatestStoryStart.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.LatestStoryEnd.IsSet() {\n\t\tlocalVarQueryParams.Add(\"latest_story.end\", parameterToString(localVarOptionals.LatestStoryEnd.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.LocationCountry.IsSet() {\n\t\tt:=localVarOptionals.LocationCountry.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"location.country\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"location.country\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.NotLocationCountry.IsSet() {\n\t\tt:=localVarOptionals.NotLocationCountry.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"!location.country\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"!location.country\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Return_.IsSet() {\n\t\tt:=localVarOptionals.Return_.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"return[]\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"return[]\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && 
localVarOptionals.SortBy.IsSet() {\n\t\tlocalVarQueryParams.Add(\"sort_by\", parameterToString(localVarOptionals.SortBy.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.SortDirection.IsSet() {\n\t\tlocalVarQueryParams.Add(\"sort_direction\", parameterToString(localVarOptionals.SortDirection.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Cursor.IsSet() {\n\t\tlocalVarQueryParams.Add(\"cursor\", parameterToString(localVarOptionals.Cursor.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.PerPage.IsSet() {\n\t\tlocalVarQueryParams.Add(\"per_page\", parameterToString(localVarOptionals.PerPage.Value(), \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\", \"text/xml\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"X-AYLIEN-NewsAPI-Application-ID\"] = key\n\t\t}\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"X-AYLIEN-NewsAPI-Application-Key\"] = key\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn 
localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 422 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 429 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (c *Client) GetClustersSync(ctx context.Context) ([]*Cluster, error) {\n\tclusters := make([]*Cluster, 0)\n\n\tfor result := range c.GetClusters(ctx) {\n\t\tif result.Error != nil {\n\t\t\treturn nil, result.Error\n\t\t}\n\t\tclusters = append(clusters, result.Cluster)\n\t}\n\n\treturn clusters, nil\n}", "func (adm Admin) ListClusters() (string, error) {\n\tconn := newConnection(adm.ZkSvr)\n\terr := conn.Connect()\n\tif err != nil {\n\t\tfmt.Println(\"Failed to connect to zookeeper.\")\n\t\treturn \"\", err\n\t}\n\tdefer conn.Disconnect()\n\n\tvar clusters []string\n\n\tchildren, err := conn.Children(\"/\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, cluster := range children {\n\t\tif ok, err := conn.IsClusterSetup(cluster); ok && err == nil {\n\t\t\tclusters = append(clusters, cluster)\n\t\t}\n\t}\n\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"Existing clusters: \\n\")\n\n\tfor _, cluster := range clusters {\n\t\tbuffer.WriteString(\" \" + cluster + \"\\n\")\n\t}\n\treturn buffer.String(), nil\n}", "func (c Client) ListClusters() (ClusterList, error) {\n\tbody, err := c.watsonClient.MakeRequest(\"GET\", c.version+\"/solr_clusters\", nil, nil)\n\tif err != nil {\n\t\treturn ClusterList{}, err\n\t}\n\tvar response ClusterList\n\terr = json.Unmarshal(body, &response)\n\treturn response, err\n}", "func (c *ClustersController) List(ctx *app.ListClustersContext) error {\n\t// return a single cluster given its URL\n\tif ctx.ClusterURL != nil {\n\t\t// authorization is checked at the service level for more consistency accross the codebase.\n\t\tclustr, err := c.app.ClusterService().FindByURL(ctx, *ctx.ClusterURL)\n\t\tif err != nil {\n\t\t\tif ok, _ := errors.IsNotFoundError(err); ok {\n\t\t\t\t// no result found, return an empty array\n\t\t\t\treturn ctx.OK(&app.ClusterList{\n\t\t\t\t\tData: []*app.ClusterData{},\n\t\t\t\t})\n\t\t\t}\n\t\t\t// something wrong happened, return the 
error\n\t\t\treturn app.JSONErrorResponse(ctx, err)\n\t\t}\n\t\treturn ctx.OK(&app.ClusterList{\n\t\t\tData: []*app.ClusterData{convertToClusterData(*clustr)},\n\t\t})\n\t}\n\t// otherwise, list all clusters\n\tclusters, err := c.app.ClusterService().List(ctx, ctx.Type)\n\tif err != nil {\n\t\treturn app.JSONErrorResponse(ctx, err)\n\t}\n\tvar data []*app.ClusterData\n\tfor _, clustr := range clusters {\n\t\tdata = append(data, convertToClusterData(clustr))\n\t}\n\treturn ctx.OK(&app.ClusterList{\n\t\tData: data,\n\t})\n}", "func List() ([]clusterapi.Cluster, error) {\n\tvar clusterList []clusterapi.Cluster\n\terr := utils.BrowseMetadataContent(clusterapi.ClusterMetadataPrefix, func(buf *bytes.Buffer) error {\n\t\tvar c clusterapi.Cluster\n\t\terr := gob.NewDecoder(buf).Decode(&c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tclusterList = append(clusterList, c)\n\t\treturn nil\n\t})\n\treturn clusterList, err\n}", "func (ch *ClusterHandler) GetClusters() app.Adapter {\n\treturn func(h http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tcontext := app.GetRequestContext(r)\n\n\t\t\tlogger := log.WithFields(log.Fields{\"package\": \"handlers\", \"event\": \"get_clusters\", \"request\": context.RequestId()})\n\n\t\t\tclusters, err := ch.service.GetClusters(context.RequestId())\n\t\t\tif err != nil {\n\t\t\t\tresponse := ErrorResponseAttributes{Title: \"get_clusters_error\", Detail: err.Error()}\n\t\t\t\tlogger.Error(err.Error())\n\t\t\t\trespondWithJson(w, newErrorResponse(&response, context.RequestId()), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trespondWithJson(w, newClustersResponse(clusters, context.RequestId()), http.StatusOK)\n\t\t})\n\t}\n}", "func (adm Admin) ListClusters() (string, error) {\n\tvar clusters []string\n\n\tchildren, err := adm.zkClient.Children(\"/\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, cluster := range children {\n\t\tif ok, err := adm.isClusterSetup(cluster); ok && err == nil {\n\t\t\tclusters = append(clusters, cluster)\n\t\t}\n\t}\n\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"Existing clusters: \\n\")\n\n\tfor _, cluster := range clusters {\n\t\tbuffer.WriteString(\" \" + cluster + \"\\n\")\n\t}\n\treturn buffer.String(), nil\n}", "func getAllElasticacheClusters(session *session.Session, region string, excludeAfter time.Time, configObj config.Config) ([]*string, error) {\n\tsvc := elasticache.New(session)\n\tresult, err := svc.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{})\n\tif err != nil {\n\t\treturn nil, errors.WithStackTrace(err)\n\t}\n\n\tvar clusterIds []*string\n\tfor _, cluster := range result.CacheClusters {\n\t\tif shouldIncludeElasticacheCluster(cluster, excludeAfter, configObj) {\n\t\t\tclusterIds = append(clusterIds, cluster.CacheClusterId)\n\t\t}\n\t}\n\n\treturn clusterIds, nil\n}", "func (c *Client) GetClusters(ctx context.Context) <-chan GetClusterResult {\n\t// TODO Make the concurrency configurable\n\tconcurrency := int(math.Min(5, float64(runtime.NumCPU())))\n\tresults := make(chan GetClusterResult, concurrency)\n\n\tclusterNames, err := c.GetClusterNames(ctx)\n\tif err != nil {\n\t\tclose(results)\n\t\treturn results\n\t}\n\n\tvar wg sync.WaitGroup\n\n\tgo func() {\n\t\tdefer close(results)\n\t\tfor _, clusterName := range clusterNames {\n\t\t\twg.Add(1)\n\t\t\tgo func(name string) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tcluster, err := c.GetCluster(ctx, name)\n\t\t\t\tresult := GetClusterResult{Cluster: cluster, 
Error: err}\n\t\t\t\tresults <- result\n\t\t\t}(clusterName)\n\t\t}\n\t\twg.Wait()\n\t}()\n\n\treturn results\n}", "func (o *QueueManager) GetClusters() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\n\treturn o.Clusters\n}", "func handleGetClusters(c *Context, w http.ResponseWriter, r *http.Request) {\n\tpaging, err := parsePaging(r.URL)\n\tif err != nil {\n\t\tc.Logger.WithError(err).Error(\"failed to parse paging parameters\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tfilter := &model.ClusterFilter{\n\t\tPaging: paging,\n\t}\n\n\tclusters, err := c.Store.GetClusterDTOs(filter)\n\tif err != nil {\n\t\tc.Logger.WithError(err).Error(\"failed to query clusters\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif clusters == nil {\n\t\tclusters = []*model.ClusterDTO{}\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\toutputJSON(c, w, clusters)\n}", "func GetAllRdsDBClusters(region string, out string) {\n\tawsSession, _ := InitAwsSession(region)\n\tsvc := rds.New(awsSession)\n\tinput := &rds.DescribeDBClustersInput{}\n\trdsClusters, _ := svc.DescribeDBClusters(input)\n\tprintRdsClusters(rdsClusters.DBClusters, region, out)\n}", "func (bc *Baiducloud) ListClusters(ctx context.Context) ([]string, error) {\n\treturn nil, fmt.Errorf(\"ListClusters unimplemented\")\n}", "func (c *Config) GetClusters(ctx context.Context, quiet bool, filterMap map[string]string, clustersName ...string) (string, error) {\n\tc.Logger.Debugf(\"Sending parameters to server to get the clusters %q\", strings.Join(clustersName, \", \"))\n\n\tfilter := MapToSlice(filterMap)\n\n\treturn c.RunGRPCnRESTFunc(\"get\", true,\n\t\tfunc() (string, error) {\n\t\t\treturn c.getClustersGRPC(ctx, quiet, filter, clustersName...)\n\t\t},\n\t\tfunc() (string, error) {\n\t\t\treturn c.getClustersHTTP(quiet, filter, clustersName...)\n\t\t})\n}", "func (d *Dao) OverlordClusters(c context.Context, zone, appid string) (ocs []*model.OverlordCluster, err error) {\n\tvar res struct {\n\t\tData []*model.OverlordApiserver `json:\"grouped_clusters\"`\n\t}\n\tif err = d.client.RESTfulGet(c, apiserverURI, \"\", nil, &res, appid); err != nil {\n\t\tlog.Error(\"overlord cluster url(%s) appid(%s) error(%v)\", apiserverURI, appid, err)\n\t\treturn\n\t}\nGETALL:\n\tfor _, oa := range res.Data {\n\t\tif zone == \"\" || oa.Group == zone {\n\t\t\tfor _, oc := range oa.Clusters {\n\t\t\t\tcluster := &model.OverlordCluster{\n\t\t\t\t\tName: oc.Name,\n\t\t\t\t\tType: oc.Type,\n\t\t\t\t\tZone: zone,\n\t\t\t\t\tHashMethod: \"fnv1a_64\",\n\t\t\t\t\tHashDistribution: \"ketama\",\n\t\t\t\t\tHashTag: \"{}\",\n\t\t\t\t\tListenProto: \"tcp\",\n\t\t\t\t\tListenAddr: net.JoinHostPort(\"0.0.0.0\", strconv.Itoa(oc.FrontEndPort)),\n\t\t\t\t\tDailTimeout: 1000,\n\t\t\t\t\tReadTimeout: 1000,\n\t\t\t\t\tWriteTimeout: 1000,\n\t\t\t\t\tNodeConn: 2,\n\t\t\t\t\tPingFailLimit: 3,\n\t\t\t\t\tPingAutoEject: true,\n\t\t\t\t}\n\t\t\t\tfor _, oci := range oc.Instances {\n\t\t\t\t\tif oc.Type == \"redis_cluster\" && oci.Role != \"master\" {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\ton := &model.OverlordNode{\n\t\t\t\t\t\tAlias: oci.Alias,\n\t\t\t\t\t\tAddr: net.JoinHostPort(oci.IP, strconv.Itoa(oci.Port)),\n\t\t\t\t\t\tWeight: oci.Weight,\n\t\t\t\t\t}\n\t\t\t\t\tcluster.Nodes = append(cluster.Nodes, on)\n\t\t\t\t}\n\t\t\t\tocs = append(ocs, cluster)\n\t\t\t}\n\t\t}\n\t}\n\tif len(ocs) == 0 && zone != \"\" {\n\t\tzone = \"\"\n\t\tgoto GETALL\n\t}\n\treturn\n}", 
"func ListClusters(c *cli.Context) error {\n\tif err := printClusters(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (p *Provider) List() ([]string, error) {\n\treturn p.provider.ListClusters()\n}", "func (a *ClustersApiService) ListClusters(ctx _context.Context, space string) ApiListClustersRequest {\n\treturn ApiListClustersRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tspace: space,\n\t}\n}", "func NewClusters(db *gorm.DB) *Clusters {\n\treturn &Clusters{db: db}\n}", "func (a *Client) VirtualizationClustersList(params *VirtualizationClustersListParams) (*VirtualizationClustersListOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewVirtualizationClustersListParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"virtualization_clusters_list\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/virtualization/clusters/\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &VirtualizationClustersListReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*VirtualizationClustersListOK), nil\n\n}", "func (s *Server) GetClusters() []*api.Cluster {\n\tinstances := s.doGetClusters()\n\tclusters := make([]*api.Cluster, len(instances))\n\tfor i, instance := range instances {\n\t\tclusters[i] = convertClusterToAPI(instance)\n\t}\n\treturn clusters\n}", "func (a *Client) GetClusters(params *GetClustersParams, opts ...ClientOption) (*GetClustersOK, *GetClustersMultiStatus, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetClustersParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"GetClusters\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/kubernetes-protection/entities/kubernetes/clusters/v1\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\", \"application/octet-stream\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetClustersReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tswitch value := result.(type) {\n\tcase *GetClustersOK:\n\t\treturn value, nil, nil\n\tcase *GetClustersMultiStatus:\n\t\treturn nil, value, nil\n\t}\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for kubernetes_protection: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (a *ClustersApiService) ClusterServiceListClusters(ctx context.Context, body Servicev1ClusterQuery) (V1Clusterlist, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Post\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue V1Clusterlist\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/gitops/api/v1/clusters\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tlocalVarQueryParams.Add(\"routingId\", body.AccountIdentifier)\n\t// body params\n\tlocalVarPostBody = &body\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"x-api-key\"] = key\n\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode < 300 {\n\t\t// If we succeed, return the data, otherwise pass on to decode error.\n\t\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\tif err == nil {\n\t\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t\t}\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v V1Clusterlist\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 0 {\n\t\t\tvar v GatewayruntimeError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, 
newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}", "func (metadata *metadataImpl) GetAllClusterInfo() map[string]config.ClusterInformation {\n\treturn metadata.clusterInfo\n}", "func getClusters(kubeconfig string) ([]string, error) {\n\tkubectlArgs := []string{\"kubectl\"}\n\tif kubeconfig != \"\" {\n\t\tkubectlArgs = append(kubectlArgs, fmt.Sprintf(\"--kubeconfig=%s\", kubeconfig))\n\t}\n\tcontextArgs := append(kubectlArgs, []string{\"config\", \"get-contexts\", \"-o=name\"}...)\n\toutput, err := runCommand(contextArgs)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error in getting contexts from kubeconfig: %s\", err)\n\t}\n\treturn strings.Split(output, \"\\n\"), nil\n}", "func GetClusters(config core.Configuration, clusterID *string, localQuotaUsageOnly bool, withSubcapacities bool, dbi db.Interface, filter Filter) ([]*limes.ClusterReport, error) {\n\t//first query: collect project usage data in these clusters\n\tclusters := make(clusters)\n\tqueryStr, joinArgs := filter.PrepareQuery(clusterReportQuery1)\n\twhereStr, whereArgs := db.BuildSimpleWhereClause(makeClusterFilter(\"d\", clusterID), len(joinArgs))\n\terr := db.ForeachRow(db.DB, fmt.Sprintf(queryStr, whereStr), append(joinArgs, whereArgs...), func(rows *sql.Rows) error {\n\t\tvar (\n\t\t\tclusterID string\n\t\t\tserviceType *string\n\t\t\tresourceName *string\n\t\t\tprojectsQuota *uint64\n\t\t\tusage *uint64\n\t\t\tburstUsage *uint64\n\t\t\tminScrapedAt *util.Time\n\t\t\tmaxScrapedAt *util.Time\n\t\t)\n\t\terr := rows.Scan(&clusterID, &serviceType, &resourceName,\n\t\t\t&projectsQuota, &usage, &burstUsage,\n\t\t\t&minScrapedAt, &maxScrapedAt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t_, service, resource := clusters.Find(config, clusterID, serviceType, resourceName)\n\n\t\tclusterConfig, exists := config.Clusters[clusterID]\n\t\tclusterCanBurst := exists && clusterConfig.Config.Bursting.MaxMultiplier > 0\n\n\t\tif service != nil {\n\t\t\tif maxScrapedAt != nil {\n\t\t\t\tval := time.Time(*maxScrapedAt).Unix()\n\t\t\t\tservice.MaxScrapedAt = &val\n\t\t\t}\n\t\t\tif minScrapedAt != nil {\n\t\t\t\tval := time.Time(*minScrapedAt).Unix()\n\t\t\t\tservice.MinScrapedAt = &val\n\t\t\t}\n\t\t}\n\n\t\tif resource != nil {\n\t\t\tif projectsQuota != nil && resource.ExternallyManaged {\n\t\t\t\tresource.DomainsQuota = *projectsQuota\n\t\t\t}\n\t\t\tif usage != nil {\n\t\t\t\tresource.Usage = *usage\n\t\t\t}\n\t\t\tif clusterCanBurst && burstUsage != nil {\n\t\t\t\tresource.BurstUsage = *burstUsage\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//second query: collect domain quota data in these clusters\n\tqueryStr, joinArgs = filter.PrepareQuery(clusterReportQuery2)\n\twhereStr, whereArgs = db.BuildSimpleWhereClause(makeClusterFilter(\"d\", clusterID), len(joinArgs))\n\terr = db.ForeachRow(db.DB, fmt.Sprintf(queryStr, whereStr), append(joinArgs, whereArgs...), func(rows *sql.Rows) error {\n\t\tvar (\n\t\t\tclusterID string\n\t\t\tserviceType *string\n\t\t\tresourceName *string\n\t\t\tquota *uint64\n\t\t)\n\t\terr := rows.Scan(&clusterID, &serviceType, &resourceName, &quota)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t_, _, resource := clusters.Find(config, clusterID, serviceType, resourceName)\n\n\t\tif resource != nil && quota != nil && !resource.ExternallyManaged 
{\n\t\t\tresource.DomainsQuota = *quota\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//third query: collect capacity data for these clusters\n\tqueryStr, joinArgs = filter.PrepareQuery(clusterReportQuery3)\n\tif !withSubcapacities {\n\t\tqueryStr = strings.Replace(queryStr, \"cr.subcapacities\", \"''\", 1)\n\t}\n\twhereStr, whereArgs = db.BuildSimpleWhereClause(makeClusterFilter(\"cs\", clusterID), len(joinArgs))\n\terr = db.ForeachRow(db.DB, fmt.Sprintf(queryStr, whereStr), append(joinArgs, whereArgs...), func(rows *sql.Rows) error {\n\t\tvar (\n\t\t\tclusterID string\n\t\t\tserviceType string\n\t\t\tresourceName *string\n\t\t\trawCapacity *uint64\n\t\t\tcomment *string\n\t\t\tsubcapacities *string\n\t\t\tscrapedAt util.Time\n\t\t)\n\t\terr := rows.Scan(&clusterID, &serviceType, &resourceName, &rawCapacity, &comment, &subcapacities, &scrapedAt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcluster, _, resource := clusters.Find(config, clusterID, &serviceType, resourceName)\n\n\t\tif resource != nil {\n\t\t\tovercommitFactor := config.Clusters[clusterID].BehaviorForResource(serviceType, *resourceName).OvercommitFactor\n\t\t\tif overcommitFactor == 0 {\n\t\t\t\tresource.Capacity = rawCapacity\n\t\t\t} else {\n\t\t\t\tresource.RawCapacity = rawCapacity\n\t\t\t\tcapacity := uint64(float64(*rawCapacity) * overcommitFactor)\n\t\t\t\tresource.Capacity = &capacity\n\t\t\t}\n\t\t\tif comment != nil {\n\t\t\t\tresource.Comment = *comment\n\t\t\t}\n\t\t\tif subcapacities != nil && *subcapacities != \"\" {\n\t\t\t\tresource.Subcapacities = limes.JSONString(*subcapacities)\n\t\t\t}\n\t\t}\n\n\t\tif cluster != nil {\n\t\t\tscrapedAtUnix := time.Time(scrapedAt).Unix()\n\t\t\tif cluster.MaxScrapedAt == nil || *cluster.MaxScrapedAt < scrapedAtUnix {\n\t\t\t\tcluster.MaxScrapedAt = &scrapedAtUnix\n\t\t\t}\n\t\t\tif cluster.MinScrapedAt == nil || *cluster.MinScrapedAt > scrapedAtUnix {\n\t\t\t\tcluster.MinScrapedAt = &scrapedAtUnix\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//enumerate shared services\n\tisSharedService := make(map[string]bool)\n\tfor clusterID := range clusters {\n\t\tclusterConfig, exists := config.Clusters[clusterID]\n\t\tif exists {\n\t\t\tfor serviceType, shared := range clusterConfig.IsServiceShared {\n\t\t\t\tif shared {\n\t\t\t\t\tisSharedService[serviceType] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(isSharedService) > 0 {\n\n\t\tif !localQuotaUsageOnly {\n\n\t\t\t//fourth query: aggregate domain quota for shared services\n\t\t\tsharedQuotaSums := make(map[string]map[string]uint64)\n\n\t\t\tsharedServiceTypes := make([]string, 0, len(isSharedService))\n\t\t\tfor serviceType := range isSharedService {\n\t\t\t\tsharedServiceTypes = append(sharedServiceTypes, serviceType)\n\t\t\t}\n\t\t\twhereStr, queryArgs := db.BuildSimpleWhereClause(map[string]interface{}{\"ds.type\": sharedServiceTypes}, 0)\n\t\t\terr = db.ForeachRow(db.DB, fmt.Sprintf(clusterReportQuery4, whereStr), queryArgs, func(rows *sql.Rows) error {\n\t\t\t\tvar (\n\t\t\t\t\tserviceType string\n\t\t\t\t\tresourceName string\n\t\t\t\t\tquota uint64\n\t\t\t\t)\n\t\t\t\terr := rows.Scan(&serviceType, &resourceName, &quota)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tif sharedQuotaSums[serviceType] == nil {\n\t\t\t\t\tsharedQuotaSums[serviceType] = make(map[string]uint64)\n\t\t\t\t}\n\t\t\t\tsharedQuotaSums[serviceType][resourceName] = quota\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\tif err != nil 
{\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t//fifth query: aggregate project quota for shared services\n\t\t\twhereStr, queryArgs = db.BuildSimpleWhereClause(map[string]interface{}{\"ps.type\": sharedServiceTypes}, 0)\n\t\t\tsharedUsageSums := make(map[string]map[string]uint64)\n\t\t\terr = db.ForeachRow(db.DB, fmt.Sprintf(clusterReportQuery5, whereStr), queryArgs, func(rows *sql.Rows) error {\n\t\t\t\tvar (\n\t\t\t\t\tserviceType string\n\t\t\t\t\tresourceName string\n\t\t\t\t\tusage uint64\n\t\t\t\t)\n\t\t\t\terr := rows.Scan(&serviceType, &resourceName, &usage)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tif sharedUsageSums[serviceType] == nil {\n\t\t\t\t\tsharedUsageSums[serviceType] = make(map[string]uint64)\n\t\t\t\t}\n\t\t\t\tsharedUsageSums[serviceType][resourceName] = usage\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tfor _, cluster := range clusters {\n\t\t\t\tisSharedService := make(map[string]bool)\n\t\t\t\tfor serviceType, shared := range config.Clusters[cluster.ID].IsServiceShared {\n\t\t\t\t\t//NOTE: cluster config is guaranteed to exist due to earlier validation\n\t\t\t\t\tif shared {\n\t\t\t\t\t\tisSharedService[serviceType] = true\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tfor _, service := range cluster.Services {\n\t\t\t\t\tif isSharedService[service.Type] && sharedQuotaSums[service.Type] != nil {\n\t\t\t\t\t\tfor _, resource := range service.Resources {\n\t\t\t\t\t\t\tquota, exists := sharedQuotaSums[service.Type][resource.Name]\n\t\t\t\t\t\t\tif exists {\n\t\t\t\t\t\t\t\tresource.DomainsQuota = quota\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tusage, exists := sharedUsageSums[service.Type][resource.Name]\n\t\t\t\t\t\t\tif exists {\n\t\t\t\t\t\t\t\tresource.Usage = usage\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\n\t\t//third query again, but this time to collect shared capacities\n\t\tqueryStr, joinArgs = filter.PrepareQuery(clusterReportQuery3)\n\t\tif !withSubcapacities {\n\t\t\tqueryStr = strings.Replace(queryStr, \"cr.subcapacities\", \"''\", 1)\n\t\t}\n\t\tfilter := map[string]interface{}{\"cs.cluster_id\": \"shared\"}\n\t\twhereStr, whereArgs = db.BuildSimpleWhereClause(filter, len(joinArgs))\n\t\terr = db.ForeachRow(db.DB, fmt.Sprintf(queryStr, whereStr), append(joinArgs, whereArgs...), func(rows *sql.Rows) error {\n\t\t\tvar (\n\t\t\t\tsharedClusterID string\n\t\t\t\tserviceType string\n\t\t\t\tresourceName *string\n\t\t\t\trawCapacity *uint64\n\t\t\t\tcomment *string\n\t\t\t\tsubcapacities *string\n\t\t\t\tscrapedAt util.Time\n\t\t\t)\n\t\t\terr := rows.Scan(&sharedClusterID, &serviceType, &resourceName, &rawCapacity, &comment, &subcapacities, &scrapedAt)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfor _, cluster := range clusters {\n\t\t\t\tif !config.Clusters[cluster.ID].IsServiceShared[serviceType] {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t_, _, resource := clusters.Find(config, cluster.ID, &serviceType, resourceName)\n\n\t\t\t\tif resource != nil {\n\t\t\t\t\tovercommitFactor := config.Clusters[cluster.ID].BehaviorForResource(serviceType, *resourceName).OvercommitFactor\n\t\t\t\t\tif overcommitFactor == 0 {\n\t\t\t\t\t\tresource.Capacity = rawCapacity\n\t\t\t\t\t} else {\n\t\t\t\t\t\tresource.RawCapacity = rawCapacity\n\t\t\t\t\t\tcapacity := uint64(float64(*rawCapacity) * overcommitFactor)\n\t\t\t\t\t\tresource.Capacity = &capacity\n\t\t\t\t\t}\n\t\t\t\t\tif comment != nil {\n\t\t\t\t\t\tresource.Comment = *comment\n\t\t\t\t\t}\n\t\t\t\t\tif 
subcapacities != nil && *subcapacities != \"\" {\n\t\t\t\t\t\tresource.Subcapacities = limes.JSONString(*subcapacities)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tscrapedAtUnix := time.Time(scrapedAt).Unix()\n\t\t\t\tif cluster.MaxScrapedAt == nil || *cluster.MaxScrapedAt < scrapedAtUnix {\n\t\t\t\t\tcluster.MaxScrapedAt = &scrapedAtUnix\n\t\t\t\t}\n\t\t\t\tif cluster.MinScrapedAt == nil || *cluster.MinScrapedAt > scrapedAtUnix {\n\t\t\t\t\tcluster.MinScrapedAt = &scrapedAtUnix\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t}\n\n\t//flatten result (with stable order to keep the tests happy)\n\tids := make([]string, 0, len(clusters))\n\tfor id := range clusters {\n\t\tids = append(ids, id)\n\t}\n\tsort.Strings(ids)\n\tresult := make([]*limes.ClusterReport, len(clusters))\n\tfor idx, id := range ids {\n\t\tresult[idx] = clusters[id]\n\t}\n\n\treturn result, nil\n}", "func (c *Client) AllDatacenters() (datacenters []Datacenter, err error) {\n\tresp, err := c.Get(\"/datacenters\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar m []Datacenter\n\tif err := decodeBodyMap(resp.Body, &m); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn m, nil\n}", "func ListAppsByClusters() JsonAppsByClustersMap {\n\tvar SClusterName string\n\tvar SNamespace string\n\tvar SAppName string\n\tvar SAppType string\n\tvar SHelmVersion string\n\tvar SHelmChart string\n\tvar SHelmAPPVersion string\n\tvar SHpaEnabled bool\n\tvar SVaultEnabled bool\n\n\tresponse := make(JsonAppsByClustersMap)\n\n\tdb, err := sql.Open(\"mysql\", UserDB+\":\"+PassDB+\"@tcp(\"+HostDB+\":\"+PortDB+\")/\"+DatabaseDB+\"?charset=utf8\")\n\tcheckErr(err)\n\n\tdefer db.Close()\n\n\trows, err := db.Query(\"SELECT clusters.nome, apps.namespace, apps.app, apps.type, IFNULL(helm.helm_version, \\\"\\\"), IFNULL(helm.chart, \\\"\\\"), IFNULL(helm.app_version, \\\"\\\"), apps.hpa_enabled, apps.vault_enabled FROM apps INNER JOIN clusters ON (apps.id_cluster=clusters.id_cluster) LEFT JOIN helm ON (apps.app=helm.app AND apps.namespace=helm.namespace AND apps.id_cluster=helm.id_cluster) ORDER BY apps.namespace,apps.app\")\n\tcheckErr(err)\n\n\tfor rows.Next() {\n\t\terr = rows.Scan(&SClusterName, &SNamespace, &SAppName, &SAppType, &SHelmVersion, &SHelmChart, &SHelmAPPVersion, &SHpaEnabled, &SVaultEnabled)\n\t\tcheckErr(err)\n\n\t\tresponse[SClusterName] = append(\n\t\t\tresponse[SClusterName],\n\t\t\tjsonAppsByClusters{\n\t\t\t\tName: SAppName,\n\t\t\t\tNamespace: SNamespace,\n\t\t\t\tType: SAppType,\n\t\t\t\tHpaEnabled: SHpaEnabled,\n\t\t\t\tVaultEnabled: SVaultEnabled,\n\t\t\t\tHelm: Helm{\n\t\t\t\t\tVersion: SHelmVersion,\n\t\t\t\t\tChart: SHelmChart,\n\t\t\t\t\tAPPVersion: SHelmAPPVersion,\n\t\t\t\t},\n\t\t\t},\n\t\t)\n\t}\n\n\treturn response\n}", "func clusterList() []string {\n\tif c := envy.String(\"DQLITED_CLUSTER\"); c != \"\" {\n\t\treturn strings.Split(c, \",\")\n\t}\n\treturn defaultCluster\n}", "func (c *Client) ListClustersForProjectAndDatacenter(projectID string, seed string) ([]models.Cluster, error) {\n\treq, err := c.newRequest(\"GET\", projectPath+\"/\"+projectID+datacenterSubPath+\"/\"+seed+clustersSubPath, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresult := make([]models.Cluster, 0)\n\n\tresp, err := c.do(req, &result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// StatusCodes 401 and 403 mean empty response and should be treated as such\n\tif resp.StatusCode == 401 || resp.StatusCode == 403 {\n\t\treturn nil, nil\n\t}\n\n\tif resp.StatusCode >= 300 
{\n\t\treturn nil, errors.New(\"Got non-2xx return code: \" + strconv.Itoa(resp.StatusCode))\n\t}\n\n\treturn result, nil\n}", "func (api *clusterAPI) List(ctx context.Context, opts *api.ListWatchOptions) ([]*Cluster, error) {\n\tvar objlist []*Cluster\n\tobjs, err := api.ct.List(\"Cluster\", ctx, opts)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, obj := range objs {\n\t\tswitch tp := obj.(type) {\n\t\tcase *Cluster:\n\t\t\teobj := obj.(*Cluster)\n\t\t\tobjlist = append(objlist, eobj)\n\t\tdefault:\n\t\t\tlog.Fatalf(\"Got invalid object type %v while looking for Cluster\", tp)\n\t\t}\n\t}\n\n\treturn objlist, nil\n}", "func (a *ClustersApiService) ListClustersExecute(r ApiListClustersRequest) (ListClustersResponse, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue ListClustersResponse\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"ClustersApiService.ListClusters\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/spaces/{space}/clusters\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"space\"+\"}\", _neturl.PathEscape(parameterToString(r.space, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (m *Manager) GetClusterList() ([]Cluster, error) {\n\tnames, err 
:= m.specManager.List()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar clusters = []Cluster{}\n\n\tfor _, name := range names {\n\t\tmetadata, err := m.meta(name)\n\t\tif err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) &&\n\t\t\t!errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) {\n\t\t\treturn nil, perrs.Trace(err)\n\t\t}\n\n\t\tbase := metadata.GetBaseMeta()\n\n\t\tclusters = append(clusters, Cluster{\n\t\t\tName: name,\n\t\t\tUser: base.User,\n\t\t\tVersion: base.Version,\n\t\t\tPath: m.specManager.Path(name),\n\t\t\tPrivateKey: m.specManager.Path(name, \"ssh\", \"id_rsa\"),\n\t\t})\n\t}\n\n\treturn clusters, nil\n}", "func (m *Manager) ListCluster() error {\n\tclusters, err := m.GetClusterList()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch m.logger.GetDisplayMode() {\n\tcase logprinter.DisplayModeJSON:\n\t\tclusterObj := struct {\n\t\t\tClusters []Cluster `json:\"clusters\"`\n\t\t}{\n\t\t\tClusters: clusters,\n\t\t}\n\t\tdata, err := json.Marshal(clusterObj)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(string(data))\n\tdefault:\n\t\tclusterTable := [][]string{\n\t\t\t// Header\n\t\t\t{\"Name\", \"User\", \"Version\", \"Path\", \"PrivateKey\"},\n\t\t}\n\t\tfor _, v := range clusters {\n\t\t\tclusterTable = append(clusterTable, []string{\n\t\t\t\tv.Name,\n\t\t\t\tv.User,\n\t\t\t\tv.Version,\n\t\t\t\tv.Path,\n\t\t\t\tv.PrivateKey,\n\t\t\t})\n\t\t}\n\t\ttui.PrintTable(clusterTable, true)\n\t}\n\treturn nil\n}", "func (s *clusterService) Clusters(ctx context.Context, options ...rest.HTTPClientOption) ([]cluster.Cluster, error) {\n\t_, err := Start(ctx, s.Factories().ClusterCacheFactory(), options...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclusterCache.RLock()\n\tdefer clusterCache.RUnlock()\n\n\treturn Clusters(clusterCache.Clusters()), nil\n}", "func (q *QueryResolver) Clusters(ctx context.Context) ([]*ClusterInfoResolver, error) {\n\tgrpcAPI := q.Env.VizierClusterInfo\n\tresp, err := grpcAPI.GetClusterInfo(ctx, &cloudpb.GetClusterInfoRequest{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar res []*ClusterInfoResolver\n\tfor _, cluster := range resp.Clusters {\n\t\tresolver, err := clusterInfoToResolver(cluster)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tres = append(res, resolver)\n\t}\n\treturn res, nil\n}", "func (s *RaftDatabase) Clusters() int {\n\treturn GetArg(s.name, \"clusters\").Int(s.clusters)\n}", "func GetAllClusterNode(client redigo.Conn, role string, choose string) ([]string, error) {\n\tret, err := client.Do(\"cluster\", \"nodes\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnodeList := ParseClusterNode(ret.([]byte))\n\tnodeListChoose := ClusterNodeChoose(nodeList, role)\n\n\tresult := make([]string, 0, len(nodeListChoose))\n\tfor _, ele := range nodeListChoose {\n\t\tif choose == \"id\" {\n\t\t\tresult = append(result, ele.Id)\n\t\t} else {\n\t\t\tresult = append(result, ele.Address)\n\t\t}\n\t}\n\n\treturn result, nil\n}", "func ExampleClustersClient_List() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armservicefabric.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewClustersClient().List(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. 
We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.ClusterListResult = armservicefabric.ClusterListResult{\n\t// \tValue: []*armservicefabric.Cluster{\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"myCluster\"),\n\t// \t\t\tType: to.Ptr(\"Microsoft.ServiceFabric/clusters\"),\n\t// \t\t\tEtag: to.Ptr(\"W/\\\"636462502169240745\\\"\"),\n\t// \t\t\tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resRg/providers/Microsoft.ServiceFabric/clusters/myCluster\"),\n\t// \t\t\tLocation: to.Ptr(\"eastus\"),\n\t// \t\t\tTags: map[string]*string{\n\t// \t\t\t},\n\t// \t\t\tProperties: &armservicefabric.ClusterProperties{\n\t// \t\t\t\tAddOnFeatures: []*armservicefabric.AddOnFeatures{\n\t// \t\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesRepairManager),\n\t// \t\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesDNSService),\n\t// \t\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesBackupRestoreService),\n\t// \t\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesResourceMonitorService)},\n\t// \t\t\t\t\tAvailableClusterVersions: []*armservicefabric.ClusterVersionDetails{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\t\t\t\t\t\tEnvironment: to.Ptr(armservicefabric.ClusterEnvironmentWindows),\n\t// \t\t\t\t\t\t\tSupportExpiryUTC: to.Ptr(\"2018-06-15T23:59:59.9999999\"),\n\t// \t\t\t\t\t}},\n\t// \t\t\t\t\tAzureActiveDirectory: &armservicefabric.AzureActiveDirectory{\n\t// \t\t\t\t\t\tClientApplication: to.Ptr(\"d151ad89-4bce-4ae8-b3d1-1dc79679fa75\"),\n\t// \t\t\t\t\t\tClusterApplication: to.Ptr(\"5886372e-7bf4-4878-a497-8098aba608ae\"),\n\t// \t\t\t\t\t\tTenantID: to.Ptr(\"6abcc6a0-8666-43f1-87b8-172cf86a9f9c\"),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tX509StoreName: to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tClientCertificateCommonNames: []*armservicefabric.ClientCertificateCommonName{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t// \t\t\t\t\t\t\tIsAdmin: to.Ptr(true),\n\t// \t\t\t\t\t}},\n\t// \t\t\t\t\tClientCertificateThumbprints: []*armservicefabric.ClientCertificateThumbprint{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tCertificateThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t// \t\t\t\t\t\t\tIsAdmin: to.Ptr(false),\n\t// \t\t\t\t\t}},\n\t// \t\t\t\t\tClusterCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\t\t\t\tClusterEndpoint: to.Ptr(\"https://eastus.servicefabric.azure.com\"),\n\t// \t\t\t\t\tClusterID: to.Ptr(\"92584666-9889-4ae8-8d02-91902923d37f\"),\n\t// \t\t\t\t\tClusterState: to.Ptr(armservicefabric.ClusterStateWaitingForNodes),\n\t// \t\t\t\t\tDiagnosticsStorageAccountConfig: &armservicefabric.DiagnosticsStorageAccountConfig{\n\t// \t\t\t\t\t\tBlobEndpoint: to.Ptr(\"https://diag.blob.core.windows.net/\"),\n\t// \t\t\t\t\t\tProtectedAccountKeyName: 
to.Ptr(\"StorageAccountKey1\"),\n\t// \t\t\t\t\t\tQueueEndpoint: to.Ptr(\"https://diag.queue.core.windows.net/\"),\n\t// \t\t\t\t\t\tStorageAccountName: to.Ptr(\"diag\"),\n\t// \t\t\t\t\t\tTableEndpoint: to.Ptr(\"https://diag.table.core.windows.net/\"),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tFabricSettings: []*armservicefabric.SettingsSectionDescription{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tName: to.Ptr(\"UpgradeService\"),\n\t// \t\t\t\t\t\t\tParameters: []*armservicefabric.SettingsParameterDescription{\n\t// \t\t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\t\tName: to.Ptr(\"AppPollIntervalInSeconds\"),\n\t// \t\t\t\t\t\t\t\t\tValue: to.Ptr(\"60\"),\n\t// \t\t\t\t\t\t\t}},\n\t// \t\t\t\t\t}},\n\t// \t\t\t\t\tManagementEndpoint: to.Ptr(\"https://myCluster.eastus.cloudapp.azure.com:19080\"),\n\t// \t\t\t\t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t// \t\t\t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t// \t\t\t\t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t// \t\t\t\t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t// \t\t\t\t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t// \t\t\t\t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t// \t\t\t\t\t\t\tIsPrimary: to.Ptr(true),\n\t// \t\t\t\t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t// \t\t\t\t\t}},\n\t// \t\t\t\t\tProvisioningState: to.Ptr(armservicefabric.ProvisioningStateSucceeded),\n\t// \t\t\t\t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelSilver),\n\t// \t\t\t\t\tReverseProxyCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tX509StoreName: to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tUpgradeDescription: &armservicefabric.ClusterUpgradePolicy{\n\t// \t\t\t\t\t\tDeltaHealthPolicy: &armservicefabric.ClusterUpgradeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\tApplicationDeltaHealthPolicies: map[string]*armservicefabric.ApplicationDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\"fabric:/myApp1\": &armservicefabric.ApplicationDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\tDefaultServiceTypeDeltaHealthPolicy: &armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\tServiceTypeDeltaHealthPolicies: map[string]*armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\"myServiceType1\": &armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tMaxPercentDeltaUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\tMaxPercentDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\tMaxPercentUpgradeDomainDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// 
\t\t\t\t\t\t},\n\t// \t\t\t\t\t\tForceRestart: to.Ptr(false),\n\t// \t\t\t\t\t\tHealthCheckRetryTimeout: to.Ptr(\"00:05:00\"),\n\t// \t\t\t\t\t\tHealthCheckStableDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\tHealthCheckWaitDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\tHealthPolicy: &armservicefabric.ClusterHealthPolicy{\n\t// \t\t\t\t\t\t\tApplicationHealthPolicies: map[string]*armservicefabric.ApplicationHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\"fabric:/myApp1\": &armservicefabric.ApplicationHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\tDefaultServiceTypeHealthPolicy: &armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\tServiceTypeHealthPolicies: map[string]*armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\"myServiceType1\": &armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](100),\n\t// \t\t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tMaxPercentUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\tMaxPercentUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tUpgradeDomainTimeout: to.Ptr(\"00:15:00\"),\n\t// \t\t\t\t\t\tUpgradeReplicaSetCheckTimeout: to.Ptr(\"00:10:00\"),\n\t// \t\t\t\t\t\tUpgradeTimeout: to.Ptr(\"01:00:00\"),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeManual),\n\t// \t\t\t\t\tVMImage: to.Ptr(\"Windows\"),\n\t// \t\t\t\t},\n\t// \t\t\t},\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"myCluster2\"),\n\t// \t\t\t\tType: to.Ptr(\"Microsoft.ServiceFabric/clusters\"),\n\t// \t\t\t\tEtag: to.Ptr(\"W/\\\"636462502164040075\\\"\"),\n\t// \t\t\t\tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resRg/providers/Microsoft.ServiceFabric/clusters/myCluster2\"),\n\t// \t\t\t\tLocation: to.Ptr(\"eastus\"),\n\t// \t\t\t\tTags: map[string]*string{\n\t// \t\t\t\t},\n\t// \t\t\t\tProperties: &armservicefabric.ClusterProperties{\n\t// \t\t\t\t\tAddOnFeatures: []*armservicefabric.AddOnFeatures{\n\t// \t\t\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesRepairManager)},\n\t// \t\t\t\t\t\tAvailableClusterVersions: []*armservicefabric.ClusterVersionDetails{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tCodeVersion: to.Ptr(\"6.1.187.1\"),\n\t// \t\t\t\t\t\t\t\tEnvironment: to.Ptr(armservicefabric.ClusterEnvironmentLinux),\n\t// \t\t\t\t\t\t\t\tSupportExpiryUTC: to.Ptr(\"2018-06-15T23:59:59.9999999\"),\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tClientCertificateCommonNames: []*armservicefabric.ClientCertificateCommonName{\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tClientCertificateThumbprints: []*armservicefabric.ClientCertificateThumbprint{\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tClusterCodeVersion: to.Ptr(\"6.1.187.1\"),\n\t// \t\t\t\t\t\tClusterEndpoint: to.Ptr(\"https://eastus.servicefabric.azure.com\"),\n\t// \t\t\t\t\t\tClusterID: to.Ptr(\"2747e469-b24e-4039-8a0a-46151419523f\"),\n\t// \t\t\t\t\t\tClusterState: to.Ptr(armservicefabric.ClusterStateWaitingForNodes),\n\t// \t\t\t\t\t\tDiagnosticsStorageAccountConfig: &armservicefabric.DiagnosticsStorageAccountConfig{\n\t// \t\t\t\t\t\t\tBlobEndpoint: to.Ptr(\"https://diag.blob.core.windows.net/\"),\n\t// \t\t\t\t\t\t\tProtectedAccountKeyName: to.Ptr(\"StorageAccountKey1\"),\n\t// \t\t\t\t\t\t\tQueueEndpoint: to.Ptr(\"https://diag.queue.core.windows.net/\"),\n\t// \t\t\t\t\t\t\tStorageAccountName: 
to.Ptr(\"diag\"),\n\t// \t\t\t\t\t\t\tTableEndpoint: to.Ptr(\"https://diag.table.core.windows.net/\"),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tFabricSettings: []*armservicefabric.SettingsSectionDescription{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tName: to.Ptr(\"UpgradeService\"),\n\t// \t\t\t\t\t\t\t\tParameters: []*armservicefabric.SettingsParameterDescription{\n\t// \t\t\t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\t\t\tName: to.Ptr(\"AppPollIntervalInSeconds\"),\n\t// \t\t\t\t\t\t\t\t\t\tValue: to.Ptr(\"60\"),\n\t// \t\t\t\t\t\t\t\t}},\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tManagementEndpoint: to.Ptr(\"http://myCluster2.eastus.cloudapp.azure.com:19080\"),\n\t// \t\t\t\t\t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t// \t\t\t\t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t// \t\t\t\t\t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t// \t\t\t\t\t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t// \t\t\t\t\t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t// \t\t\t\t\t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t// \t\t\t\t\t\t\t\tIsPrimary: to.Ptr(true),\n\t// \t\t\t\t\t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tProvisioningState: to.Ptr(armservicefabric.ProvisioningStateSucceeded),\n\t// \t\t\t\t\t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelSilver),\n\t// \t\t\t\t\t\tUpgradeDescription: &armservicefabric.ClusterUpgradePolicy{\n\t// \t\t\t\t\t\t\tDeltaHealthPolicy: &armservicefabric.ClusterUpgradeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\tMaxPercentUpgradeDomainDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tForceRestart: to.Ptr(false),\n\t// \t\t\t\t\t\t\tHealthCheckRetryTimeout: to.Ptr(\"00:05:00\"),\n\t// \t\t\t\t\t\t\tHealthCheckStableDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\t\tHealthCheckWaitDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\t\tHealthPolicy: &armservicefabric.ClusterHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\tMaxPercentUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tUpgradeDomainTimeout: to.Ptr(\"00:15:00\"),\n\t// \t\t\t\t\t\t\tUpgradeReplicaSetCheckTimeout: to.Ptr(\"00:10:00\"),\n\t// \t\t\t\t\t\t\tUpgradeTimeout: to.Ptr(\"01:00:00\"),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeManual),\n\t// \t\t\t\t\t\tVMImage: to.Ptr(\"Ubuntu\"),\n\t// \t\t\t\t\t},\n\t// \t\t\t}},\n\t// \t\t}\n}", "func (c *Client) ListClustersForProject(projectID string) ([]models.Cluster, error) {\n\treq, err := c.newRequest(\"GET\", projectPath+\"/\"+projectID+clustersSubPath, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresult := make([]models.Cluster, 0)\n\n\tresp, err := c.do(req, &result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// StatusCodes 401 and 403 mean empty response and should be treated as such\n\tif resp.StatusCode == 401 || resp.StatusCode == 403 
{\n\t\treturn nil, nil\n\t}\n\n\tif resp.StatusCode >= 300 {\n\t\treturn nil, errors.New(\"Got non-2xx return code: \" + strconv.Itoa(resp.StatusCode))\n\t}\n\n\treturn result, nil\n}", "func (a ClustersAPI) Get(clusterID string) (httpmodels.GetResp, error) {\n\tvar clusterInfo httpmodels.GetResp\n\n\tdata := struct {\n\t\tClusterID string `json:\"cluster_id,omitempty\" url:\"cluster_id,omitempty\"`\n\t}{\n\t\tclusterID,\n\t}\n\tresp, err := a.Client.performQuery(http.MethodGet, \"/clusters/get\", data, nil)\n\tif err != nil {\n\t\treturn clusterInfo, err\n\t}\n\n\terr = json.Unmarshal(resp, &clusterInfo)\n\treturn clusterInfo, err\n}", "func Clusters() (clusters map[string][]string) {\n\tclusters = make(map[string][]string)\n\tif addr := AccessConsulAddr(); addr != \"\" && Region() != \"\" {\n\t\treturn getClustersFromConsul(addr, Region())\n\t}\n\tcs := Get(\"Key-ClusterMgrCluster\").(map[string]string)\n\tfor key, value := range cs {\n\t\tclusters[key] = strings.Split(value, \" \")\n\t}\n\treturn\n}", "func (us *ClusterStore) GetAllByName(name string) ([]model.Cluster, error) {\n\tvar clusters []model.Cluster\n\tif err := us.db.\n\t\tPreload(clause.Associations).\n\t\tWhere(&model.Cluster{ClusterName: name}).\n\t\tFind(&clusters).\n\t\tError; err != nil {\n\t\tif errors.Is(err, gorm.ErrRecordNotFound) {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn clusters, nil\n}", "func Clusters(api API) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tclusters := api.Clusters()\n\t\tm := make(map[string]map[string]any, len(clusters))\n\t\tfor _, c := range clusters {\n\t\t\tm[c.ID] = c.Debug()\n\t\t}\n\n\t\tdata, err := json.Marshal(m)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tfmt.Fprintf(w, \"could not marshal cluster debug map: %s\\n\", err)\n\t\t\treturn\n\t\t}\n\n\t\tw.Write(data)\n\t\tw.Write([]byte(\"\\n\"))\n\t}\n}", "func (ck *clusterKinds) getAll() map[string]bool {\n\treturn ck.isNamespaced\n}", "func (c *krakenClusters) List(opts v1.ListOptions) (result *v1alpha1.KrakenClusterList, err error) {\n\tresult = &v1alpha1.KrakenClusterList{}\n\terr = c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"krakenclusters\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}", "func TestContainerEngineClient_ListClusters(t *testing.T) {\n\tc, clerr := containerengine.NewContainerEngineClientWithConfigurationProvider(configurationProvider())\n\tfailIfError(t, clerr)\n\n\t// list events for last 5 hours\n\treq := containerengine.ListClustersRequest{\n\t\tCompartmentId: common.String(getCompartmentID()),\n\t}\n\n\tresp, err := c.ListClusters(context.Background(), req)\n\tfailIfError(t, err)\n\tassert.NotEmpty(t, resp)\n}", "func (s *StorageClusterAPI) List(w http.ResponseWriter, r *http.Request) {\n\tclusters, err := s.storageClusterService.List()\n\tif err != nil {\n\t\tapi.Error(w, err)\n\t\treturn\n\t}\n\tapi.OK(w, clusters)\n}", "func (e *ECS) DescribeClusters(req *DescribeClustersReq) (*DescribeClustersResp, error) {\n\tif req == nil {\n\t\treturn nil, fmt.Errorf(\"The req params cannot be nil\")\n\t}\n\n\tparams := makeParams(\"DescribeClusters\")\n\tif len(req.Clusters) > 0 {\n\t\taddParamsList(params, \"clusters.member\", req.Clusters)\n\t}\n\n\tresp := new(DescribeClustersResp)\n\tif err := e.query(params, resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}", "func (a *Client) VirtualizationClustersRead(params *VirtualizationClustersReadParams) 
(*VirtualizationClustersReadOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewVirtualizationClustersReadParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"virtualization_clusters_read\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/virtualization/clusters/{id}/\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &VirtualizationClustersReadReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*VirtualizationClustersReadOK), nil\n\n}", "func (c *cluster) GetAllNids() (result []Nid) {\n c.lock.RLock()\n defer c.lock.RUnlock()\n\n result = make([]Nid, 0, len(c.nodes))\n for nid, _ := range c.nodes {\n result = append(result, nid)\n }\n\n return result\n}", "func (s *databaseClusterLister) List(selector labels.Selector) (ret []*v1alpha1.DatabaseCluster, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.DatabaseCluster))\n\t})\n\treturn ret, err\n}", "func (client RoverClusterClient) listRoverClusters(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/roverClusters\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response ListRoverClustersResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\tapiReferenceLink := \"https://docs.oracle.com/iaas/api/#/en/rover/20201210/RoverCluster/ListRoverClusters\"\n\t\terr = common.PostProcessServiceError(err, \"RoverCluster\", \"ListRoverClusters\", apiReferenceLink)\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (a *Client) ListAvailableClusters(ctx context.Context, params *ListAvailableClustersParams) (*ListAvailableClustersOK, error) {\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"listAvailableClusters\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/heappe/ClusterInformation/ListAvailableClusters\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &ListAvailableClustersReader{formats: a.formats},\n\t\tAuthInfo: a.authInfo,\n\t\tContext: ctx,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*ListAvailableClustersOK), nil\n\n}", "func (elementConfiguration *ElementConfiguration) ListClusters() ([]string, error) {\n\t// collect names\n\tclusterConfigurations := []string{}\n\n\telementConfiguration.ClustersX.RLock()\n\tfor clusterConfiguration := range elementConfiguration.Clusters {\n\t\tclusterConfigurations = append(clusterConfigurations, clusterConfiguration)\n\t}\n\telementConfiguration.ClustersX.RUnlock()\n\n\t// success\n\treturn clusterConfigurations, nil\n}", "func GetAllContainersNotInCluster(dbConn *sql.DB) ([]Container, error) {\n\tvar rows *sql.Rows\n\tvar err error\n\tqueryStr := fmt.Sprintf(\"select c.id, c.name, c.clusterid, 
c.serverid, c.role, c.image, to_char(c.createdt, 'MM-DD-YYYY HH24:MI:SS'), p.id, p.name, s.name, l.name from project p, server s , container c left join cluster l on c.clusterid = l.id where c.role != 'standalone' and c.clusterid = -1 and c.projectid = p.id and c.serverid = s.id order by c.name\")\n\tlogit.Info.Println(\"admindb:GetAllContainersNotInCluster:\" + queryStr)\n\trows, err = dbConn.Query(queryStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\tcontainers := make([]Container, 0)\n\tfor rows.Next() {\n\t\tcontainer := Container{}\n\t\tif err = rows.Scan(&container.ID, &container.Name, &container.ClusterID, &container.ServerID, &container.Role, &container.Image, &container.CreateDate, &container.ProjectID, &container.ProjectName, &container.ServerName, &container.ClusterName); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcontainer.ClusterName = container.ClusterID\n\t\tcontainers = append(containers, container)\n\t}\n\tif err = rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn containers, nil\n}", "func (sqlStore *SQLStore) GetClusterInstallations(filter *model.ClusterInstallationFilter) ([]*model.ClusterInstallation, error) {\n\treturn sqlStore.getClusterInstallations(sqlStore.db, filter)\n}", "func (c *Container) GetClusterNodes(ctx echo.Context) error {\n response := models.ClusterNodesResponse{\n Data: []models.NodeData{},\n }\n tabletServersFuture := make(chan helpers.TabletServersFuture)\n clusterConfigFuture := make(chan helpers.ClusterConfigFuture)\n go helpers.GetTabletServersFuture(helpers.HOST, tabletServersFuture)\n go helpers.GetClusterConfigFuture(helpers.HOST, clusterConfigFuture)\n tabletServersResponse := <-tabletServersFuture\n if tabletServersResponse.Error != nil {\n return ctx.String(http.StatusInternalServerError,\n tabletServersResponse.Error.Error())\n }\n // Use the cluster config API to get the read-replica (If any) placement UUID\n clusterConfigResponse := <-clusterConfigFuture\n readReplicaUuid := \"\"\n if clusterConfigResponse.Error == nil {\n for _, replica := range clusterConfigResponse.\n ClusterConfig.ReplicationInfo.ReadReplicas {\n readReplicaUuid = replica.PlacementUuid\n }\n }\n mastersFuture := make(chan helpers.MastersFuture)\n go helpers.GetMastersFuture(helpers.HOST, mastersFuture)\n\n nodeList := helpers.GetNodesList(tabletServersResponse)\n versionInfoFutures := map[string]chan helpers.VersionInfoFuture{}\n for _, nodeHost := range nodeList {\n versionInfoFuture := make(chan helpers.VersionInfoFuture)\n versionInfoFutures[nodeHost] = versionInfoFuture\n go helpers.GetVersionFuture(nodeHost, versionInfoFuture)\n }\n activeYsqlConnectionsFutures := map[string]chan helpers.ActiveYsqlConnectionsFuture{}\n activeYcqlConnectionsFutures := map[string]chan helpers.ActiveYcqlConnectionsFuture{}\n masterMemTrackersFutures := map[string]chan helpers.MemTrackersFuture{}\n tserverMemTrackersFutures := map[string]chan helpers.MemTrackersFuture{}\n for _, nodeHost := range nodeList {\n activeYsqlConnectionsFuture := make(chan helpers.ActiveYsqlConnectionsFuture)\n activeYsqlConnectionsFutures[nodeHost] = activeYsqlConnectionsFuture\n go helpers.GetActiveYsqlConnectionsFuture(nodeHost, activeYsqlConnectionsFuture)\n activeYcqlConnectionsFuture := make(chan helpers.ActiveYcqlConnectionsFuture)\n activeYcqlConnectionsFutures[nodeHost] = activeYcqlConnectionsFuture\n go helpers.GetActiveYcqlConnectionsFuture(nodeHost, activeYcqlConnectionsFuture)\n masterMemTrackerFuture := make(chan helpers.MemTrackersFuture)\n 
masterMemTrackersFutures[nodeHost] = masterMemTrackerFuture\n go helpers.GetMemTrackersFuture(nodeHost, true, masterMemTrackerFuture)\n tserverMemTrackerFuture := make(chan helpers.MemTrackersFuture)\n tserverMemTrackersFutures[nodeHost] = tserverMemTrackerFuture\n go helpers.GetMemTrackersFuture(nodeHost, false, tserverMemTrackerFuture)\n }\n masters := map[string]helpers.Master{}\n mastersResponse := <-mastersFuture\n if mastersResponse.Error == nil {\n for _, master := range mastersResponse.Masters {\n if len(master.Registration.PrivateRpcAddresses) > 0 {\n masters[master.Registration.PrivateRpcAddresses[0].Host] = master\n }\n }\n }\n currentTime := time.Now().UnixMicro()\n hostToUuid, errHostToUuidMap := helpers.GetHostToUuidMap(helpers.HOST)\n for placementUuid, obj := range tabletServersResponse.Tablets {\n // Cross check the placement UUID of the node with that of read-replica cluster\n isReadReplica := false\n if readReplicaUuid == placementUuid {\n isReadReplica = true\n }\n for hostport, nodeData := range obj {\n host, _, err := net.SplitHostPort(hostport)\n // If we can split hostport, just use host as name.\n // Otherwise, use hostport as name.\n // However, we can only get version information if we can get the host\n hostName := hostport\n versionNumber := \"\"\n activeYsqlConnections := int64(0)\n activeYcqlConnections := int64(0)\n isMasterUp := true\n ramUsedTserver := int64(0)\n ramUsedMaster := int64(0)\n ramLimitTserver := int64(0)\n ramLimitMaster := int64(0)\n masterUptimeUs := int64(0)\n totalDiskBytes := int64(0)\n if err == nil {\n hostName = host\n versionInfo := <-versionInfoFutures[hostName]\n if versionInfo.Error == nil {\n versionNumber = versionInfo.VersionInfo.VersionNumber\n }\n ysqlConnections := <-activeYsqlConnectionsFutures[hostName]\n if ysqlConnections.Error == nil {\n activeYsqlConnections += ysqlConnections.YsqlConnections\n }\n ycqlConnections := <-activeYcqlConnectionsFutures[hostName]\n if ycqlConnections.Error == nil {\n activeYcqlConnections += ycqlConnections.YcqlConnections\n }\n masterMemTracker := <-masterMemTrackersFutures[hostName]\n if masterMemTracker.Error == nil {\n ramUsedMaster = masterMemTracker.Consumption\n ramLimitMaster = masterMemTracker.Limit\n }\n tserverMemTracker := <-tserverMemTrackersFutures[hostName]\n if tserverMemTracker.Error == nil {\n ramUsedTserver = tserverMemTracker.Consumption\n ramLimitTserver = tserverMemTracker.Limit\n }\n if master, ok := masters[hostName]; ok {\n isMasterUp = master.Error == nil\n if isMasterUp {\n masterUptimeUs = currentTime - master.InstanceId.StartTimeUs\n }\n }\n if errHostToUuidMap == nil {\n query :=\n fmt.Sprintf(QUERY_LIMIT_ONE, \"system.metrics\", \"total_disk\",\n hostToUuid[hostName])\n session, err := c.GetSession()\n if err == nil {\n iter := session.Query(query).Iter()\n var ts int64\n var value int64\n var details string\n iter.Scan(&ts, &value, &details)\n totalDiskBytes = value\n }\n }\n }\n totalSstFileSizeBytes := int64(nodeData.TotalSstFileSizeBytes)\n uncompressedSstFileSizeBytes :=\n int64(nodeData.UncompressedSstFileSizeBytes)\n userTabletsTotal := int64(nodeData.UserTabletsTotal)\n userTabletsLeaders := int64(nodeData.UserTabletsLeaders)\n systemTabletsTotal := int64(nodeData.SystemTabletsTotal)\n systemTabletsLeaders := int64(nodeData.SystemTabletsLeaders)\n activeConnections := models.NodeDataMetricsActiveConnections{\n Ysql: activeYsqlConnections,\n Ycql: activeYcqlConnections,\n }\n ramUsedBytes := ramUsedMaster + ramUsedTserver\n ramProvisionedBytes := 
ramLimitMaster + ramLimitTserver\n isBootstrapping := true\n // For now we hard code isBootstrapping here, and we use the\n // GetIsLoadBalancerIdle endpoint separately to determine if\n // a node is bootstrapping on the frontend, since yb-admin is a\n // bit slow. Once we get a faster way of doing this we can move\n // the implementation here.\n // For now, assuming that IsMaster and IsTserver are always true\n // The UI frontend doesn't use these values so this should be ok for now\n response.Data = append(response.Data, models.NodeData{\n Name: hostName,\n Host: hostName,\n IsNodeUp: nodeData.Status == \"ALIVE\",\n IsMaster: true,\n IsTserver: true,\n IsReadReplica: isReadReplica,\n IsMasterUp: isMasterUp,\n IsBootstrapping: isBootstrapping,\n Metrics: models.NodeDataMetrics{\n // Eventually we want to change models.NodeDataMetrics so that\n // all the int64 fields are uint64. But currently openapi\n // generator only generates int64s. Ideally if we set\n // minimum: 0 in the specs, the generator should use uint64.\n // We should try to implement this into openapi-generator.\n MemoryUsedBytes: int64(nodeData.RamUsedBytes),\n TotalSstFileSizeBytes: &totalSstFileSizeBytes,\n UncompressedSstFileSizeBytes: &uncompressedSstFileSizeBytes,\n ReadOpsPerSec: nodeData.ReadOpsPerSec,\n WriteOpsPerSec: nodeData.WriteOpsPerSec,\n TimeSinceHbSec: nodeData.TimeSinceHbSec,\n UptimeSeconds: int64(nodeData.UptimeSeconds),\n UserTabletsTotal: userTabletsTotal,\n UserTabletsLeaders: userTabletsLeaders,\n SystemTabletsTotal: systemTabletsTotal,\n SystemTabletsLeaders: systemTabletsLeaders,\n ActiveConnections: activeConnections,\n MasterUptimeUs: masterUptimeUs,\n RamUsedBytes: ramUsedBytes,\n RamProvisionedBytes: ramProvisionedBytes,\n DiskProvisionedBytes: totalDiskBytes,\n },\n CloudInfo: models.NodeDataCloudInfo{\n Cloud: nodeData.Cloud,\n Region: nodeData.Region,\n Zone: nodeData.Zone,\n },\n SoftwareVersion: versionNumber,\n })\n }\n }\n sort.Slice(response.Data, func(i, j int) bool {\n return response.Data[i].Name < response.Data[j].Name\n })\n return ctx.JSON(http.StatusOK, response)\n}", "func (a *Client) V2ListClusters(ctx context.Context, params *V2ListClustersParams) (*V2ListClustersOK, error) {\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"v2ListClusters\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/v2/clusters\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &V2ListClustersReader{formats: a.formats},\n\t\tAuthInfo: a.authInfo,\n\t\tContext: ctx,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*V2ListClustersOK), nil\n\n}", "func (c *ZkCluster) ConnectAll() (*enhanced.Client, error) {\n\tvar servers = strings.Split(c.ConnectionString(), \",\")\n\treturn enhanced.Connect(servers, time.Second)\n}", "func (a *Client) ListEdgeClusters(params *ListEdgeClustersParams, authInfo runtime.ClientAuthInfoWriter) (*ListEdgeClustersOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewListEdgeClustersParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"ListEdgeClusters\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/edge-clusters\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: 
params,\n\t\tReader: &ListEdgeClustersReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*ListEdgeClustersOK), nil\n\n}", "func (a *Client) GetCombinedCloudClusters(params *GetCombinedCloudClustersParams, opts ...ClientOption) (*GetCombinedCloudClustersOK, *GetCombinedCloudClustersMultiStatus, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetCombinedCloudClustersParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"GetCombinedCloudClusters\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/kubernetes-protection/entities/cloud_cluster/v1\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\", \"application/octet-stream\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetCombinedCloudClustersReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tswitch value := result.(type) {\n\tcase *GetCombinedCloudClustersOK:\n\t\treturn value, nil, nil\n\tcase *GetCombinedCloudClustersMultiStatus:\n\t\treturn nil, value, nil\n\t}\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for kubernetes_protection: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (nh *NodeHost) Clusters() []*node {\n\tresult := make([]*node, 0)\n\tnh.clusterMu.RLock()\n\tnh.clusterMu.clusters.Range(func(k, v interface{}) bool {\n\t\tresult = append(result, v.(*node))\n\t\treturn true\n\t})\n\tnh.clusterMu.RUnlock()\n\n\treturn result\n}", "func (api *clusterAPI) ApisrvList(ctx context.Context, opts *api.ListWatchOptions) ([]*cluster.Cluster, error) {\n\tif api.ct.resolver != nil {\n\t\tapicl, err := api.ct.apiClient()\n\t\tif err != nil {\n\t\t\tapi.ct.logger.Errorf(\"Error creating API server clent. 
Err: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn apicl.ClusterV1().Cluster().List(context.Background(), opts)\n\t}\n\n\t// List from local cache\n\tctkitObjs, err := api.List(ctx, opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret []*cluster.Cluster\n\tfor _, obj := range ctkitObjs {\n\t\tret = append(ret, &obj.Cluster)\n\t}\n\treturn ret, nil\n}", "func ExampleSnowball_ListClusters_shared00() {\n\tsvc := snowball.New(session.New())\n\tinput := &snowball.ListClustersInput{}\n\n\tresult, err := svc.ListClusters(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase snowball.ErrCodeInvalidNextTokenException:\n\t\t\t\tfmt.Println(snowball.ErrCodeInvalidNextTokenException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func ExampleClustersClient_ListByResourceGroup() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armservicefabric.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewClustersClient().ListByResourceGroup(ctx, \"resRg\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.ClusterListResult = armservicefabric.ClusterListResult{\n\t// \tValue: []*armservicefabric.Cluster{\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"myCluster\"),\n\t// \t\t\tType: to.Ptr(\"Microsoft.ServiceFabric/clusters\"),\n\t// \t\t\tEtag: to.Ptr(\"W/\\\"636462502169240745\\\"\"),\n\t// \t\t\tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resRg/providers/Microsoft.ServiceFabric/clusters/myCluster\"),\n\t// \t\t\tLocation: to.Ptr(\"eastus\"),\n\t// \t\t\tTags: map[string]*string{\n\t// \t\t\t},\n\t// \t\t\tProperties: &armservicefabric.ClusterProperties{\n\t// \t\t\t\tAddOnFeatures: []*armservicefabric.AddOnFeatures{\n\t// \t\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesRepairManager),\n\t// \t\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesDNSService),\n\t// \t\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesBackupRestoreService),\n\t// \t\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesResourceMonitorService)},\n\t// \t\t\t\t\tAvailableClusterVersions: []*armservicefabric.ClusterVersionDetails{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\t\t\t\t\t\tEnvironment: to.Ptr(armservicefabric.ClusterEnvironmentWindows),\n\t// \t\t\t\t\t\t\tSupportExpiryUTC: to.Ptr(\"2018-06-15T23:59:59.9999999\"),\n\t// \t\t\t\t\t}},\n\t// \t\t\t\t\tAzureActiveDirectory: &armservicefabric.AzureActiveDirectory{\n\t// \t\t\t\t\t\tClientApplication: to.Ptr(\"d151ad89-4bce-4ae8-b3d1-1dc79679fa75\"),\n\t// \t\t\t\t\t\tClusterApplication: to.Ptr(\"5886372e-7bf4-4878-a497-8098aba608ae\"),\n\t// \t\t\t\t\t\tTenantID: to.Ptr(\"6abcc6a0-8666-43f1-87b8-172cf86a9f9c\"),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tX509StoreName: to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tClientCertificateCommonNames: []*armservicefabric.ClientCertificateCommonName{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t// \t\t\t\t\t\t\tIsAdmin: to.Ptr(true),\n\t// \t\t\t\t\t}},\n\t// \t\t\t\t\tClientCertificateThumbprints: []*armservicefabric.ClientCertificateThumbprint{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tCertificateThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t// \t\t\t\t\t\t\tIsAdmin: to.Ptr(false),\n\t// \t\t\t\t\t}},\n\t// \t\t\t\t\tClusterCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\t\t\t\tClusterEndpoint: to.Ptr(\"https://eastus.servicefabric.azure.com\"),\n\t// \t\t\t\t\tClusterID: to.Ptr(\"92584666-9889-4ae8-8d02-91902923d37f\"),\n\t// \t\t\t\t\tClusterState: to.Ptr(armservicefabric.ClusterStateWaitingForNodes),\n\t// \t\t\t\t\tDiagnosticsStorageAccountConfig: &armservicefabric.DiagnosticsStorageAccountConfig{\n\t// \t\t\t\t\t\tBlobEndpoint: to.Ptr(\"https://diag.blob.core.windows.net/\"),\n\t// \t\t\t\t\t\tProtectedAccountKeyName: to.Ptr(\"StorageAccountKey1\"),\n\t// \t\t\t\t\t\tQueueEndpoint: to.Ptr(\"https://diag.queue.core.windows.net/\"),\n\t// \t\t\t\t\t\tStorageAccountName: to.Ptr(\"diag\"),\n\t// 
\t\t\t\t\t\tTableEndpoint: to.Ptr(\"https://diag.table.core.windows.net/\"),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tFabricSettings: []*armservicefabric.SettingsSectionDescription{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tName: to.Ptr(\"UpgradeService\"),\n\t// \t\t\t\t\t\t\tParameters: []*armservicefabric.SettingsParameterDescription{\n\t// \t\t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\t\tName: to.Ptr(\"AppPollIntervalInSeconds\"),\n\t// \t\t\t\t\t\t\t\t\tValue: to.Ptr(\"60\"),\n\t// \t\t\t\t\t\t\t}},\n\t// \t\t\t\t\t}},\n\t// \t\t\t\t\tManagementEndpoint: to.Ptr(\"https://myCluster.eastus.cloudapp.azure.com:19080\"),\n\t// \t\t\t\t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t// \t\t\t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t// \t\t\t\t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t// \t\t\t\t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t// \t\t\t\t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t// \t\t\t\t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t// \t\t\t\t\t\t\tIsPrimary: to.Ptr(true),\n\t// \t\t\t\t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t// \t\t\t\t\t}},\n\t// \t\t\t\t\tProvisioningState: to.Ptr(armservicefabric.ProvisioningStateSucceeded),\n\t// \t\t\t\t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelSilver),\n\t// \t\t\t\t\tReverseProxyCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tX509StoreName: to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tUpgradeDescription: &armservicefabric.ClusterUpgradePolicy{\n\t// \t\t\t\t\t\tDeltaHealthPolicy: &armservicefabric.ClusterUpgradeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\tApplicationDeltaHealthPolicies: map[string]*armservicefabric.ApplicationDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\"fabric:/myApp1\": &armservicefabric.ApplicationDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\tDefaultServiceTypeDeltaHealthPolicy: &armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\tServiceTypeDeltaHealthPolicies: map[string]*armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\"myServiceType1\": &armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tMaxPercentDeltaUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\tMaxPercentDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\tMaxPercentUpgradeDomainDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tForceRestart: to.Ptr(false),\n\t// \t\t\t\t\t\tHealthCheckRetryTimeout: to.Ptr(\"00:05:00\"),\n\t// \t\t\t\t\t\tHealthCheckStableDuration: 
to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\tHealthCheckWaitDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\tHealthPolicy: &armservicefabric.ClusterHealthPolicy{\n\t// \t\t\t\t\t\t\tApplicationHealthPolicies: map[string]*armservicefabric.ApplicationHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\"fabric:/myApp1\": &armservicefabric.ApplicationHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\tDefaultServiceTypeHealthPolicy: &armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\tServiceTypeHealthPolicies: map[string]*armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\"myServiceType1\": &armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](100),\n\t// \t\t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tMaxPercentUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\tMaxPercentUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tUpgradeDomainTimeout: to.Ptr(\"00:15:00\"),\n\t// \t\t\t\t\t\tUpgradeReplicaSetCheckTimeout: to.Ptr(\"00:10:00\"),\n\t// \t\t\t\t\t\tUpgradeTimeout: to.Ptr(\"01:00:00\"),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeManual),\n\t// \t\t\t\t\tVMImage: to.Ptr(\"Windows\"),\n\t// \t\t\t\t},\n\t// \t\t\t},\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"myCluster2\"),\n\t// \t\t\t\tType: to.Ptr(\"Microsoft.ServiceFabric/clusters\"),\n\t// \t\t\t\tEtag: to.Ptr(\"W/\\\"636462502164040075\\\"\"),\n\t// \t\t\t\tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resRg/providers/Microsoft.ServiceFabric/clusters/myCluster2\"),\n\t// \t\t\t\tLocation: to.Ptr(\"eastus\"),\n\t// \t\t\t\tTags: map[string]*string{\n\t// \t\t\t\t},\n\t// \t\t\t\tProperties: &armservicefabric.ClusterProperties{\n\t// \t\t\t\t\tAddOnFeatures: []*armservicefabric.AddOnFeatures{\n\t// \t\t\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesRepairManager)},\n\t// \t\t\t\t\t\tAvailableClusterVersions: []*armservicefabric.ClusterVersionDetails{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tCodeVersion: to.Ptr(\"6.1.187.1\"),\n\t// \t\t\t\t\t\t\t\tEnvironment: to.Ptr(armservicefabric.ClusterEnvironmentLinux),\n\t// \t\t\t\t\t\t\t\tSupportExpiryUTC: to.Ptr(\"2018-06-15T23:59:59.9999999\"),\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tClientCertificateCommonNames: []*armservicefabric.ClientCertificateCommonName{\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tClientCertificateThumbprints: []*armservicefabric.ClientCertificateThumbprint{\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tClusterCodeVersion: to.Ptr(\"6.1.187.1\"),\n\t// \t\t\t\t\t\tClusterEndpoint: to.Ptr(\"https://eastus.servicefabric.azure.com\"),\n\t// \t\t\t\t\t\tClusterID: to.Ptr(\"2747e469-b24e-4039-8a0a-46151419523f\"),\n\t// \t\t\t\t\t\tClusterState: to.Ptr(armservicefabric.ClusterStateWaitingForNodes),\n\t// \t\t\t\t\t\tDiagnosticsStorageAccountConfig: &armservicefabric.DiagnosticsStorageAccountConfig{\n\t// \t\t\t\t\t\t\tBlobEndpoint: to.Ptr(\"https://diag.blob.core.windows.net/\"),\n\t// \t\t\t\t\t\t\tProtectedAccountKeyName: to.Ptr(\"StorageAccountKey1\"),\n\t// \t\t\t\t\t\t\tQueueEndpoint: to.Ptr(\"https://diag.queue.core.windows.net/\"),\n\t// \t\t\t\t\t\t\tStorageAccountName: to.Ptr(\"diag\"),\n\t// \t\t\t\t\t\t\tTableEndpoint: to.Ptr(\"https://diag.table.core.windows.net/\"),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tFabricSettings: 
[]*armservicefabric.SettingsSectionDescription{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tName: to.Ptr(\"UpgradeService\"),\n\t// \t\t\t\t\t\t\t\tParameters: []*armservicefabric.SettingsParameterDescription{\n\t// \t\t\t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\t\t\tName: to.Ptr(\"AppPollIntervalInSeconds\"),\n\t// \t\t\t\t\t\t\t\t\t\tValue: to.Ptr(\"60\"),\n\t// \t\t\t\t\t\t\t\t}},\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tManagementEndpoint: to.Ptr(\"http://myCluster2.eastus.cloudapp.azure.com:19080\"),\n\t// \t\t\t\t\t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t// \t\t\t\t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t// \t\t\t\t\t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t// \t\t\t\t\t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t// \t\t\t\t\t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t// \t\t\t\t\t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t// \t\t\t\t\t\t\t\tIsPrimary: to.Ptr(true),\n\t// \t\t\t\t\t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tProvisioningState: to.Ptr(armservicefabric.ProvisioningStateSucceeded),\n\t// \t\t\t\t\t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelSilver),\n\t// \t\t\t\t\t\tUpgradeDescription: &armservicefabric.ClusterUpgradePolicy{\n\t// \t\t\t\t\t\t\tDeltaHealthPolicy: &armservicefabric.ClusterUpgradeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\tMaxPercentUpgradeDomainDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tForceRestart: to.Ptr(false),\n\t// \t\t\t\t\t\t\tHealthCheckRetryTimeout: to.Ptr(\"00:05:00\"),\n\t// \t\t\t\t\t\t\tHealthCheckStableDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\t\tHealthCheckWaitDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\t\tHealthPolicy: &armservicefabric.ClusterHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\tMaxPercentUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tUpgradeDomainTimeout: to.Ptr(\"00:15:00\"),\n\t// \t\t\t\t\t\t\tUpgradeReplicaSetCheckTimeout: to.Ptr(\"00:10:00\"),\n\t// \t\t\t\t\t\t\tUpgradeTimeout: to.Ptr(\"01:00:00\"),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeManual),\n\t// \t\t\t\t\t\tVMImage: to.Ptr(\"Ubuntu\"),\n\t// \t\t\t\t\t},\n\t// \t\t\t}},\n\t// \t\t}\n}", "func (vc *VirtualCenter) GetDatastoresByCluster(ctx context.Context,\n\tclusterMorefValue string) ([]*DatastoreInfo, error) {\n\tlog := logger.GetLogger(ctx)\n\tif err := vc.Connect(ctx); err != nil {\n\t\tlog.Errorf(\"failed to connect to vCenter. 
err: %v\", err)\n\t\treturn nil, err\n\t}\n\tclusterMoref := types.ManagedObjectReference{\n\t\tType: \"ClusterComputeResource\",\n\t\tValue: clusterMorefValue,\n\t}\n\tclusterComputeResourceMo := mo.ClusterComputeResource{}\n\terr := vc.Client.RetrieveOne(ctx, clusterMoref, []string{\"host\"}, &clusterComputeResourceMo)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to fetch hosts from cluster given clusterMorefValue %s with err: %v\",\n\t\t\tclusterMorefValue, err)\n\t\treturn nil, err\n\t}\n\n\tvar dsList []*DatastoreInfo\n\tfor _, hostMoref := range clusterComputeResourceMo.Host {\n\t\thost := &HostSystem{\n\t\t\tHostSystem: object.NewHostSystem(vc.Client.Client, hostMoref),\n\t\t}\n\t\tdsInfos, err := host.GetAllAccessibleDatastores(ctx)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to fetch datastores from host %s. Err: %v\", hostMoref, err)\n\t\t\treturn nil, err\n\t\t}\n\t\tdsList = append(dsList, dsInfos...)\n\t}\n\treturn dsList, nil\n}", "func (o AppProjectSpecSyncWindowsOutput) Clusters() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v AppProjectSpecSyncWindows) []string { return v.Clusters }).(pulumi.StringArrayOutput)\n}", "func (r *ProjectsInstancesClustersService) List(parent string) *ProjectsInstancesClustersListCall {\n\tc := &ProjectsInstancesClustersListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.parent = parent\n\treturn c\n}", "func (c *Cluster) GetKeyspaces(ctx context.Context) ([]*vtadminpb.Keyspace, error) {\n\tspan, ctx := trace.NewSpan(ctx, \"Cluster.GetKeyspaces\")\n\tdefer span.Finish()\n\n\tAnnotateSpan(c, span)\n\n\tif err := c.Vtctld.Dial(ctx); err != nil {\n\t\treturn nil, fmt.Errorf(\"Vtctld.Dial(cluster=%s) failed: %w\", c.ID, err)\n\t}\n\n\tresp, err := c.Vtctld.GetKeyspaces(ctx, &vtctldatapb.GetKeyspacesRequest{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar (\n\t\tm sync.Mutex\n\t\twg sync.WaitGroup\n\t\trec concurrency.AllErrorRecorder\n\t\tkeyspaces = make([]*vtadminpb.Keyspace, len(resp.Keyspaces))\n\t)\n\n\tfor i, ks := range resp.Keyspaces {\n\t\twg.Add(1)\n\t\tgo func(i int, ks *vtctldatapb.Keyspace) {\n\t\t\tdefer wg.Done()\n\n\t\t\tshards, err := c.FindAllShardsInKeyspace(ctx, ks.Name, FindAllShardsInKeyspaceOptions{SkipDial: true})\n\t\t\tif err != nil {\n\t\t\t\trec.RecordError(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tkeyspace := &vtadminpb.Keyspace{\n\t\t\t\tCluster: c.ToProto(),\n\t\t\t\tKeyspace: ks,\n\t\t\t\tShards: shards,\n\t\t\t}\n\n\t\t\tm.Lock()\n\t\t\tdefer m.Unlock()\n\t\t\tkeyspaces[i] = keyspace\n\t\t}(i, ks)\n\t}\n\n\twg.Wait()\n\tif rec.HasErrors() {\n\t\treturn nil, rec.Error()\n\t}\n\n\treturn keyspaces, nil\n}", "func (h *httpCloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}", "func (a *ClusterControllerApiService) GetClustersUsingGET(ctx _context.Context, account string, application string, clusterName string) apiGetClustersUsingGETRequest {\n\treturn apiGetClustersUsingGETRequest{\n\t\tapiService: a,\n\t\tctx: ctx,\n\t\taccount: account,\n\t\tapplication: application,\n\t\tclusterName: clusterName,\n\t}\n}", "func (s *Server) ListClusters() []*envoy_config_cluster_v3.Cluster {\n\treturn resourcesToClusters(s.Clusters.List())\n}", "func RetrieveClusters(manifests string) cluster.Map {\n\tklog.V(1).Info(\"retrieving clusters from manifests\")\n\tclusters := cluster.Map{}\n\tdocuments := yamlutils.SplitDocuments(manifests)\n\tscheme := runtime.NewScheme()\n\tif err := clusterv1alpha1.AddToScheme(scheme); err != nil {\n\t\treturn cluster.Map{}\n\t}\n\tserializer := 
json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme, json.SerializerOptions{Yaml: true})\n\tfor _, document := range documents {\n\t\tclusterObj := clusterv1alpha1.Cluster{}\n\t\tif _, _, err := serializer.Decode([]byte(document), nil, &clusterObj); err != nil || clusterObj.TypeMeta.Kind != \"Cluster\" {\n\t\t\tcontinue\n\t\t}\n\t\tinternalCluster, err := cluster.NewClusterFromv1alpha1(&clusterObj)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tclusters[internalCluster.Name] = internalCluster\n\t}\n\treturn clusters\n}", "func (s *RpcClient) GetClusterNodes(ctx context.Context) ([]GetClusterNodesResponse, error) {\n\tres := struct {\n\t\tGeneralResponse\n\t\tResult []GetClusterNodesResponse `json:\"result\"`\n\t}{}\n\terr := s.request(ctx, \"getClusterNodes\", []interface{}{}, &res)\n\tif err != nil {\n\t\treturn []GetClusterNodesResponse{}, err\n\t}\n\tif res.Error != nil {\n\t\treturn []GetClusterNodesResponse{}, errors.New(res.Error.Message)\n\t}\n\treturn res.Result, nil\n}", "func (clgCtl *CatalogueController) GetAll(w http.ResponseWriter, r *http.Request) {\n\tlog.Printf(\"Retrieving all Catalogues.\\n\")\n\n\tclgRepo := cataloguerepository.NewCatalogueRepository()\n\tresult, err := clgRepo.GetAll(r.Context())\n\tif err != nil {\n\t\tclgCtl.WriteResponse(w, http.StatusInternalServerError, false, nil, err.Error())\n\t\treturn\n\t}\n\n\tclgCtl.WriteResponse(w, http.StatusOK, true, result, \"\")\n}", "func (c *cloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}", "func (a *HyperflexApiService) GetHyperflexClusterList(ctx context.Context) ApiGetHyperflexClusterListRequest {\n\treturn ApiGetHyperflexClusterListRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func (c *cluster) GetAllNodes() *[]Node {\n c.lock.RLock()\n defer c.lock.RUnlock()\n\n result := make([]Node, 0, len(c.nodes))\n for _, n := range c.nodes {\n result = append(result, Node{n.Name, n.Nid, n.GossipAddr, n.RestAddr, n.State, n.StateCtr})\n }\n\n return &result\n}", "func (c *Cluster) List() dcs.List {\n\treturn dcs.List{\n\t\tOptions: c.cfg.DCOptions,\n\t\tDomains: c.domains,\n\t}\n}", "func ListAllClusterComponents(client kclient.ClientInterface, namespace string) ([]OdoComponent, error) {\n\n\t// Get all the dynamic resources available\n\tresourceList, err := client.GetAllResourcesFromSelector(\"\", namespace)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to list all dynamic resources required to find components: %w\", err)\n\t}\n\n\tvar components []OdoComponent\n\n\tfor _, resource := range resourceList {\n\n\t\t// ignore \"PackageManifest\" as they are not components, it is just a record in OpenShift catalog.\n\t\tif resource.GetKind() == \"PackageManifest\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar labels, annotations map[string]string\n\n\t\t// Retrieve the labels and annotations from the unstructured resource output\n\t\tif resource.GetLabels() != nil {\n\t\t\tlabels = resource.GetLabels()\n\t\t}\n\t\tif resource.GetAnnotations() != nil {\n\t\t\tannotations = resource.GetAnnotations()\n\t\t}\n\n\t\t// Figure out the correct name to use\n\t\t// if there is no instance label, we SKIP the resource as\n\t\t// it is not a component essential for Kubernetes.\n\t\tname := odolabels.GetComponentName(labels)\n\t\tif name == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Get the component type (if there is any..)\n\t\tcomponentType, err := odolabels.GetProjectType(nil, annotations)\n\t\tif err != nil || componentType == \"\" {\n\t\t\tcomponentType = 
StateTypeUnknown\n\t\t}\n\n\t\t// Get the managedBy label\n\t\t// IMPORTANT. If \"managed-by\" label is BLANK, it is most likely an operator\n\t\t// or a non-component. We do not want to show these in the list of components\n\t\t// so we skip them if there is no \"managed-by\" label.\n\n\t\tmanagedBy := odolabels.GetManagedBy(labels)\n\t\tif managedBy == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Generate the appropriate \"component\" with all necessary information\n\t\tcomponent := OdoComponent{\n\t\t\tName: name,\n\t\t\tManagedBy: managedBy,\n\t\t\tType: componentType,\n\t\t}\n\t\tmode := odolabels.GetMode(labels)\n\t\tfound := false\n\t\tfor v, otherCompo := range components {\n\t\t\tif component.Name == otherCompo.Name {\n\t\t\t\tfound = true\n\t\t\t\tif mode != \"\" {\n\t\t\t\t\tcomponents[v].Modes[mode] = true\n\t\t\t\t}\n\t\t\t\tif otherCompo.Type == StateTypeUnknown && component.Type != StateTypeUnknown {\n\t\t\t\t\tcomponents[v].Type = component.Type\n\t\t\t\t}\n\t\t\t\tif otherCompo.ManagedBy == StateTypeUnknown && component.ManagedBy != StateTypeUnknown {\n\t\t\t\t\tcomponents[v].ManagedBy = component.ManagedBy\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tif mode != \"\" {\n\t\t\t\tcomponent.Modes = map[string]bool{\n\t\t\t\t\tmode: true,\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tcomponent.Modes = map[string]bool{}\n\t\t\t}\n\t\t\tcomponents = append(components, component)\n\t\t}\n\t}\n\n\treturn components, nil\n}", "func ExampleClustersClient_Get() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armservicefabric.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewClustersClient().Get(ctx, \"resRg\", \"myCluster\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.Cluster = armservicefabric.Cluster{\n\t// \tName: to.Ptr(\"myCluster\"),\n\t// \tType: to.Ptr(\"Microsoft.ServiceFabric/clusters\"),\n\t// \tEtag: to.Ptr(\"W/\\\"636462502169240745\\\"\"),\n\t// \tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resRg/providers/Microsoft.ServiceFabric/clusters/myCluster\"),\n\t// \tLocation: to.Ptr(\"eastus\"),\n\t// \tTags: map[string]*string{\n\t// \t},\n\t// \tProperties: &armservicefabric.ClusterProperties{\n\t// \t\tAddOnFeatures: []*armservicefabric.AddOnFeatures{\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesRepairManager),\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesDNSService),\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesBackupRestoreService),\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesResourceMonitorService)},\n\t// \t\t\tAvailableClusterVersions: []*armservicefabric.ClusterVersionDetails{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\t\t\t\tEnvironment: to.Ptr(armservicefabric.ClusterEnvironmentWindows),\n\t// \t\t\t\t\tSupportExpiryUTC: to.Ptr(\"2018-06-15T23:59:59.9999999\"),\n\t// \t\t\t}},\n\t// \t\t\tAzureActiveDirectory: &armservicefabric.AzureActiveDirectory{\n\t// \t\t\t\tClientApplication: to.Ptr(\"d151ad89-4bce-4ae8-b3d1-1dc79679fa75\"),\n\t// \t\t\t\tClusterApplication: to.Ptr(\"5886372e-7bf4-4878-a497-8098aba608ae\"),\n\t// \t\t\t\tTenantID: to.Ptr(\"6abcc6a0-8666-43f1-87b8-172cf86a9f9c\"),\n\t// \t\t\t},\n\t// \t\t\tCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t\t}},\n\t// \t\t\t\tX509StoreName: to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t\t},\n\t// \t\t\tClientCertificateCommonNames: []*armservicefabric.ClientCertificateCommonName{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t// \t\t\t\t\tIsAdmin: to.Ptr(true),\n\t// \t\t\t}},\n\t// \t\t\tClientCertificateThumbprints: []*armservicefabric.ClientCertificateThumbprint{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tCertificateThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t// \t\t\t\t\tIsAdmin: to.Ptr(true),\n\t// \t\t\t}},\n\t// \t\t\tClusterCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\t\tClusterEndpoint: to.Ptr(\"https://eastus.servicefabric.azure.com\"),\n\t// \t\t\tClusterID: to.Ptr(\"92584666-9889-4ae8-8d02-91902923d37f\"),\n\t// \t\t\tClusterState: to.Ptr(armservicefabric.ClusterStateWaitingForNodes),\n\t// \t\t\tDiagnosticsStorageAccountConfig: &armservicefabric.DiagnosticsStorageAccountConfig{\n\t// \t\t\t\tBlobEndpoint: to.Ptr(\"https://diag.blob.core.windows.net/\"),\n\t// \t\t\t\tProtectedAccountKeyName: to.Ptr(\"StorageAccountKey1\"),\n\t// \t\t\t\tQueueEndpoint: to.Ptr(\"https://diag.queue.core.windows.net/\"),\n\t// \t\t\t\tStorageAccountName: to.Ptr(\"diag\"),\n\t// \t\t\t\tTableEndpoint: to.Ptr(\"https://diag.table.core.windows.net/\"),\n\t// \t\t\t},\n\t// \t\t\tFabricSettings: []*armservicefabric.SettingsSectionDescription{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tName: to.Ptr(\"UpgradeService\"),\n\t// \t\t\t\t\tParameters: 
[]*armservicefabric.SettingsParameterDescription{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tName: to.Ptr(\"AppPollIntervalInSeconds\"),\n\t// \t\t\t\t\t\t\tValue: to.Ptr(\"60\"),\n\t// \t\t\t\t\t}},\n\t// \t\t\t}},\n\t// \t\t\tManagementEndpoint: to.Ptr(\"https://myCluster.eastus.cloudapp.azure.com:19080\"),\n\t// \t\t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t// \t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t// \t\t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t// \t\t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t// \t\t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t// \t\t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t// \t\t\t\t\tIsPrimary: to.Ptr(true),\n\t// \t\t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t// \t\t\t}},\n\t// \t\t\tProvisioningState: to.Ptr(armservicefabric.ProvisioningStateSucceeded),\n\t// \t\t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelSilver),\n\t// \t\t\tReverseProxyCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t\t}},\n\t// \t\t\t\tX509StoreName: to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t\t},\n\t// \t\t\tUpgradeDescription: &armservicefabric.ClusterUpgradePolicy{\n\t// \t\t\t\tDeltaHealthPolicy: &armservicefabric.ClusterUpgradeDeltaHealthPolicy{\n\t// \t\t\t\t\tApplicationDeltaHealthPolicies: map[string]*armservicefabric.ApplicationDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\"fabric:/myApp1\": &armservicefabric.ApplicationDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\tDefaultServiceTypeDeltaHealthPolicy: &armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tServiceTypeDeltaHealthPolicies: map[string]*armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\"myServiceType1\": &armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tMaxPercentDeltaUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\tMaxPercentDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\tMaxPercentUpgradeDomainDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t},\n\t// \t\t\t\tForceRestart: to.Ptr(false),\n\t// \t\t\t\tHealthCheckRetryTimeout: to.Ptr(\"00:05:00\"),\n\t// \t\t\t\tHealthCheckStableDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\tHealthCheckWaitDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\tHealthPolicy: &armservicefabric.ClusterHealthPolicy{\n\t// \t\t\t\t\tApplicationHealthPolicies: map[string]*armservicefabric.ApplicationHealthPolicy{\n\t// \t\t\t\t\t\t\"fabric:/myApp1\": &armservicefabric.ApplicationHealthPolicy{\n\t// \t\t\t\t\t\t\tDefaultServiceTypeHealthPolicy: &armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](0),\n\t// 
\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tServiceTypeHealthPolicies: map[string]*armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\"myServiceType1\": &armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](100),\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tMaxPercentUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\tMaxPercentUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t},\n\t// \t\t\t\tUpgradeDomainTimeout: to.Ptr(\"00:15:00\"),\n\t// \t\t\t\tUpgradeReplicaSetCheckTimeout: to.Ptr(\"00:10:00\"),\n\t// \t\t\t\tUpgradeTimeout: to.Ptr(\"01:00:00\"),\n\t// \t\t\t},\n\t// \t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeManual),\n\t// \t\t\tVMImage: to.Ptr(\"Windows\"),\n\t// \t\t},\n\t// \t}\n}" ]
[ "0.7434148", "0.74044824", "0.73684704", "0.7240104", "0.7201324", "0.69922847", "0.6839233", "0.68016887", "0.67140263", "0.6702199", "0.66942024", "0.6630261", "0.6588179", "0.655754", "0.65555376", "0.6553293", "0.65469146", "0.6503564", "0.64957535", "0.6469443", "0.64679", "0.64303166", "0.64240044", "0.64114374", "0.64097565", "0.6384592", "0.6364105", "0.6357152", "0.6299482", "0.62990576", "0.62800044", "0.6271708", "0.62414557", "0.62024707", "0.6173476", "0.61723423", "0.6125123", "0.6116846", "0.6095118", "0.60757285", "0.60706323", "0.6062646", "0.59946924", "0.5984941", "0.5984211", "0.596553", "0.59598625", "0.5954689", "0.59502", "0.5919292", "0.5882154", "0.58758897", "0.5855008", "0.5850258", "0.58241755", "0.58206207", "0.57945454", "0.579018", "0.5768228", "0.57470626", "0.5736392", "0.5722382", "0.5719148", "0.5710975", "0.57011056", "0.5696982", "0.56875473", "0.567696", "0.5675235", "0.5669064", "0.56392366", "0.5630703", "0.56296426", "0.56062174", "0.5596535", "0.5590973", "0.5583916", "0.55823356", "0.5579215", "0.55731744", "0.5560126", "0.5552234", "0.55511576", "0.5549909", "0.5538305", "0.5537107", "0.5523449", "0.5511011", "0.5509964", "0.55097467", "0.54847765", "0.5471833", "0.5471698", "0.5452915", "0.5448061", "0.54432803", "0.5435731", "0.5423732", "0.54234767", "0.542302" ]
0.78800476
0
CreateCluster inserts a new cluster into the database
func (p PGSQLConnection) CreateCluster(cluster *ClusterModel) error {
	tx, err := p.connection.Beginx()
	if err != nil {
		return err
	}
	// Release the connection if the insert fails; Rollback after a
	// successful Commit is a no-op, so the deferred call is safe.
	defer tx.Rollback()

	_, err = tx.NamedExec("INSERT INTO clusters (cluster_name, color) VALUES (:cluster_name, :color)", cluster)
	if err != nil {
		return err
	}

	return tx.Commit()
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func CreateCluster(request *restful.Request, response *restful.Response) {\n\tstart := time.Now()\n\n\tform := CreateClusterForm{}\n\t_ = request.ReadEntity(&form)\n\n\terr := utils.Validate.Struct(&form)\n\tif err != nil {\n\t\tmetrics.ReportRequestAPIMetrics(\"CreateCluster\", request.Request.Method, metrics.ErrStatus, start)\n\t\t_ = response.WriteHeaderAndEntity(400, utils.FormatValidationError(err))\n\t\treturn\n\t}\n\n\tuser := auth.GetUser(request)\n\tcluster := &models.BcsCluster{\n\t\tID: form.ClusterID,\n\t\tCreatorId: user.ID,\n\t}\n\tswitch form.ClusterType {\n\tcase \"k8s\":\n\t\tcluster.ClusterType = BcsK8sCluster\n\tcase \"mesos\":\n\t\tcluster.ClusterType = BcsMesosCluster\n\tcase \"tke\":\n\t\tcluster.ClusterType = BcsTkeCluster\n\t\tif form.TkeClusterID == \"\" || form.TkeClusterRegion == \"\" {\n\t\t\tmetrics.ReportRequestAPIMetrics(\"CreateCluster\", request.Request.Method, metrics.ErrStatus, start)\n\t\t\tblog.Warnf(\"create tke cluster failed, empty tke clusterid or region\")\n\t\t\tmessage := fmt.Sprintf(\"errcode: %d, create tke cluster failed, empty tke clusterid or region\", common.BcsErrApiBadRequest)\n\t\t\tutils.WriteClientError(response, common.BcsErrApiBadRequest, message)\n\t\t\treturn\n\t\t}\n\t\tcluster.TkeClusterId = form.TkeClusterID\n\t\tcluster.TkeClusterRegion = form.TkeClusterRegion\n\tdefault:\n\t\tmetrics.ReportRequestAPIMetrics(\"CreateCluster\", request.Request.Method, metrics.ErrStatus, start)\n\t\tblog.Warnf(\"create failed, cluster type invalid\")\n\t\tmessage := fmt.Sprintf(\"errcode: %d, create failed, cluster type invalid\", common.BcsErrApiBadRequest)\n\t\tutils.WriteClientError(response, common.BcsErrApiBadRequest, message)\n\t\treturn\n\t}\n\n\tclusterInDb := sqlstore.GetCluster(cluster.ID)\n\tif clusterInDb != nil {\n\t\tmetrics.ReportRequestAPIMetrics(\"CreateCluster\", request.Request.Method, metrics.ErrStatus, start)\n\t\tblog.Warnf(\"create cluster failed, cluster [%s] already exist\", cluster.ID)\n\t\tmessage := fmt.Sprintf(\"errcode: %d, create cluster failed, cluster [%s] already exist\", common.BcsErrApiBadRequest, cluster.ID)\n\t\tutils.WriteClientError(response, common.BcsErrApiBadRequest, message)\n\t\treturn\n\t}\n\n\terr = sqlstore.CreateCluster(cluster)\n\tif err != nil {\n\t\tmetrics.ReportRequestAPIMetrics(\"CreateCluster\", request.Request.Method, metrics.ErrStatus, start)\n\t\tblog.Errorf(\"failed to create cluster [%s]: %s\", cluster.ID, err.Error())\n\t\tmessage := fmt.Sprintf(\"errcode: %d, create cluster [%s] failed, error: %s\", common.BcsErrApiInternalDbError, cluster.ID, err.Error())\n\t\tutils.WriteServerError(response, common.BcsErrApiInternalDbError, message)\n\t\treturn\n\t}\n\n\tdata := utils.CreateResponseData(nil, \"success\", *cluster)\n\t_, _ = response.Write([]byte(data))\n\n\tmetrics.ReportRequestAPIMetrics(\"CreateCluster\", request.Request.Method, metrics.SucStatus, start)\n}", "func CreateCluster(c *gin.Context) {\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateCluster, \"Cluster creation is stared\")\n\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateCluster, \"Bind json into CreateClusterRequest struct\")\n\n\t// bind request body to struct\n\tvar createClusterBaseRequest banzaiTypes.CreateClusterRequest\n\tif err := c.BindJSON(&createClusterBaseRequest); err != nil {\n\t\t// bind failed\n\t\tbanzaiUtils.LogError(banzaiConstants.TagCreateCluster, \"Required field is empty: \"+err.Error())\n\t\tcloud.SetResponseBodyJson(c, http.StatusBadRequest, gin.H{\n\t\t\tcloud.JsonKeyStatus: 
http.StatusBadRequest,\n\t\t\tcloud.JsonKeyMessage: \"Required field is empty\",\n\t\t\tcloud.JsonKeyError: err,\n\t\t})\n\t\treturn\n\t} else {\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateCluster, \"Bind succeeded\")\n\t}\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateCluster, \"Searching entry with name:\", createClusterBaseRequest.Name)\n\tvar savedCluster banzaiSimpleTypes.ClusterSimple\n\n\tdatabase.Query(\"SELECT * FROM \"+banzaiSimpleTypes.ClusterSimple.TableName(savedCluster)+\" WHERE name = ?;\",\n\t\tcreateClusterBaseRequest.Name,\n\t\t&savedCluster)\n\n\tif savedCluster.ID != 0 {\n\t\t// duplicated entry\n\t\tmsg := \"Duplicate entry '\" + savedCluster.Name + \"' for key 'name'\"\n\t\tbanzaiUtils.LogError(banzaiConstants.TagCreateCluster, msg)\n\t\tcloud.SetResponseBodyJson(c, http.StatusBadRequest, gin.H{\n\t\t\tcloud.JsonKeyStatus: http.StatusBadRequest,\n\t\t\tcloud.JsonKeyMessage: msg,\n\t\t})\n\t\treturn\n\t}\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateCluster, \"No entity with this name exists. The creation is possible.\")\n\n\tcloudType := createClusterBaseRequest.Cloud\n\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateCluster, \"Cloud type is \", cloudType)\n\n\tswitch cloudType {\n\tcase banzaiConstants.Amazon:\n\t\t// validate and create Amazon cluster\n\t\tawsData := createClusterBaseRequest.Properties.CreateClusterAmazon\n\t\tif isValid, err := awsData.Validate(); isValid && len(err) == 0 {\n\t\t\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateCluster, \"Validation is OK\")\n\t\t\tif isOk, createdCluster := cloud.CreateClusterAmazon(&createClusterBaseRequest, c); isOk {\n\t\t\t\t// update prometheus config..\n\t\t\t\tgo updatePrometheusWithRetryConf(createdCluster)\n\t\t\t}\n\t\t} else {\n\t\t\t// not valid request\n\t\t\tcloud.SetResponseBodyJson(c, http.StatusBadRequest, gin.H{\n\t\t\t\tcloud.JsonKeyStatus: http.StatusBadRequest,\n\t\t\t\tcloud.JsonKeyMessage: err,\n\t\t\t})\n\t\t}\n\tcase banzaiConstants.Azure:\n\t\t// validate and create Azure cluster\n\t\taksData := createClusterBaseRequest.Properties.CreateClusterAzure\n\t\tif isValid, err := aksData.Validate(); isValid && len(err) == 0 {\n\t\t\tif cloud.CreateClusterAzure(&createClusterBaseRequest, c) {\n\t\t\t\t// update prometheus config..\n\t\t\t\tupdatePrometheus()\n\t\t\t}\n\t\t} else {\n\t\t\t// not valid request\n\t\t\tcloud.SetResponseBodyJson(c, http.StatusBadRequest, gin.H{\n\t\t\t\tcloud.JsonKeyStatus: http.StatusBadRequest,\n\t\t\t\tcloud.JsonKeyMessage: err,\n\t\t\t})\n\t\t}\n\tdefault:\n\t\t// wrong cloud type\n\t\tcloud.SendNotSupportedCloudResponse(c, banzaiConstants.TagCreateCluster)\n\t}\n\n}", "func (a *Client) CreateCluster(params *CreateClusterParams, authInfo runtime.ClientAuthInfoWriter) (*CreateClusterCreated, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewCreateClusterParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"CreateCluster\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/api/v1/clusters\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &CreateClusterReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*CreateClusterCreated), nil\n\n}", "func (client *Client) CreateCluster(request *CreateClusterRequest) (response 
*CreateClusterResponse, err error) {\n\tresponse = CreateCreateClusterResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func (e *ECS) CreateCluster(req *CreateClusterReq) (resp *CreateClusterResp, err error) {\n\tif req == nil {\n\t\treturn nil, fmt.Errorf(\"The req params cannot be nil\")\n\t}\n\n\tparams := makeParams(\"CreateCluster\")\n\tparams[\"clusterName\"] = req.ClusterName\n\n\tresp = new(CreateClusterResp)\n\tif err := e.query(params, resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}", "func (c Client) CreateCluster(name string, size int) (Cluster, error) {\n\tvar def struct {\n\t\tClusterName string `json:\"cluster_name\"`\n\t\tClusterSize string `json:\"cluster_size,omitempty\"`\n\t}\n\tdef.ClusterName = name\n\tif size > 0 {\n\t\tdef.ClusterSize = fmt.Sprintf(\"%d\", size)\n\t}\n\tdef_json, err := json.Marshal(def)\n\tif err != nil {\n\t\treturn Cluster{}, err\n\t}\n\n\theaders := make(http.Header)\n\theaders.Set(\"Content-Type\", \"application/json\")\n\theaders.Set(\"Accept\", \"application/json\")\n\tbody, err := c.watsonClient.MakeRequest(\"POST\", c.version+\"/solr_clusters\", bytes.NewReader(def_json), headers)\n\tif err != nil {\n\t\treturn Cluster{}, err\n\t}\n\tvar response Cluster\n\terr = json.Unmarshal(body, &response)\n\treturn response, err\n}", "func CreateCluster(c *cli.Context) error {\n\n\t// On Error delete the cluster. If there createCluster() encounter any error,\n\t// call this function to remove all resources allocated for the cluster so far\n\t// so that they don't linger around.\n\tdeleteCluster := func() {\n\t\tlog.Println(\"ERROR: Cluster creation failed, rolling back...\")\n\t\tif err := DeleteCluster(c); err != nil {\n\t\t\tlog.Printf(\"Error: Failed to delete cluster %s\", c.String(\"name\"))\n\t\t}\n\t}\n\n\t// validate --wait flag\n\tif c.IsSet(\"wait\") && c.Int(\"wait\") < 0 {\n\t\tlog.Fatalf(\"Negative value for '--wait' not allowed (set '%d')\", c.Int(\"wait\"))\n\t}\n\n\t/**********************\n\t *\t\t\t\t\t\t\t\t\t\t*\n\t *\t\tCONFIGURATION\t\t*\n\t * vvvvvvvvvvvvvvvvvv *\n\t **********************/\n\n\t/*\n\t * --name, -n\n\t * Name of the cluster\n\t */\n\n\t// ensure that it's a valid hostname, because it will be part of container names\n\tif err := CheckClusterName(c.String(\"name\")); err != nil {\n\t\treturn err\n\t}\n\n\t// check if the cluster name is already taken\n\tif cluster, err := getClusters(false, c.String(\"name\")); err != nil {\n\t\treturn err\n\t} else if len(cluster) != 0 {\n\t\t// A cluster exists with the same name. 
Return with an error.\n\t\treturn fmt.Errorf(\" Cluster %s already exists\", c.String(\"name\"))\n\t}\n\n\t/*\n\t * --image, -i\n\t * The k3s image used for the k3d node containers\n\t */\n\t// define image\n\timage := c.String(\"image\")\n\t// if no registry was provided, use the default docker.io\n\tif len(strings.Split(image, \"/\")) <= 2 {\n\t\timage = fmt.Sprintf(\"%s/%s\", DefaultRegistry, image)\n\t}\n\n\t/*\n\t * Cluster network\n\t * For proper communication, all k3d node containers have to be in the same docker network\n\t */\n\t// create cluster network\n\tnetworkID, err := createClusterNetwork(c.String(\"name\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"Created cluster network with ID %s\", networkID)\n\n\t/*\n\t * --env, -e\n\t * Environment variables that will be passed into the k3d node containers\n\t */\n\t// environment variables\n\tenv := []string{\"K3S_KUBECONFIG_OUTPUT=/output/kubeconfig.yaml\"}\n\tenv = append(env, c.StringSlice(\"env\")...)\n\tenv = append(env, fmt.Sprintf(\"K3S_CLUSTER_SECRET=%s\", GenerateRandomString(20)))\n\n\t/*\n\t * --label, -l\n\t * Docker container labels that will be added to the k3d node containers\n\t */\n\t// labels\n\tlabelmap, err := mapNodesToLabelSpecs(c.StringSlice(\"label\"), GetAllContainerNames(c.String(\"name\"), DefaultServerCount, c.Int(\"workers\")))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t/*\n\t * Arguments passed on to the k3s server and agent, will be filled later\n\t */\n\tk3AgentArgs := []string{}\n\tk3sServerArgs := []string{}\n\n\t/*\n\t * --api-port, -a\n\t * The port that will be used by the k3s API-Server\n\t * It will be mapped to localhost or to another hist interface, if specified\n\t * If another host is chosen, we also add a tls-san argument for the server to allow connections\n\t */\n\tapiPort, err := parseAPIPort(c.String(\"api-port\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tk3sServerArgs = append(k3sServerArgs, \"--https-listen-port\", apiPort.Port)\n\n\t// When the 'host' is not provided by --api-port, try to fill it using Docker Machine's IP address.\n\tif apiPort.Host == \"\" {\n\t\tapiPort.Host, err = getDockerMachineIp()\n\t\t// IP address is the same as the host\n\t\tapiPort.HostIP = apiPort.Host\n\t\t// In case of error, Log a warning message, and continue on. 
Since it more likely caused by a miss configured\n\t\t// DOCKER_MACHINE_NAME environment variable.\n\t\tif err != nil {\n\t\t\tlog.Warning(\"Failed to get docker machine IP address, ignoring the DOCKER_MACHINE_NAME environment variable setting.\")\n\t\t}\n\t}\n\n\t// Add TLS SAN for non default host name\n\tif apiPort.Host != \"\" {\n\t\tlog.Printf(\"Add TLS SAN for %s\", apiPort.Host)\n\t\tk3sServerArgs = append(k3sServerArgs, \"--tls-san\", apiPort.Host)\n\t}\n\n\t/*\n\t * --server-arg, -x\n\t * Add user-supplied arguments for the k3s server\n\t */\n\tif c.IsSet(\"server-arg\") || c.IsSet(\"x\") {\n\t\tk3sServerArgs = append(k3sServerArgs, c.StringSlice(\"server-arg\")...)\n\t}\n\n\t/*\n\t * --agent-arg\n\t * Add user-supplied arguments for the k3s agent\n\t */\n\tif c.IsSet(\"agent-arg\") {\n\t\tif c.Int(\"workers\") < 1 {\n\t\t\tlog.Warnln(\"--agent-arg supplied, but --workers is 0, so no agents will be created\")\n\t\t}\n\t\tk3AgentArgs = append(k3AgentArgs, c.StringSlice(\"agent-arg\")...)\n\t}\n\n\t/*\n\t * --port, -p, --publish, --add-port\n\t * List of ports, that should be mapped from some or all k3d node containers to the host system (or other interface)\n\t */\n\t// new port map\n\tportmap, err := mapNodesToPortSpecs(c.StringSlice(\"port\"), GetAllContainerNames(c.String(\"name\"), DefaultServerCount, c.Int(\"workers\")))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t/*\n\t * Image Volume\n\t * A docker volume that will be shared by every k3d node container in the cluster.\n\t * This volume will be used for the `import-image` command.\n\t * On it, all node containers can access the image tarball.\n\t */\n\t// create a docker volume for sharing image tarballs with the cluster\n\timageVolume, err := createImageVolume(c.String(\"name\"))\n\tlog.Println(\"Created docker volume \", imageVolume.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t/*\n\t * --volume, -v\n\t * List of volumes: host directory mounts for some or all k3d node containers in the cluster\n\t */\n\tvolumes := c.StringSlice(\"volume\")\n\n\tvolumesSpec, err := NewVolumes(volumes)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvolumesSpec.DefaultVolumes = append(volumesSpec.DefaultVolumes, fmt.Sprintf(\"%s:/images\", imageVolume.Name))\n\n\t/*\n\t * --registry-file\n\t * check if there is a registries file\n\t */\n\tregistriesFile := \"\"\n\tif c.IsSet(\"registries-file\") {\n\t\tregistriesFile = c.String(\"registries-file\")\n\t\tif !fileExists(registriesFile) {\n\t\t\tlog.Fatalf(\"registries-file %q does not exists\", registriesFile)\n\t\t}\n\t} else {\n\t\tregistriesFile, err = getGlobalRegistriesConfFilename()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif !fileExists(registriesFile) {\n\t\t\t// if the default registries file does not exists, go ahead but do not try to load it\n\t\t\tregistriesFile = \"\"\n\t\t}\n\t}\n\n\t/*\n\t * clusterSpec\n\t * Defines, with which specifications, the cluster and the nodes inside should be created\n\t */\n\tclusterSpec := &ClusterSpec{\n\t\tAgentArgs: k3AgentArgs,\n\t\tAPIPort: *apiPort,\n\t\tAutoRestart: c.Bool(\"auto-restart\"),\n\t\tClusterName: c.String(\"name\"),\n\t\tEnv: env,\n\t\tNodeToLabelSpecMap: labelmap,\n\t\tImage: image,\n\t\tNodeToPortSpecMap: portmap,\n\t\tPortAutoOffset: c.Int(\"port-auto-offset\"),\n\t\tRegistriesFile: registriesFile,\n\t\tRegistryEnabled: c.Bool(\"enable-registry\"),\n\t\tRegistryCacheEnabled: c.Bool(\"enable-registry-cache\"),\n\t\tRegistryName: c.String(\"registry-name\"),\n\t\tRegistryPort: 
c.Int(\"registry-port\"),\n\t\tRegistryVolume: c.String(\"registry-volume\"),\n\t\tServerArgs: k3sServerArgs,\n\t\tVolumes: volumesSpec,\n\t}\n\n\t/******************\n\t *\t\t\t\t\t\t\t\t*\n\t *\t\tCREATION\t\t*\n\t * vvvvvvvvvvvvvv\t*\n\t ******************/\n\n\tlog.Printf(\"Creating cluster [%s]\", c.String(\"name\"))\n\n\t/*\n\t * Cluster Directory\n\t */\n\t// create the directory where we will put the kubeconfig file by default (when running `k3d get-config`)\n\tcreateClusterDir(c.String(\"name\"))\n\n\t/* (1)\n\t * Registry (optional)\n\t * Create the (optional) registry container\n\t */\n\tvar registryNameExists *dnsNameCheck\n\tif clusterSpec.RegistryEnabled {\n\t\tregistryNameExists = newAsyncNameExists(clusterSpec.RegistryName, 1*time.Second)\n\t\tif _, err = createRegistry(*clusterSpec); err != nil {\n\t\t\tdeleteCluster()\n\t\t\treturn err\n\t\t}\n\t}\n\n\t/* (2)\n\t * Server\n\t * Create the server node container\n\t */\n\tserverContainerID, err := createServer(clusterSpec)\n\tif err != nil {\n\t\tdeleteCluster()\n\t\treturn err\n\t}\n\n\t/* (2.1)\n\t * Wait\n\t * Wait for k3s server to be done initializing, if wanted\n\t */\n\t// We're simply scanning the container logs for a line that tells us that everything's up and running\n\t// TODO: also wait for worker nodes\n\tif c.IsSet(\"wait\") {\n\t\tif err := waitForContainerLogMessage(serverContainerID, \"Wrote kubeconfig\", c.Int(\"wait\")); err != nil {\n\t\t\tdeleteCluster()\n\t\t\treturn fmt.Errorf(\"ERROR: failed while waiting for server to come up\\n%+v\", err)\n\t\t}\n\t}\n\n\t/* (3)\n\t * Workers\n\t * Create the worker node containers\n\t */\n\t// TODO: do this concurrently in different goroutines\n\tif c.Int(\"workers\") > 0 {\n\t\tlog.Printf(\"Booting %s workers for cluster %s\", strconv.Itoa(c.Int(\"workers\")), c.String(\"name\"))\n\t\tfor i := 0; i < c.Int(\"workers\"); i++ {\n\t\t\tworkerID, err := createWorker(clusterSpec, i)\n\t\t\tif err != nil {\n\t\t\t\tdeleteCluster()\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlog.Printf(\"Created worker with ID %s\\n\", workerID)\n\t\t}\n\t}\n\n\t/* (4)\n\t * Done\n\t * Finished creating resources.\n\t */\n\tlog.Printf(\"SUCCESS: created cluster [%s]\", c.String(\"name\"))\n\n\tif clusterSpec.RegistryEnabled {\n\t\tlog.Printf(\"A local registry has been started as %s:%d\", clusterSpec.RegistryName, clusterSpec.RegistryPort)\n\n\t\texists, err := registryNameExists.Exists()\n\t\tif !exists || err != nil {\n\t\t\tlog.Printf(\"Make sure you have an alias in your /etc/hosts file like '127.0.0.1 %s'\", clusterSpec.RegistryName)\n\t\t}\n\t}\n\n\tlog.Printf(`You can now use the cluster with:\n\nexport KUBECONFIG=\"$(%s get-kubeconfig --name='%s')\"\nkubectl cluster-info`, os.Args[0], c.String(\"name\"))\n\n\treturn nil\n}", "func (a ClustersAPI) Create(cluster httpmodels.CreateReq) (httpmodels.CreateResp, error) {\n\tvar createResp httpmodels.CreateResp\n\n\tresp, err := a.Client.performQuery(http.MethodPost, \"/clusters/create\", cluster, nil)\n\tif err != nil {\n\t\treturn createResp, err\n\t}\n\n\terr = json.Unmarshal(resp, &createResp)\n\treturn createResp, err\n}", "func NewCluster(path string, name string, numShards int, columns int, createTable string, idIndex int) error {\n\tif err := os.MkdirAll(path, 0777); err != nil {\n\t\treturn fmt.Errorf(\"Directory already exists\")\n\t}\n\n\t// convert path to absolute\n\tpath, err := filepath.Abs(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc := &ClusterMetadata{\n\t\tTableName: name,\n\t\tPath: path,\n\t\tNumShards: 
numShards,\n\t\tShards: make([]string, numShards),\n\t\tNumColumns: columns,\n\t\tIdIndex: idIndex,\n\t}\n\n\t// create each shard\n\tfor i := 0; i < numShards; i++ {\n\t\tdbName := \"shard\" + strconv.Itoa(i) + \".db\"\n\t\tdbPath := filepath.Join(path, dbName)\n\n\t\tdb, err := sql.Open(\"sqlite3\", dbPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdb.Exec(createTable)\n\t\tdb.Exec(metadataCreateTable)\n\t\tstmt, err := db.Prepare(metadataInsertInto)\n\t\tstmt.Exec(c.TableName, i)\n\n\t\tc.Shards[i] = dbName\n\t\tdb.Close()\n\t}\n\n\t// write config to JSON\n\tshardfilePath := filepath.Join(path, \"shardfile\")\n\n\tf, err := json.Marshal(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ioutil.WriteFile(shardfilePath, f, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (svc ServerlessClusterService) Create(ctx context.Context,\n\tinput *models.CreateServerlessClusterInput) (*models.Cluster, *Response, error) {\n\tvar cluster models.Cluster\n\tvar graphqlRequest = models.GraphqlRequest{\n\t\tOperation: models.Mutation,\n\t\tName: \"createServerlessCluster\",\n\t\tInput: *input,\n\t\tArgs: nil,\n\t\tResponse: cluster,\n\t}\n\treq, err := svc.client.NewRequest(&graphqlRequest)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := svc.client.Do(ctx, req, &cluster)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn &cluster, resp, nil\n}", "func CreateCluster(clusterName string, options ...CreateOption) error {\n\tflags := &CreateOptions{}\n\tfor _, o := range options {\n\t\to(flags)\n\t}\n\n\t// Check if the cluster name already exists\n\tknown, err := status.IsKnown(clusterName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif known {\n\t\treturn errors.Errorf(\"a cluster with the name %q already exists\", clusterName)\n\t}\n\n\tfmt.Printf(\"Creating cluster %q ...\\n\", clusterName)\n\n\t// attempt to explicitly pull the required node image if it doesn't exist locally\n\t// we don't care if this errors, we'll still try to run which also pulls\n\tensureNodeImage(flags.image)\n\n\thandleErr := func(err error) error {\n\t\t// In case of errors nodes are deleted (except if retain is explicitly set)\n\t\tif !flags.retain {\n\t\t\tif c, err := status.FromDocker(clusterName); err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t} else {\n\t\t\t\tfor _, n := range c.AllNodes() {\n\t\t\t\t\tif err := exec.NewHostCmd(\n\t\t\t\t\t\t\"docker\",\n\t\t\t\t\t\t\"rm\",\n\t\t\t\t\t\t\"-f\", // force the container to be deleted now\n\t\t\t\t\t\t\"-v\", // delete volumes\n\t\t\t\t\t\tn.Name(),\n\t\t\t\t\t).Run(); err != nil {\n\t\t\t\t\t\treturn errors.Wrapf(err, \"failed to delete node %s\", n.Name())\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tlog.Error(err)\n\t\treturn err\n\t}\n\n\t// Create node containers as defined in the kind config\n\tif err := createNodes(\n\t\tclusterName,\n\t\tflags,\n\t); err != nil {\n\t\treturn handleErr(err)\n\t}\n\n\tfmt.Println()\n\tfmt.Printf(\"Nodes creation complete. You can now continue creating a Kubernetes cluster using\\n\")\n\tfmt.Printf(\"kinder do, the kinder swiss knife 🚀!\\n\")\n\n\treturn nil\n}", "func CreateCluster(name string, cloudCred string, kubeconfigPath string, orgID string) {\n\n\tStep(fmt.Sprintf(\"Create cluster [%s] in org [%s]\", name, orgID), func() {\n\t\tbackupDriver := Inst().Backup\n\t\tkubeconfigRaw, err := ioutil.ReadFile(kubeconfigPath)\n\t\texpect(err).NotTo(haveOccurred(),\n\t\t\tfmt.Sprintf(\"Failed to read kubeconfig file from location [%s]. 
Error:[%v]\",\n\t\t\t\tkubeconfigPath, err))\n\n\t\tclusterCreateReq := &api.ClusterCreateRequest{\n\t\t\tCreateMetadata: &api.CreateMetadata{\n\t\t\t\tName: name,\n\t\t\t\tOrgId: orgID,\n\t\t\t},\n\t\t\tKubeconfig: base64.StdEncoding.EncodeToString(kubeconfigRaw),\n\t\t\tCloudCredential: cloudCred,\n\t\t}\n\t\t//ctx, err := backup.GetPxCentralAdminCtx()\n\t\tctx, err := backup.GetAdminCtxFromSecret()\n\t\texpect(err).NotTo(haveOccurred(),\n\t\t\tfmt.Sprintf(\"Failed to fetch px-central-admin ctx: [%v]\",\n\t\t\t\terr))\n\t\t_, err = backupDriver.CreateCluster(ctx, clusterCreateReq)\n\t\texpect(err).NotTo(haveOccurred(),\n\t\t\tfmt.Sprintf(\"Failed to create cluster [%s] in org [%s]. Error : [%v]\",\n\t\t\t\tname, orgID, err))\n\t})\n}", "func CreateCluster(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, z *api.ZookeeperCluster) (*api.ZookeeperCluster, error) {\n\tt.Logf(\"creating zookeeper cluster: %s\", z.Name)\n\terr := f.Client.Create(goctx.TODO(), z, &framework.CleanupOptions{TestContext: ctx, Timeout: CleanupTimeout, RetryInterval: CleanupRetryInterval})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create CR: %v\", err)\n\t}\n\n\tzk := &api.ZookeeperCluster{}\n\terr = f.Client.Get(goctx.TODO(), types.NamespacedName{Namespace: z.Namespace, Name: z.Name}, zk)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to obtain created CR: %v\", err)\n\t}\n\tt.Logf(\"created zookeeper cluster: %s\", zk.Name)\n\treturn z, nil\n}", "func (us *ClusterStore) Create(u *model.Cluster) (err error) {\n\treturn us.db.Create(u).Error\n}", "func (s *BasePlSqlParserListener) EnterCreate_cluster(ctx *Create_clusterContext) {}", "func (c *ClustersController) Create(ctx *app.CreateClustersContext) error {\n\tclustr := repository.Cluster{\n\t\tName: ctx.Payload.Data.Name,\n\t\tType: ctx.Payload.Data.Type,\n\t\tURL: ctx.Payload.Data.APIURL,\n\t\tAppDNS: ctx.Payload.Data.AppDNS,\n\t\tSAToken: ctx.Payload.Data.ServiceAccountToken,\n\t\tSAUsername: ctx.Payload.Data.ServiceAccountUsername,\n\t\tAuthClientID: ctx.Payload.Data.AuthClientID,\n\t\tAuthClientSecret: ctx.Payload.Data.AuthClientSecret,\n\t\tAuthDefaultScope: ctx.Payload.Data.AuthClientDefaultScope,\n\t}\n\tif ctx.Payload.Data.ConsoleURL != nil {\n\t\tclustr.ConsoleURL = *ctx.Payload.Data.ConsoleURL\n\t}\n\tif ctx.Payload.Data.LoggingURL != nil {\n\t\tclustr.LoggingURL = *ctx.Payload.Data.LoggingURL\n\t}\n\tif ctx.Payload.Data.MetricsURL != nil {\n\t\tclustr.MetricsURL = *ctx.Payload.Data.MetricsURL\n\t}\n\tif ctx.Payload.Data.CapacityExhausted != nil {\n\t\tclustr.CapacityExhausted = *ctx.Payload.Data.CapacityExhausted\n\t}\n\tif ctx.Payload.Data.TokenProviderID != nil {\n\t\tclustr.TokenProviderID = *ctx.Payload.Data.TokenProviderID\n\t}\n\tclusterSvc := c.app.ClusterService()\n\terr := clusterSvc.CreateOrSaveCluster(ctx, &clustr)\n\tif err != nil {\n\t\tlog.Error(ctx, map[string]interface{}{\n\t\t\t\"error\": err,\n\t\t}, \"error while creating new cluster configuration\")\n\t\treturn app.JSONErrorResponse(ctx, err)\n\t}\n\tctx.ResponseData.Header().Set(\"Location\", app.ClustersHref(clustr.ClusterID.String()))\n\treturn ctx.Created()\n}", "func (c *AKSCluster) CreateCluster() error {\n\n\tlog := logger.WithFields(logrus.Fields{\"action\": constants.TagCreateCluster})\n\n\t// create profiles model for the request\n\tvar profiles []containerservice.AgentPoolProfile\n\tif nodePools := c.modelCluster.Azure.NodePools; nodePools != nil {\n\t\tfor _, np := range nodePools {\n\t\t\tif np != nil {\n\t\t\t\tcount := 
int32(np.Count)\n\t\t\t\tname := np.Name\n\t\t\t\tprofiles = append(profiles, containerservice.AgentPoolProfile{\n\t\t\t\t\tName: &name,\n\t\t\t\t\tCount: &count,\n\t\t\t\t\tVMSize: containerservice.VMSizeTypes(np.NodeInstanceType),\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\tr := azureCluster.CreateClusterRequest{\n\t\tName: c.modelCluster.Name,\n\t\tLocation: c.modelCluster.Location,\n\t\tResourceGroup: c.modelCluster.Azure.ResourceGroup,\n\t\tKubernetesVersion: c.modelCluster.Azure.KubernetesVersion,\n\t\tProfiles: profiles,\n\t}\n\tclient, err := c.GetAKSClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient.With(log.Logger)\n\n\t// call creation\n\tcreatedCluster, err := azureClient.CreateUpdateCluster(client, &r)\n\tif err != nil {\n\t\t// creation failed\n\t\t// todo status code!??\n\t\treturn err\n\t}\n\t// creation success\n\tlog.Info(\"Cluster created successfully!\")\n\n\tc.azureCluster = &createdCluster.Value\n\n\t// polling cluster\n\tpollingResult, err := azureClient.PollingCluster(client, r.Name, r.ResourceGroup)\n\tif err != nil {\n\t\t// polling error\n\t\t// todo status code!??\n\t\treturn err\n\t}\n\tlog.Info(\"Cluster is ready...\")\n\tc.azureCluster = &pollingResult.Value\n\treturn nil\n}", "func CreateCluster(ctx context.Context, clusterClient containerservice.ManagedClustersClient, parameters ClusterParameters) (status string, err error) {\n\tresourceGroupName := parameters.Name + \"-group\"\n\n\t// create map of containerservice.ManagedClusterAgentPoolProfile from parameters.AgentPools\n\tagentPoolProfiles := make([]containerservice.ManagedClusterAgentPoolProfile, len(parameters.AgentPools))\n\tfor i := range parameters.AgentPools {\n\t\tvar vmSize containerservice.VMSizeTypes\n\n\t\t// get list of available VM size types\n\t\tvmSizeTypes := containerservice.PossibleVMSizeTypesValues()\n\t\tfor j := range vmSizeTypes {\n\t\t\t// convert the vmSizeTypes to a string\n\t\t\ttypeAsStr := string(vmSizeTypes[j])\n\t\t\t// compare input type against available vm size types\n\t\t\tif parameters.AgentPools[i].Type == typeAsStr {\n\t\t\t\tvmSize = vmSizeTypes[j]\n\t\t\t}\n\t\t}\n\t\tif vmSize == \"\" {\n\t\t\treturn \"\", fmt.Errorf(\"invalid VM Size selected\")\n\t\t}\n\n\t\tagentPoolProfiles[i] = containerservice.ManagedClusterAgentPoolProfile{\n\t\t\tName: parameters.AgentPools[i].Name,\n\t\t\tCount: parameters.AgentPools[i].Count,\n\t\t\tVMSize: vmSize,\n\t\t}\n\t}\n\n\tfuture, err := clusterClient.CreateOrUpdate(\n\t\tctx,\n\t\tresourceGroupName,\n\t\tparameters.Name,\n\t\tcontainerservice.ManagedCluster{\n\t\t\tName: &parameters.Name,\n\t\t\tLocation: &parameters.Location,\n\t\t\tManagedClusterProperties: &containerservice.ManagedClusterProperties{\n\t\t\t\tDNSPrefix: &parameters.Name,\n\t\t\t\tKubernetesVersion: &parameters.KubernetesVersion,\n\t\t\t\tAgentPoolProfiles: &agentPoolProfiles,\n\t\t\t\tServicePrincipalProfile: &containerservice.ManagedClusterServicePrincipalProfile{\n\t\t\t\t\tClientID: to.StringPtr(parameters.ClientID),\n\t\t\t\t\tSecret: to.StringPtr(parameters.ClientSecret),\n\t\t\t\t},\n\t\t\t},\n\t\t\tTags: parameters.Tags,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"cannot create aks cluster: %v\", err)\n\t}\n\n\tstatus = future.Status()\n\tif status != \"Creating\" {\n\t\treturn \"\", fmt.Errorf(\"cannot create cluster: %v\", status)\n\t}\n\n\treturn status, nil\n}", "func createCluster(w http.ResponseWriter, r *http.Request, t auth.Token) (err error) {\n\tctx := r.Context()\n\tallowed := permission.Check(t, 
permission.PermClusterCreate)\n\tif !allowed {\n\t\treturn permission.ErrUnauthorized\n\t}\n\n\terr = deprecateFormContentType(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar provCluster provTypes.Cluster\n\terr = ParseJSON(r, &provCluster)\n\tif err != nil {\n\t\treturn err\n\t}\n\tevt, err := event.New(&event.Opts{\n\t\tTarget: event.Target{Type: event.TargetTypeCluster, Value: provCluster.Name},\n\t\tKind: permission.PermClusterCreate,\n\t\tOwner: t,\n\t\tRemoteAddr: r.RemoteAddr,\n\t\tCustomData: event.FormToCustomData(InputFields(r)),\n\t\tAllowed: event.Allowed(permission.PermClusterReadEvents),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { evt.Done(err) }()\n\t_, err = servicemanager.Cluster.FindByName(ctx, provCluster.Name)\n\tif err == nil {\n\t\treturn &tsuruErrors.HTTP{\n\t\t\tCode: http.StatusConflict,\n\t\t\tMessage: \"cluster already exists\",\n\t\t}\n\t}\n\tfor _, poolName := range provCluster.Pools {\n\t\t_, err = pool.GetPoolByName(ctx, poolName)\n\t\tif err != nil {\n\t\t\tif err == pool.ErrPoolNotFound {\n\t\t\t\treturn &tsuruErrors.HTTP{\n\t\t\t\t\tCode: http.StatusNotFound,\n\t\t\t\t\tMessage: err.Error(),\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t}\n\tstreamResponse := strings.HasPrefix(r.Header.Get(\"Accept\"), \"application/x-json-stream\")\n\tif streamResponse {\n\t\tw.Header().Set(\"Content-Type\", \"application/x-json-stream\")\n\t\tkeepAliveWriter := tsuruIo.NewKeepAliveWriter(w, 30*time.Second, \"\")\n\t\tdefer keepAliveWriter.Stop()\n\t\twriter := &tsuruIo.SimpleJsonMessageEncoderWriter{Encoder: json.NewEncoder(keepAliveWriter)}\n\t\tevt.SetLogWriter(writer)\n\t}\n\tprovCluster.Writer = evt\n\terr = servicemanager.Cluster.Create(ctx, provCluster)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\treturn nil\n}", "func (c starterClusterServiceOp) Create(ctx context.Context, input *models.CreateStarterClusterInput) (*models.Cluster, *Response, error) {\n\tvar cluster models.Cluster\n\tvar graphqlRequest = models.GraphqlRequest{\n\t\tName: \"createStarterCluster\",\n\t\tOperation: models.Mutation,\n\t\tInput: *input,\n\t\tArgs: nil,\n\t\tResponse: cluster,\n\t}\n\treq, err := c.client.NewRequest(&graphqlRequest)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := c.client.Do(ctx, req, &cluster)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn &cluster, resp, err\n}", "func (a *ClustersApiService) CreateClusterExecute(r ApiCreateClusterRequest) (CreateClusterResponse, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue CreateClusterResponse\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"ClustersApiService.CreateCluster\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/spaces/{space}/clusters\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"space\"+\"}\", _neturl.PathEscape(parameterToString(r.space, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\tif r.createCluster == nil {\n\t\treturn localVarReturnValue, nil, reportError(\"createCluster is required and must be specified\")\n\t}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := 
[]string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = r.createCluster\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (c *Cluster) Create(ctx context.Context) error {\n\tif c.RootCACert != \"\" && c.Status == \"\" {\n\t\tc.PersistStore.PersistStatus(*c, Init)\n\t}\n\terr := c.createInner(ctx)\n\tif err != nil {\n\t\tif err == ErrClusterExists {\n\t\t\tc.PersistStore.PersistStatus(*c, Running)\n\t\t} else {\n\t\t\tc.PersistStore.PersistStatus(*c, Error)\n\t\t}\n\t\treturn err\n\t}\n\treturn c.PersistStore.PersistStatus(*c, Running)\n}", "func (c *client) CreateCluster(settings CreateClusterSettings) error {\n\tk3sImage, err := getK3sImage(settings.KubernetesVersion)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmdArgs := []string{\n\t\t\"cluster\", \"create\", c.clusterName,\n\t\t\"--kubeconfig-update-default\",\n\t\t\"--timeout\", fmt.Sprintf(\"%ds\", int(c.userTimeout.Seconds())),\n\t\t\"--agents\", fmt.Sprintf(\"%d\", settings.Workers),\n\t\t\"--image\", k3sImage,\n\t}\n\n\tcmdArgs = append(cmdArgs, getCreateClusterArgs(settings)...)\n\n\tcmdArgs = append(cmdArgs, constructArgs(\"--port\", settings.PortMapping)...)\n\t//add further k3d args which are not offered by the Kyma CLI flags\n\tcmdArgs = append(cmdArgs, settings.Args...)\n\n\t_, err = c.runCmd(cmdArgs...)\n\treturn err\n}", "func (a *ClustersApiService) CreateCluster(ctx _context.Context, space string) ApiCreateClusterRequest {\n\treturn ApiCreateClusterRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tspace: space,\n\t}\n}", "func (s *Server) CreateCluster(cluster *api.Cluster) (*api.Cluster, error) {\n\tinstance, err := s.doCreateCluster(convertAPIToRegions(cluster.Regions), convertAPIToStores(cluster.Stores))\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\treturn convertClusterToAPI(instance), nil\n}", "func (t *ClusterRepo) Create(te *models.CustomCluster) error {\n\tvar old models.CustomCluster\n\tif err := t.DB.Where(\"name=?\", te.Name).Find(&old).Error; err != nil {\n\t\tif err == gorm.ErrRecordNotFound {\n\t\t\t// not found error, create new\n\t\t\tif te.ClusterID == \"\" {\n\t\t\t\tte.ClusterID = uuidutil.NewUUID()\n\t\t\t}\n\t\t\tif err := t.DB.Save(te).Error; err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\treturn fmt.Errorf(\"rke cluster %s already exists\", te.Name)\n}", "func (sqlStore *SQLStore) createClusterInstallation(db execer, clusterInstallation *model.ClusterInstallation) error {\n\tclusterInstallation.ID = model.NewID()\n\tclusterInstallation.CreateAt = model.GetMillis()\n\n\t_, err := sqlStore.execBuilder(db, sq.\n\t\tInsert(\"ClusterInstallation\").\n\t\tSetMap(map[string]interface{}{\n\t\t\t\"ID\": clusterInstallation.ID,\n\t\t\t\"ClusterID\": clusterInstallation.ClusterID,\n\t\t\t\"InstallationID\": clusterInstallation.InstallationID,\n\t\t\t\"Namespace\": clusterInstallation.Namespace,\n\t\t\t\"State\": clusterInstallation.State,\n\t\t\t\"CreateAt\": clusterInstallation.CreateAt,\n\t\t\t\"DeleteAt\": 0,\n\t\t\t\"APISecurityLock\": clusterInstallation.APISecurityLock,\n\t\t\t\"LockAcquiredBy\": nil,\n\t\t\t\"LockAcquiredAt\": 0,\n\t\t\t\"IsActive\": clusterInstallation.IsActive,\n\t\t}),\n\t)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to create cluster installation\")\n\t}\n\n\treturn nil\n}", "func (k *Kubeadm) CreateCluster() error {\n\n\tvar (\n\t\tjoinCommand string\n\t\terr error\n\t)\n\n\tif k.ClusterName == \"\" {\n\t\treturn errors.New(\"cluster name is not set\")\n\t}\n\n\terr = k.validateAndUpdateDefault()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstartTime := time.Now()\n\n\tlog.Println(\"total masters - \" + fmt.Sprintf(\"%v\", len(k.MasterNodes)))\n\tlog.Println(\"total workers - \" + fmt.Sprintf(\"%v\", len(k.WorkerNodes)))\n\n\tif k.HaProxyNode != nil {\n\t\tlog.Println(\"total haproxy - \" + fmt.Sprintf(\"%v\", 1))\n\t}\n\n\tmasterCreationStartTime := time.Now()\n\tjoinCommand, err = k.setupMaster(k.determineSetup())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"time taken to create masters = %v\", time.Since(masterCreationStartTime))\n\n\tworkerCreationTime := time.Now()\n\n\tif err := k.setupWorkers(joinCommand); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"time taken to create workers = %v\", time.Since(workerCreationTime))\n\n\tfor _, file := range k.ApplyFiles {\n\t\terr := k.MasterNodes[0].applyFile(file)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif k.Networking != nil {\n\t\tlog.Printf(\"installing networking plugin = %v\", k.Networking.Name)\n\t\terr := k.MasterNodes[0].applyFile(k.Networking.Manifests)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tlog.Println(\"no network plugin found\")\n\t}\n\n\tlog.Printf(\"Time taken to create cluster %v\\n\", time.Since(startTime).String())\n\n\treturn nil\n}", "func (client *ApiECSClient) CreateCluster(clusterName string) (string, error) {\n\tsvcRequest := svc.NewCreateClusterRequest()\n\tsvcRequest.SetClusterName(&clusterName)\n\n\tsvcClient, err := client.serviceClient()\n\tif err != nil {\n\t\tlog.Error(\"Unable to get service client for frontend\", \"err\", err)\n\t\treturn \"\", err\n\t}\n\n\tresp, err := svcClient.CreateCluster(svcRequest)\n\tif err != nil {\n\t\tlog.Crit(\"Could not register\", \"err\", 
err)\n\t\treturn \"\", err\n\t}\n\tlog.Info(\"Created a cluster!\", \"clusterName\", clusterName)\n\treturn *resp.Cluster().ClusterArn(), nil\n\n}", "func (adm Admin) AddCluster(cluster string) bool {\n\tconn := newConnection(adm.ZkSvr)\n\terr := conn.Connect()\n\tif err != nil {\n\t\treturn false\n\t}\n\tdefer conn.Disconnect()\n\n\tkb := KeyBuilder{cluster}\n\t// c = \"/<cluster>\"\n\tc := kb.cluster()\n\n\t// check if cluster already exists\n\texists, err := conn.Exists(c)\n\tmust(err)\n\tif exists {\n\t\treturn false\n\t}\n\n\tconn.CreateEmptyNode(c)\n\n\t// PROPERTYSTORE is an empty node\n\tpropertyStore := fmt.Sprintf(\"/%s/PROPERTYSTORE\", cluster)\n\tconn.CreateEmptyNode(propertyStore)\n\n\t// STATEMODELDEFS has 6 children\n\tstateModelDefs := fmt.Sprintf(\"/%s/STATEMODELDEFS\", cluster)\n\tconn.CreateEmptyNode(stateModelDefs)\n\tconn.CreateRecordWithData(stateModelDefs+\"/LeaderStandby\", HelixDefaultNodes[\"LeaderStandby\"])\n\tconn.CreateRecordWithData(stateModelDefs+\"/MasterSlave\", HelixDefaultNodes[\"MasterSlave\"])\n\tconn.CreateRecordWithData(stateModelDefs+\"/OnlineOffline\", HelixDefaultNodes[\"OnlineOffline\"])\n\tconn.CreateRecordWithData(stateModelDefs+\"/STORAGE_DEFAULT_SM_SCHEMATA\", HelixDefaultNodes[\"STORAGE_DEFAULT_SM_SCHEMATA\"])\n\tconn.CreateRecordWithData(stateModelDefs+\"/SchedulerTaskQueue\", HelixDefaultNodes[\"SchedulerTaskQueue\"])\n\tconn.CreateRecordWithData(stateModelDefs+\"/Task\", HelixDefaultNodes[\"Task\"])\n\n\t// INSTANCES is initailly an empty node\n\tinstances := fmt.Sprintf(\"/%s/INSTANCES\", cluster)\n\tconn.CreateEmptyNode(instances)\n\n\t// CONFIGS has 3 children: CLUSTER, RESOURCE, PARTICIPANT\n\tconfigs := fmt.Sprintf(\"/%s/CONFIGS\", cluster)\n\tconn.CreateEmptyNode(configs)\n\tconn.CreateEmptyNode(configs + \"/PARTICIPANT\")\n\tconn.CreateEmptyNode(configs + \"/RESOURCE\")\n\tconn.CreateEmptyNode(configs + \"/CLUSTER\")\n\n\tclusterNode := NewRecord(cluster)\n\tconn.CreateRecordWithPath(configs+\"/CLUSTER/\"+cluster, clusterNode)\n\n\t// empty ideal states\n\tidealStates := fmt.Sprintf(\"/%s/IDEALSTATES\", cluster)\n\tconn.CreateEmptyNode(idealStates)\n\n\t// empty external view\n\texternalView := fmt.Sprintf(\"/%s/EXTERNALVIEW\", cluster)\n\tconn.CreateEmptyNode(externalView)\n\n\t// empty live instances\n\tliveInstances := fmt.Sprintf(\"/%s/LIVEINSTANCES\", cluster)\n\tconn.CreateEmptyNode(liveInstances)\n\n\t// CONTROLLER has four childrens: [ERRORS, HISTORY, MESSAGES, STATUSUPDATES]\n\tcontroller := fmt.Sprintf(\"/%s/CONTROLLER\", cluster)\n\tconn.CreateEmptyNode(controller)\n\tconn.CreateEmptyNode(controller + \"/ERRORS\")\n\tconn.CreateEmptyNode(controller + \"/HISTORY\")\n\tconn.CreateEmptyNode(controller + \"/MESSAGES\")\n\tconn.CreateEmptyNode(controller + \"/STATUSUPDATES\")\n\n\treturn true\n}", "func (q *QueryResolver) CreateCluster(ctx context.Context) (*ClusterInfoResolver, error) {\n\treturn nil, errors.New(\"Deprecated. 
Please use `px deploy`\")\n}", "func NewCluster(ctx *pulumi.Context,\n\tname string, args *ClusterArgs, opts ...pulumi.ResourceOpt) (*Cluster, error) {\n\tinputs := make(map[string]interface{})\n\tif args == nil {\n\t\tinputs[\"applyImmediately\"] = nil\n\t\tinputs[\"availabilityZones\"] = nil\n\t\tinputs[\"backupRetentionPeriod\"] = nil\n\t\tinputs[\"clusterIdentifier\"] = nil\n\t\tinputs[\"clusterIdentifierPrefix\"] = nil\n\t\tinputs[\"engine\"] = nil\n\t\tinputs[\"engineVersion\"] = nil\n\t\tinputs[\"finalSnapshotIdentifier\"] = nil\n\t\tinputs[\"iamDatabaseAuthenticationEnabled\"] = nil\n\t\tinputs[\"iamRoles\"] = nil\n\t\tinputs[\"kmsKeyArn\"] = nil\n\t\tinputs[\"neptuneClusterParameterGroupName\"] = nil\n\t\tinputs[\"neptuneSubnetGroupName\"] = nil\n\t\tinputs[\"port\"] = nil\n\t\tinputs[\"preferredBackupWindow\"] = nil\n\t\tinputs[\"preferredMaintenanceWindow\"] = nil\n\t\tinputs[\"replicationSourceIdentifier\"] = nil\n\t\tinputs[\"skipFinalSnapshot\"] = nil\n\t\tinputs[\"snapshotIdentifier\"] = nil\n\t\tinputs[\"storageEncrypted\"] = nil\n\t\tinputs[\"tags\"] = nil\n\t\tinputs[\"vpcSecurityGroupIds\"] = nil\n\t} else {\n\t\tinputs[\"applyImmediately\"] = args.ApplyImmediately\n\t\tinputs[\"availabilityZones\"] = args.AvailabilityZones\n\t\tinputs[\"backupRetentionPeriod\"] = args.BackupRetentionPeriod\n\t\tinputs[\"clusterIdentifier\"] = args.ClusterIdentifier\n\t\tinputs[\"clusterIdentifierPrefix\"] = args.ClusterIdentifierPrefix\n\t\tinputs[\"engine\"] = args.Engine\n\t\tinputs[\"engineVersion\"] = args.EngineVersion\n\t\tinputs[\"finalSnapshotIdentifier\"] = args.FinalSnapshotIdentifier\n\t\tinputs[\"iamDatabaseAuthenticationEnabled\"] = args.IamDatabaseAuthenticationEnabled\n\t\tinputs[\"iamRoles\"] = args.IamRoles\n\t\tinputs[\"kmsKeyArn\"] = args.KmsKeyArn\n\t\tinputs[\"neptuneClusterParameterGroupName\"] = args.NeptuneClusterParameterGroupName\n\t\tinputs[\"neptuneSubnetGroupName\"] = args.NeptuneSubnetGroupName\n\t\tinputs[\"port\"] = args.Port\n\t\tinputs[\"preferredBackupWindow\"] = args.PreferredBackupWindow\n\t\tinputs[\"preferredMaintenanceWindow\"] = args.PreferredMaintenanceWindow\n\t\tinputs[\"replicationSourceIdentifier\"] = args.ReplicationSourceIdentifier\n\t\tinputs[\"skipFinalSnapshot\"] = args.SkipFinalSnapshot\n\t\tinputs[\"snapshotIdentifier\"] = args.SnapshotIdentifier\n\t\tinputs[\"storageEncrypted\"] = args.StorageEncrypted\n\t\tinputs[\"tags\"] = args.Tags\n\t\tinputs[\"vpcSecurityGroupIds\"] = args.VpcSecurityGroupIds\n\t}\n\tinputs[\"arn\"] = nil\n\tinputs[\"clusterMembers\"] = nil\n\tinputs[\"clusterResourceId\"] = nil\n\tinputs[\"endpoint\"] = nil\n\tinputs[\"hostedZoneId\"] = nil\n\tinputs[\"readerEndpoint\"] = nil\n\ts, err := ctx.RegisterResource(\"aws:neptune/cluster:Cluster\", name, true, inputs, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Cluster{s: s}, nil\n}", "func (api *clusterAPI) Create(obj *cluster.Cluster) error {\n\tif api.ct.resolver != nil {\n\t\tapicl, err := api.ct.apiClient()\n\t\tif err != nil {\n\t\t\tapi.ct.logger.Errorf(\"Error creating API server client. 
Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = apicl.ClusterV1().Cluster().Create(context.Background(), obj)\n\t\tif err != nil && strings.Contains(err.Error(), \"AlreadyExists\") {\n\t\t\t_, err = apicl.ClusterV1().Cluster().Update(context.Background(), obj)\n\n\t\t}\n\t\treturn err\n\t}\n\n\tapi.ct.handleClusterEvent(&kvstore.WatchEvent{Object: obj, Type: kvstore.Created})\n\treturn nil\n}", "func (adm Admin) AddCluster(cluster string, recreateIfExists bool) bool {\n\tkb := &KeyBuilder{cluster}\n\t// c = \"/<cluster>\"\n\tc := kb.cluster()\n\n\t// check if cluster already exists\n\texists, _, err := adm.zkClient.Exists(c)\n\tif err != nil || (exists && !recreateIfExists) {\n\t\treturn false\n\t}\n\n\tif recreateIfExists {\n\t\tif err := adm.zkClient.DeleteTree(c); err != nil {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tadm.zkClient.CreateEmptyNode(c)\n\n\t// PROPERTYSTORE is an empty node\n\tpropertyStore := fmt.Sprintf(\"/%s/PROPERTYSTORE\", cluster)\n\tadm.zkClient.CreateEmptyNode(propertyStore)\n\n\t// STATEMODELDEFS has 6 children\n\tstateModelDefs := fmt.Sprintf(\"/%s/STATEMODELDEFS\", cluster)\n\tadm.zkClient.CreateEmptyNode(stateModelDefs)\n\tadm.zkClient.CreateDataWithPath(\n\t\tstateModelDefs+\"/LeaderStandby\", []byte(_helixDefaultNodes[\"LeaderStandby\"]))\n\tadm.zkClient.CreateDataWithPath(\n\t\tstateModelDefs+\"/MasterSlave\", []byte(_helixDefaultNodes[\"MasterSlave\"]))\n\tadm.zkClient.CreateDataWithPath(\n\t\tstateModelDefs+\"/OnlineOffline\", []byte(_helixDefaultNodes[StateModelNameOnlineOffline]))\n\tadm.zkClient.CreateDataWithPath(\n\t\tstateModelDefs+\"/STORAGE_DEFAULT_SM_SCHEMATA\",\n\t\t[]byte(_helixDefaultNodes[\"STORAGE_DEFAULT_SM_SCHEMATA\"]))\n\tadm.zkClient.CreateDataWithPath(\n\t\tstateModelDefs+\"/SchedulerTaskQueue\", []byte(_helixDefaultNodes[\"SchedulerTaskQueue\"]))\n\tadm.zkClient.CreateDataWithPath(\n\t\tstateModelDefs+\"/Task\", []byte(_helixDefaultNodes[\"Task\"]))\n\n\t// INSTANCES is initailly an empty node\n\tinstances := fmt.Sprintf(\"/%s/INSTANCES\", cluster)\n\tadm.zkClient.CreateEmptyNode(instances)\n\n\t// CONFIGS has 3 children: CLUSTER, RESOURCE, PARTICIPANT\n\tconfigs := fmt.Sprintf(\"/%s/CONFIGS\", cluster)\n\tadm.zkClient.CreateEmptyNode(configs)\n\tadm.zkClient.CreateEmptyNode(configs + \"/PARTICIPANT\")\n\tadm.zkClient.CreateEmptyNode(configs + \"/RESOURCE\")\n\tadm.zkClient.CreateEmptyNode(configs + \"/CLUSTER\")\n\n\tclusterNode := model.NewMsg(cluster)\n\taccessor := newDataAccessor(adm.zkClient, kb)\n\taccessor.createMsg(configs+\"/CLUSTER/\"+cluster, clusterNode)\n\n\t// empty ideal states\n\tidealStates := fmt.Sprintf(\"/%s/IDEALSTATES\", cluster)\n\tadm.zkClient.CreateEmptyNode(idealStates)\n\n\t// empty external view\n\texternalView := fmt.Sprintf(\"/%s/EXTERNALVIEW\", cluster)\n\tadm.zkClient.CreateEmptyNode(externalView)\n\n\t// empty live instances\n\tliveInstances := fmt.Sprintf(\"/%s/LIVEINSTANCES\", cluster)\n\tadm.zkClient.CreateEmptyNode(liveInstances)\n\n\t// CONTROLLER has four childrens: [ERRORS, HISTORY, MESSAGES, STATUSUPDATES]\n\tcontroller := fmt.Sprintf(\"/%s/CONTROLLER\", cluster)\n\tadm.zkClient.CreateEmptyNode(controller)\n\tadm.zkClient.CreateEmptyNode(controller + \"/ERRORS\")\n\tadm.zkClient.CreateEmptyNode(controller + \"/HISTORY\")\n\tadm.zkClient.CreateEmptyNode(controller + \"/MESSAGES\")\n\tadm.zkClient.CreateEmptyNode(controller + \"/STATUSUPDATES\")\n\n\treturn true\n}", "func (vp *scalewayProvider) CreateCluster(log *logging.Logger, options providers.CreateClusterOptions, dnsProvider 
providers.DnsProvider) error {\n\twg := sync.WaitGroup{}\n\terrors := make(chan error, options.InstanceCount)\n\tinstanceDatas := make(chan instanceData, options.InstanceCount)\n\tfor i := 1; i <= options.InstanceCount; i++ {\n\t\twg.Add(1)\n\t\tgo func(i int) {\n\t\t\tdefer wg.Done()\n\t\t\ttime.Sleep(time.Duration((i - 1)) * time.Second * 10)\n\t\t\tisCore := true\n\t\t\tisLB := true\n\t\t\tinstanceOptions, err := options.NewCreateInstanceOptions(isCore, isLB, i)\n\t\t\tif err != nil {\n\t\t\t\terrors <- maskAny(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tinstance, err := vp.CreateInstance(log, instanceOptions, dnsProvider)\n\t\t\tif err != nil {\n\t\t\t\terrors <- maskAny(err)\n\t\t\t} else {\n\t\t\t\tinstanceDatas <- instanceData{\n\t\t\t\t\tCreateInstanceOptions: instanceOptions,\n\t\t\t\t\tClusterInstance: instance,\n\t\t\t\t\tFleetMetadata: instanceOptions.CreateFleetMetadata(i),\n\t\t\t\t}\n\t\t\t}\n\t\t}(i)\n\t}\n\twg.Wait()\n\tclose(errors)\n\tclose(instanceDatas)\n\terr := <-errors\n\tif err != nil {\n\t\treturn maskAny(err)\n\t}\n\n\tinstances := []instanceData{}\n\tinstanceList := providers.ClusterInstanceList{}\n\tfor data := range instanceDatas {\n\t\tinstances = append(instances, data)\n\t\tinstanceList = append(instanceList, data.ClusterInstance)\n\t}\n\n\tclusterMembers, err := instanceList.AsClusterMemberList(log, nil)\n\tif err != nil {\n\t\treturn maskAny(err)\n\t}\n\n\t// Create tinc network config\n\tif err := instanceList.ReconfigureTincCluster(vp.Logger); err != nil {\n\t\treturn maskAny(err)\n\t}\n\n\tif err := vp.setupInstances(log, instances, clusterMembers); err != nil {\n\t\treturn maskAny(err)\n\t}\n\n\treturn nil\n}", "func (mr *MockRdbClientMockRecorder) CreateCluster(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{arg0, arg1}, arg2...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreateCluster\", reflect.TypeOf((*MockRdbClient)(nil).CreateCluster), varargs...)\n}", "func (s *StorageClusterAPI) Create(w http.ResponseWriter, r *http.Request) {\n\tstorage := &config.StorageCluster{}\n\terr := api.GetJSONBodyFromRequest(r, storage)\n\tif err != nil {\n\t\tapi.Error(w, err)\n\t\treturn\n\t}\n\terr = s.storageClusterService.Save(storage)\n\tif err != nil {\n\t\tapi.Error(w, err)\n\t\treturn\n\t}\n\tapi.NoContent(w)\n}", "func CreateCluster(data []int) Cluster {\n\treturn Cluster{\n\t\tindices: append([]int(nil), data...),\n\t}\n}", "func (c *krakenClusters) Create(krakenCluster *v1alpha1.KrakenCluster) (result *v1alpha1.KrakenCluster, err error) {\n\tresult = &v1alpha1.KrakenCluster{}\n\terr = c.client.Post().\n\t\tNamespace(c.ns).\n\t\tResource(\"krakenclusters\").\n\t\tBody(krakenCluster).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}", "func (m *K3dClusterManager) Create(ctx context.Context, opts CreateOptions) error {\n\tk3sImage := fmt.Sprintf(\"%s:%s\", types.DefaultK3sImageRepo, k3sVersion)\n\n\thostStoragePath := filepath.Join(m.cfg.WorkDir.Path, HostStorageName)\n\tif err := os.MkdirAll(hostStoragePath, 0700); err != nil {\n\t\treturn fmt.Errorf(\"failed to make the host storage directory: %w\", err)\n\t}\n\n\tlocalStorage := fmt.Sprintf(\"%s:%s\",\n\t\thostStoragePath,\n\t\tk3sLocalStoragePath)\n\tvolumes := []string{\n\t\tlocalStorage,\n\t}\n\n\t// If /dev/mapper exists, we'll automatically map it into the cluster\n\t// controller.\n\tif _, err := os.Stat(\"/dev/mapper\"); !os.IsNotExist(err) {\n\t\tvolumes = append(volumes, \"/dev/mapper:/dev/mapper:ro\")\n\t}\n\n\texposeAPI := 
types.ExposeAPI{\n\t\tHost: types.DefaultAPIHost,\n\t\tHostIP: types.DefaultAPIHost,\n\t\tPort: types.DefaultAPIPort,\n\t}\n\n\tregistryPortMapping := fmt.Sprintf(\"%d:%d\", opts.ImageRegistryPort, opts.ImageRegistryPort)\n\n\tserverNode := &types.Node{\n\t\tRole: types.ServerRole,\n\t\tImage: k3sImage,\n\t\tServerOpts: types.ServerOpts{\n\t\t\tExposeAPI: exposeAPI,\n\t\t},\n\t\tVolumes: volumes,\n\t\tPorts: []string{registryPortMapping},\n\t}\n\n\tnodes := []*types.Node{\n\t\tserverNode,\n\t}\n\n\tfor i := 0; i < WorkerCount; i++ {\n\t\tnode := &types.Node{\n\t\t\tRole: types.AgentRole,\n\t\t\tImage: k3sImage,\n\t\t\tArgs: agentArgs,\n\t\t\tVolumes: volumes,\n\t\t}\n\n\t\tnodes = append(nodes, node)\n\t}\n\n\tnetwork := types.ClusterNetwork{\n\t\tName: NetworkName,\n\t}\n\n\tlbHostPort := DefaultLoadBalancerHostPort\n\tif opts.LoadBalancerHostPort != 0 {\n\t\tlbHostPort = opts.LoadBalancerHostPort\n\t}\n\n\tlbPortMapping := fmt.Sprintf(\"%d:%d\", lbHostPort, DefaultLoadBalancerNodePort)\n\n\tclusterConfig := &types.Cluster{\n\t\tName: ClusterName,\n\t\tServerLoadBalancer: &types.Node{\n\t\t\tRole: types.LoadBalancerRole,\n\t\t\tPorts: []string{lbPortMapping},\n\t\t},\n\t\tNodes: nodes,\n\t\tCreateClusterOpts: &types.ClusterCreateOpts{\n\t\t\tWaitForServer: true,\n\t\t},\n\t\tNetwork: network,\n\t\tExposeAPI: exposeAPI,\n\t}\n\n\tif err := k3dcluster.ClusterCreate(ctx, m.runtime, clusterConfig); err != nil {\n\t\treturn fmt.Errorf(\"failed to create cluster: %w\", err)\n\t}\n\n\treturn nil\n}", "func Create(req clusterapi.Request) (clusterapi.ClusterAPI, error) {\n\t// Validates parameters\n\tif req.Name == \"\" {\n\t\treturn nil, fmt.Errorf(\"Invalid parameter req.Name: can't be empty\")\n\t}\n\tif req.CIDR == \"\" {\n\t\treturn nil, fmt.Errorf(\"Invalid parameter req.CIDR: can't be empty\")\n\t}\n\n\t// We need at first the Metadata container to be present\n\terr := utils.CreateMetadataContainer()\n\tif err != nil {\n\t\tfmt.Printf(\"failed to create Object Container: %s\\n\", err.Error())\n\t}\n\n\tvar network *pb.Network\n\tvar instance clusterapi.ClusterAPI\n\n\tlog.Printf(\"Creating infrastructure for cluster '%s'\", req.Name)\n\n\ttenant, err := utils.GetCurrentTenant()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Creates network\n\tlog.Printf(\"Creating Network 'net-%s'\", req.Name)\n\treq.Name = strings.ToLower(req.Name)\n\tnetwork, err = utils.CreateNetwork(\"net-\"+req.Name, req.CIDR)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Failed to create Network '%s': %s\", req.Name, err.Error())\n\t\treturn nil, err\n\t}\n\n\tswitch req.Flavor {\n\tcase Flavor.DCOS:\n\t\treq.NetworkID = network.ID\n\t\treq.Tenant = tenant\n\t\tinstance, err = dcos.NewCluster(req)\n\t\tif err != nil {\n\t\t\t//utils.DeleteNetwork(network.ID)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tlog.Printf(\"Cluster '%s' created and initialized successfully\", req.Name)\n\treturn instance, nil\n}", "func (d *deployer) CreateCluster(c *clusterv1.Cluster, machines []*clusterv1.Machine, enableMachineController bool) error {\n\tmaster := util.GetMaster(machines)\n\tif master == nil {\n\t\treturn fmt.Errorf(\"error creating master vm, no master found\")\n\t}\n\n\tif err := d.actuator.Create(master); err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"Created master %s\", master.Name)\n\n\tif err := d.setMasterIP(master); err != nil {\n\t\treturn fmt.Errorf(\"unable to get master IP: %v\", err)\n\t}\n\n\tif err := d.copyKubeConfig(master); err != nil {\n\t\treturn fmt.Errorf(\"unable to write kubeconfig: %v\", 
err)\n\t}\n\n\tif err := d.createMachineCRD(machines); err != nil {\n\t\treturn err\n\t}\n\n\tif enableMachineController {\n\t\tif err := d.actuator.CreateMachineController(machines); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tlog.Printf(\"The [%s] cluster has been created successfully!\", c.Name)\n\tlog.Print(\"You can now `kubectl get nodes`\")\n\n\treturn nil\n}", "func (mcr *MiddlewareClusterRepo) Create(middlewareCluster metadata.MiddlewareCluster) (metadata.MiddlewareCluster, error) {\n\tsql := `insert into t_meta_middleware_cluster_info(cluster_name, owner_id, env_id) values(?, ?, ?);`\n\tlog.Debugf(\"metadata MiddlewareClusterRepo.Create() insert sql: %s\", sql)\n\t// execute\n\t_, err := mcr.Execute(sql,\n\t\tmiddlewareCluster.(*MiddlewareClusterInfo).ClusterName,\n\t\tmiddlewareCluster.(*MiddlewareClusterInfo).OwnerID,\n\t\tmiddlewareCluster.(*MiddlewareClusterInfo).EnvID,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// get id\n\tid, err := mcr.GetID(middlewareCluster.GetClusterName(), middlewareCluster.GetEnvID())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// get entity\n\treturn mcr.GetByID(id)\n}", "func (s *ClusterListener) Create(inctx context.Context, in *protocol.ClusterCreateRequest) (_ *protocol.ClusterResponse, err error) {\n\tdefer fail.OnExitConvertToGRPCStatus(inctx, &err)\n\tdefer fail.OnExitWrapError(inctx, &err, \"cannot create cluster\")\n\n\tif s == nil {\n\t\treturn nil, fail.InvalidInstanceError()\n\t}\n\tif in == nil {\n\t\treturn nil, fail.InvalidParameterCannotBeNilError(\"in\")\n\t}\n\tif inctx == nil {\n\t\treturn nil, fail.InvalidParameterCannotBeNilError(\"inctx\")\n\t}\n\n\tname := in.GetName()\n\tjob, xerr := PrepareJob(inctx, in.GetTenantId(), fmt.Sprintf(\"/cluster/%s/create\", name))\n\tif xerr != nil {\n\t\treturn nil, xerr\n\t}\n\tdefer job.Close()\n\n\tctx := job.Context()\n\n\tcfg, xerr := job.Service().GetConfigurationOptions(ctx)\n\tif xerr != nil {\n\t\treturn nil, xerr\n\t}\n\n\tin.OperatorUsername = cfg.GetString(\"OperatorUsername\")\n\treq, xerr := converters.ClusterRequestFromProtocolToAbstract(in)\n\tif xerr != nil {\n\t\treturn nil, xerr\n\t}\n\n\tif req.Tenant == \"\" {\n\t\treq.Tenant = job.Tenant()\n\t}\n\n\thandler := handlers.NewClusterHandler(job)\n\tinstance, xerr := handler.Create(*req)\n\tif xerr != nil {\n\t\treturn nil, xerr\n\t}\n\n\treturn instance.ToProtocol(ctx)\n}", "func (fgsc *FakeGKESDKClient) create(project, location string, rb *container.CreateClusterRequest) (*container.Operation, error) {\n\tparent := fmt.Sprintf(\"projects/%s/locations/%s\", project, location)\n\tname := rb.Cluster.Name\n\tif cls, ok := fgsc.clusters[parent]; ok {\n\t\tfor _, cl := range cls {\n\t\t\tif cl.Name == name {\n\t\t\t\treturn nil, errors.New(\"cluster already exists\")\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfgsc.clusters[parent] = make([]*container.Cluster, 0)\n\t}\n\tcluster := &container.Cluster{\n\t\tName: name,\n\t\tLocation: location,\n\t\tStatus: \"RUNNING\",\n\t}\n\n\tfgsc.clusters[parent] = append(fgsc.clusters[parent], cluster)\n\treturn fgsc.newOp(), nil\n}", "func (s *ClusterStorage) Insert(ctx context.Context, cluster *types.Cluster) error {\n\n\tlog.V(logLevel).Debugf(\"storage:etcd:cluster:> insert: %v\", cluster)\n\n\tif cluster == nil {\n\t\treturn errors.New(store.ErrStructArgIsNil)\n\t}\n\n\tclient, destroy, err := getClient(ctx)\n\tif err != nil {\n\t\tlog.V(logLevel).Errorf(\"storage:etcd:cluster:> insert err: %s\", err.Error())\n\t\treturn err\n\t}\n\tdefer destroy()\n\n\tkeyMeta := 
keyCreate(clusterStorage, \"meta\")\n\tif err := client.Create(ctx, keyMeta, cluster.Meta, nil, 0); err != nil {\n\t\tlog.V(logLevel).Errorf(\"storage:etcd:cluster:> insert err: %s\", err.Error())\n\t\treturn err\n\t}\n\n\tkeyStatus := keyCreate(clusterStorage, \"status\")\n\tif err := client.Create(ctx, keyStatus, cluster.Status, nil, 0); err != nil {\n\t\tlog.V(logLevel).Errorf(\"storage:etcd:cluster:> insert err: %s\", err.Error())\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (k Kind) CreateCluster() error {\n\tcmd := kindCommand(\"kind create cluster --config /etc/kind/config.yml\")\n\tif err := cmd.Run(); err != nil {\n\t\treturn err\n\t}\n\n\tcmd = kindCommand(\"patch-kubeconfig.sh\")\n\tif err := cmd.Run(); err != nil {\n\t\treturn err\n\t}\n\n\t// This command is necessary to fix the coredns loop detected when using user-defined docker network.\n\t// In that case /etc/resolv.conf use 127.0.0.11 as DNS and CoreDNS thinks it is talking to itself which is wrong.\n\t// This IP is the docker internal DNS so it is safe to disable the loop check.\n\tcmd = kindCommand(\"sh -c 'kubectl -n kube-system get configmap/coredns -o yaml | grep -v loop | kubectl replace -f -'\")\n\terr := cmd.Run()\n\n\treturn err\n}", "func (mcs *MySQLClusterService) Create(fields map[string]interface{}) error {\n\t// generate new map\n\t_, clusterNameExists := fields[clusterNameStruct]\n\t_, envIDExists := fields[envIDStruct]\n\n\tif !clusterNameExists || !envIDExists {\n\t\treturn message.NewMessage(\n\t\t\tmessage.ErrFieldNotExists,\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"%s and %s\",\n\t\t\t\tclusterNameStruct,\n\t\t\t\tenvIDStruct))\n\t}\n\n\t// create a new entity\n\tmysqlClusterInfo, err := NewMySQLClusterInfoWithMapAndRandom(fields)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// insert into middleware\n\tmysqlCluster, err := mcs.MySQLClusterRepo.Create(mysqlClusterInfo)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmcs.MySQLClusters = append(mcs.MySQLClusters, mysqlCluster)\n\treturn nil\n}", "func CreateKindCluster(clusterName string) error {\n\tlb, err := actions.SetUpLoadBalancer(clusterName)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to create load balancer\")\n\t}\n\n\tlbipv4, _, err := lb.IP()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to get ELB IP\")\n\t}\n\n\tcpMounts := []cri.Mount{\n\t\t{\n\t\t\tContainerPath: \"/var/run/docker.sock\",\n\t\t\tHostPath: \"/var/run/docker.sock\",\n\t\t},\n\t\t{\n\n\t\t\tContainerPath: \"/var/lib/docker\",\n\t\t\tHostPath: \"/var/lib/docker\",\n\t\t},\n\t}\n\n\tcp, err := actions.CreateControlPlane(clusterName, fmt.Sprintf(\"%s-control-plane\", clusterName), lbipv4, \"v1.14.2\", cpMounts)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"couldn't create control plane\")\n\t}\n\n\t// Remove taint from the management cluster\n\tif err := cp.Command(\"kubectl\", \"--kubeconfig=/etc/kubernetes/admin.conf\", \"taint\", \"nodes\", \"--all\", \"node-role.kubernetes.io/master-\").Run(); err != nil {\n\t\treturn errors.Wrap(err, \"failed to remove taint from master\")\n\t}\n\n\tif !nodes.WaitForReady(cp, time.Now().Add(5*time.Minute)) {\n\t\treturn errors.New(\"control plane was not ready in 5 minutes\")\n\t}\n\n\treturn nil\n}", "func (r *ProjectsInstancesClustersService) Create(parent string, cluster *Cluster) *ProjectsInstancesClustersCreateCall {\n\tc := &ProjectsInstancesClustersCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.parent = parent\n\tc.cluster = cluster\n\treturn c\n}", "func NewCluster(ctx *pulumi.Context,\n\tname string, 
args *ClusterArgs, opts ...pulumi.ResourceOption) (*Cluster, error) {\n\tif args == nil {\n\t\targs = &ClusterArgs{}\n\t}\n\tvar resource Cluster\n\terr := ctx.RegisterResource(\"aws:docdb/cluster:Cluster\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (p EksProvisioner) Create(dryRun bool) error {\n\n\tclusterExists, err := p.clusterExists()\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tif clusterExists {\n\t\tlog.Logger.Debugf(\"An EKS cluster already exists called '%s'. Won't recreate it...\",\n\t\t\tp.GetStack().GetConfig().GetCluster())\n\t\treturn nil\n\t}\n\n\ttemplatedVars, err := p.stack.GetTemplatedVars(nil, map[string]interface{}{})\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tlog.Logger.Debugf(\"Templated stack config vars: %#v\", templatedVars)\n\n\targs := []string{\"create\", \"cluster\"}\n\targs = parameteriseValues(args, p.eksConfig.Params.Global)\n\targs = parameteriseValues(args, p.eksConfig.Params.CreateCluster)\n\n\tconfigFilePath, err := p.writeConfigFile()\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tif configFilePath != \"\" {\n\t\targs = append(args, []string{\"-f\", configFilePath}...)\n\t}\n\n\t_, err = printer.Fprintf(\"Creating EKS cluster (this may take some time)...\\n\")\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\terr = utils.ExecCommandUnbuffered(p.eksConfig.Binary, args, map[string]string{},\n\t\tos.Stdout, os.Stderr, \"\", 0, 0, dryRun)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tif !dryRun {\n\t\tlog.Logger.Infof(\"EKS cluster created\")\n\n\t\terr = p.renameKubeContext()\n\t\tif err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t}\n\n\tp.stack.GetStatus().SetStartedThisRun(true)\n\t// only sleep before checking the cluster for readiness if we started it\n\tp.stack.GetStatus().SetSleepBeforeReadyCheck(eksSleepSecondsBeforeReadyCheck)\n\n\treturn nil\n}", "func CreateClusterAzure(request *banzaiTypes.CreateClusterRequest, c *gin.Context) (bool, *banzaiSimpleTypes.ClusterSimple) {\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateCluster, \"Start create cluster (azure)\")\n\n\tif request == nil {\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateCluster, \"Create request is <nil>\")\n\t\treturn false, nil\n\t}\n\n\tcluster2Db := banzaiSimpleTypes.ClusterSimple{\n\t\tName: request.Name,\n\t\tLocation: request.Location,\n\t\tNodeInstanceType: request.NodeInstanceType,\n\t\tCloud: request.Cloud,\n\t\tAzure: banzaiSimpleTypes.AzureClusterSimple{\n\t\t\tResourceGroup: request.Properties.CreateClusterAzure.Node.ResourceGroup,\n\t\t\tAgentCount: request.Properties.CreateClusterAzure.Node.AgentCount,\n\t\t\tAgentName: request.Properties.CreateClusterAzure.Node.AgentName,\n\t\t\tKubernetesVersion: request.Properties.CreateClusterAzure.Node.KubernetesVersion,\n\t\t},\n\t}\n\n\tr := azureCluster.CreateClusterRequest{\n\t\tName: cluster2Db.Name,\n\t\tLocation: cluster2Db.Location,\n\t\tVMSize: cluster2Db.NodeInstanceType,\n\t\tResourceGroup: cluster2Db.Azure.ResourceGroup,\n\t\tAgentCount: cluster2Db.Azure.AgentCount,\n\t\tAgentName: cluster2Db.Azure.AgentName,\n\t\tKubernetesVersion: cluster2Db.Azure.KubernetesVersion,\n\t}\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateCluster, \"Call azure client\")\n\n\t// call creation\n\t_, err := azureClient.CreateUpdateCluster(r)\n\tif err != nil {\n\t\t// creation failed\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateCluster, \"Cluster creation failed!\", 
err.Message)\n\t\tSetResponseBodyJson(c, err.StatusCode, gin.H{\n\t\t\tJsonKeyStatus: err.StatusCode,\n\t\t\tJsonKeyMessage: err.Message,\n\t\t})\n\t\treturn false, nil\n\t} else {\n\t\t// creation success\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateCluster, \"Cluster created successfully!\")\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateCluster, \"Save create cluster into database\")\n\n\t\tif err := database.Save(&cluster2Db).Error; err != nil {\n\t\t\tDbSaveFailed(c, err, cluster2Db.Name)\n\t\t\treturn false, nil\n\t\t}\n\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateCluster, \"Save create cluster into database succeeded\")\n\n\t\t// polling cluster\n\t\tpollingRes, err := azureClient.PollingCluster(r.Name, r.ResourceGroup)\n\t\tif err != nil {\n\t\t\t// polling error\n\t\t\tSetResponseBodyJson(c, err.StatusCode, err)\n\t\t\treturn false, nil\n\t\t} else {\n\t\t\t// polling success\n\t\t\tSetResponseBodyJson(c, pollingRes.StatusCode, gin.H{\n\t\t\t\tJsonKeyStatus: pollingRes.StatusCode,\n\t\t\t\tJsonKeyResourceId: cluster2Db.ID,\n\t\t\t\tJsonKeyData: pollingRes.Value,\n\t\t\t})\n\t\t\treturn true, &cluster2Db\n\t\t}\n\t}\n\n}", "func NewCluster(ctx *pulumi.Context,\n\tname string, args *ClusterArgs, opts ...pulumi.ResourceOpt) (*Cluster, error) {\n\tif args == nil || args.BrokerNodeGroupInfo == nil {\n\t\treturn nil, errors.New(\"missing required argument 'BrokerNodeGroupInfo'\")\n\t}\n\tif args == nil || args.ClusterName == nil {\n\t\treturn nil, errors.New(\"missing required argument 'ClusterName'\")\n\t}\n\tif args == nil || args.KafkaVersion == nil {\n\t\treturn nil, errors.New(\"missing required argument 'KafkaVersion'\")\n\t}\n\tif args == nil || args.NumberOfBrokerNodes == nil {\n\t\treturn nil, errors.New(\"missing required argument 'NumberOfBrokerNodes'\")\n\t}\n\tinputs := make(map[string]interface{})\n\tif args == nil {\n\t\tinputs[\"brokerNodeGroupInfo\"] = nil\n\t\tinputs[\"clientAuthentication\"] = nil\n\t\tinputs[\"clusterName\"] = nil\n\t\tinputs[\"configurationInfo\"] = nil\n\t\tinputs[\"encryptionInfo\"] = nil\n\t\tinputs[\"enhancedMonitoring\"] = nil\n\t\tinputs[\"kafkaVersion\"] = nil\n\t\tinputs[\"numberOfBrokerNodes\"] = nil\n\t\tinputs[\"tags\"] = nil\n\t} else {\n\t\tinputs[\"brokerNodeGroupInfo\"] = args.BrokerNodeGroupInfo\n\t\tinputs[\"clientAuthentication\"] = args.ClientAuthentication\n\t\tinputs[\"clusterName\"] = args.ClusterName\n\t\tinputs[\"configurationInfo\"] = args.ConfigurationInfo\n\t\tinputs[\"encryptionInfo\"] = args.EncryptionInfo\n\t\tinputs[\"enhancedMonitoring\"] = args.EnhancedMonitoring\n\t\tinputs[\"kafkaVersion\"] = args.KafkaVersion\n\t\tinputs[\"numberOfBrokerNodes\"] = args.NumberOfBrokerNodes\n\t\tinputs[\"tags\"] = args.Tags\n\t}\n\tinputs[\"arn\"] = nil\n\tinputs[\"bootstrapBrokers\"] = nil\n\tinputs[\"bootstrapBrokersTls\"] = nil\n\tinputs[\"currentVersion\"] = nil\n\tinputs[\"zookeeperConnectString\"] = nil\n\ts, err := ctx.RegisterResource(\"aws:msk/cluster:Cluster\", name, true, inputs, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Cluster{s: s}, nil\n}", "func Create(ctx context.Context, client *v1.ServiceClient, clusterID string, opts *CreateOpts) (*v1.ResponseResult, error) {\n\tcreateNodegroupOpts := struct {\n\t\tNodegroup *CreateOpts `json:\"nodegroup\"`\n\t}{\n\t\tNodegroup: opts,\n\t}\n\trequestBody, err := json.Marshal(createNodegroupOpts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\turl := strings.Join([]string{client.Endpoint, v1.ResourceURLCluster, clusterID, 
v1.ResourceURLNodegroup}, \"/\")\n\tresponseResult, err := client.DoRequest(ctx, http.MethodPost, url, bytes.NewReader(requestBody))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif responseResult.Err != nil {\n\t\terr = responseResult.Err\n\t}\n\n\treturn responseResult, err\n}", "func NewCluster(name string, newGroup NewGroup, raftBind, raftDir string) *Cluster {\n\tslots := make(map[int]*Slot, SlotNum)\n\tfor i := 0; i < SlotNum; i++ {\n\t\tslots[i] = NewSlot(i, SlotStateOffline, nil, nil)\n\t}\n\treturn &Cluster{\n\t\tname: name,\n\t\tslots: slots,\n\t\tnewGroup: newGroup,\n\t\tgroups: make(map[int]Group),\n\t\traftBind: raftBind,\n\t\traftDir: raftDir,\n\t}\n}", "func createCluster(\n\tctx context.Context,\n\tc *cli.Context,\n\tcfgHelper *cmdutils.ConfigHelper,\n\thost host.Host,\n\tpubsub *pubsub.PubSub,\n\tdht *dual.DHT,\n\tstore ds.Datastore,\n\traftStaging bool,\n) (*ipfscluster.Cluster, error) {\n\n\tcfgs := cfgHelper.Configs()\n\tcfgMgr := cfgHelper.Manager()\n\tcfgBytes, err := cfgMgr.ToDisplayJSON()\n\tcheckErr(\"getting configuration string\", err)\n\tlogger.Debugf(\"Configuration:\\n%s\\n\", cfgBytes)\n\n\tctx, err = tag.New(ctx, tag.Upsert(observations.HostKey, host.ID().Pretty()))\n\tcheckErr(\"tag context with host id\", err)\n\n\terr = observations.SetupMetrics(cfgs.Metrics)\n\tcheckErr(\"setting up Metrics\", err)\n\n\ttracer, err := observations.SetupTracing(cfgs.Tracing)\n\tcheckErr(\"setting up Tracing\", err)\n\n\tvar apis []ipfscluster.API\n\tif cfgMgr.IsLoadedFromJSON(config.API, cfgs.Restapi.ConfigKey()) {\n\t\tvar api *rest.API\n\t\t// Do NOT enable default Libp2p API endpoint on CRDT\n\t\t// clusters. Collaborative clusters are likely to share the\n\t\t// secret with untrusted peers, thus the API would be open for\n\t\t// anyone.\n\t\tif cfgHelper.GetConsensus() == cfgs.Raft.ConfigKey() {\n\t\t\tapi, err = rest.NewAPIWithHost(ctx, cfgs.Restapi, host)\n\t\t} else {\n\t\t\tapi, err = rest.NewAPI(ctx, cfgs.Restapi)\n\t\t}\n\t\tcheckErr(\"creating REST API component\", err)\n\t\tapis = append(apis, api)\n\n\t}\n\n\tif cfgMgr.IsLoadedFromJSON(config.API, cfgs.Pinsvcapi.ConfigKey()) {\n\t\tpinsvcapi, err := pinsvcapi.NewAPI(ctx, cfgs.Pinsvcapi)\n\t\tcheckErr(\"creating Pinning Service API component\", err)\n\n\t\tapis = append(apis, pinsvcapi)\n\t}\n\n\tif cfgMgr.IsLoadedFromJSON(config.API, cfgs.Ipfsproxy.ConfigKey()) {\n\t\tproxy, err := ipfsproxy.New(cfgs.Ipfsproxy)\n\t\tcheckErr(\"creating IPFS Proxy component\", err)\n\n\t\tapis = append(apis, proxy)\n\t}\n\n\tconnector, err := ipfshttp.NewConnector(cfgs.Ipfshttp)\n\tcheckErr(\"creating IPFS Connector component\", err)\n\n\tvar informers []ipfscluster.Informer\n\tif cfgMgr.IsLoadedFromJSON(config.Informer, cfgs.DiskInf.ConfigKey()) {\n\t\tdiskInf, err := disk.NewInformer(cfgs.DiskInf)\n\t\tcheckErr(\"creating disk informer\", err)\n\t\tinformers = append(informers, diskInf)\n\t}\n\tif cfgMgr.IsLoadedFromJSON(config.Informer, cfgs.TagsInf.ConfigKey()) {\n\t\ttagsInf, err := tags.New(cfgs.TagsInf)\n\t\tcheckErr(\"creating tags informer\", err)\n\t\tinformers = append(informers, tagsInf)\n\t}\n\n\tif cfgMgr.IsLoadedFromJSON(config.Informer, cfgs.PinQueueInf.ConfigKey()) {\n\t\tpinQueueInf, err := pinqueue.New(cfgs.PinQueueInf)\n\t\tcheckErr(\"creating pinqueue informer\", err)\n\t\tinformers = append(informers, pinQueueInf)\n\t}\n\n\t// For legacy compatibility we need to make the allocator\n\t// automatically compatible with informers that have been loaded. 
For\n\t// simplicity we assume that anyone that does not specify an allocator\n\t// configuration (legacy configs), will be using \"freespace\"\n\tif !cfgMgr.IsLoadedFromJSON(config.Allocator, cfgs.BalancedAlloc.ConfigKey()) {\n\t\tcfgs.BalancedAlloc.AllocateBy = []string{\"freespace\"}\n\t}\n\talloc, err := balanced.New(cfgs.BalancedAlloc)\n\tcheckErr(\"creating allocator\", err)\n\n\tcons, err := setupConsensus(\n\t\tcfgHelper,\n\t\thost,\n\t\tdht,\n\t\tpubsub,\n\t\tstore,\n\t\traftStaging,\n\t)\n\tif err != nil {\n\t\tstore.Close()\n\t\tcheckErr(\"setting up Consensus\", err)\n\t}\n\n\tvar peersF func(context.Context) ([]peer.ID, error)\n\tif cfgHelper.GetConsensus() == cfgs.Raft.ConfigKey() {\n\t\tpeersF = cons.Peers\n\t}\n\n\ttracker := stateless.New(cfgs.Statelesstracker, host.ID(), cfgs.Cluster.Peername, cons.State)\n\tlogger.Debug(\"stateless pintracker loaded\")\n\n\tmon, err := pubsubmon.New(ctx, cfgs.Pubsubmon, pubsub, peersF)\n\tif err != nil {\n\t\tstore.Close()\n\t\tcheckErr(\"setting up PeerMonitor\", err)\n\t}\n\n\treturn ipfscluster.NewCluster(\n\t\tctx,\n\t\thost,\n\t\tdht,\n\t\tcfgs.Cluster,\n\t\tstore,\n\t\tcons,\n\t\tapis,\n\t\tconnector,\n\t\ttracker,\n\t\tmon,\n\t\talloc,\n\t\tinformers,\n\t\ttracer,\n\t)\n}", "func CreateCreateClusterRequest() (request *CreateClusterRequest) {\n\trequest = &CreateClusterRequest{\n\t\tRoaRequest: &requests.RoaRequest{},\n\t}\n\trequest.InitWithApiInfo(\"CS\", \"2015-12-15\", \"CreateCluster\", \"/clusters\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (c *ECS) CreateCluster(input *CreateClusterInput) (output *CreateClusterOutput, err error) {\n\treq, out := c.CreateClusterRequest(input)\n\toutput = out\n\terr = req.Send()\n\treturn\n}", "func NewCreateClusterRequestWithoutParam() *CreateClusterRequest {\n\n return &CreateClusterRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/regions/{regionId}/clusters\",\n Method: \"POST\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}", "func (client RoverClusterClient) createRoverCluster(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/roverClusters\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateRoverClusterResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\tapiReferenceLink := \"https://docs.oracle.com/iaas/api/#/en/rover/20201210/RoverCluster/CreateRoverCluster\"\n\t\terr = common.PostProcessServiceError(err, \"RoverCluster\", \"CreateRoverCluster\", apiReferenceLink)\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (st *GlusterStorage) createCluster(h *heketiclient.Client) (*heketiapi.ClusterInfoResponse, error) {\n\tvar err error\n\tvar hcluster *heketiapi.ClusterInfoResponse\n\n\t// Wait maximum of one minute\n\tfor count := 0; count < max_loops; count++ {\n\t\thcluster, err = h.ClusterCreate()\n\t\tif err != nil {\n\t\t\ttime.Sleep(max_wait)\n\t\t} else {\n\t\t\treturn hcluster, nil\n\t\t}\n\t}\n\n\treturn nil, err\n}", "func CreateHadoopCluster(resourceGroup, clusterName string, info StorageAccountInfo) (*hdinsight.Cluster, error) {\n\tclient, err := getClustersClient()\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\t// the default duration is 15 minutes which is just a tad too short\n\tclient.PollingDuration = 20 * time.Minute\n\tutil.PrintAndLog(\"creating hadoop cluster\")\n\tfuture, err := client.Create(context.Background(), resourceGroup, clusterName, hdinsight.ClusterCreateParametersExtended{\n\t\tLocation: to.StringPtr(config.Location()),\n\t\tProperties: &hdinsight.ClusterCreateProperties{\n\t\t\tClusterVersion: to.StringPtr(\"3.6\"),\n\t\t\tOsType: hdinsight.Linux,\n\t\t\tTier: hdinsight.Standard,\n\t\t\tClusterDefinition: &hdinsight.ClusterDefinition{\n\t\t\t\tKind: to.StringPtr(\"hadoop\"),\n\t\t\t\tConfigurations: map[string]map[string]interface{}{\n\t\t\t\t\t\"gateway\": {\n\t\t\t\t\t\t\"restAuthCredential.isEnabled\": true,\n\t\t\t\t\t\t\"restAuthCredential.username\": \"admin\",\n\t\t\t\t\t\t\"restAuthCredential.password\": \"Thisisalamepasswordthatwillberemoved2.\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tComputeProfile: &hdinsight.ComputeProfile{\n\t\t\t\tRoles: &[]hdinsight.Role{\n\t\t\t\t\thdinsight.Role{\n\t\t\t\t\t\tName: to.StringPtr(\"headnode\"),\n\t\t\t\t\t\tTargetInstanceCount: to.Int32Ptr(2),\n\t\t\t\t\t\tHardwareProfile: &hdinsight.HardwareProfile{\n\t\t\t\t\t\t\tVMSize: to.StringPtr(\"Large\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tOsProfile: &hdinsight.OsProfile{\n\t\t\t\t\t\t\tLinuxOperatingSystemProfile: &hdinsight.LinuxOperatingSystemProfile{\n\t\t\t\t\t\t\t\tUsername: to.StringPtr(\"clusteruser\"),\n\t\t\t\t\t\t\t\tPassword: to.StringPtr(\"Thisisalamepasswordthatwillberemoved1.\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\thdinsight.Role{\n\t\t\t\t\t\tName: to.StringPtr(\"workernode\"),\n\t\t\t\t\t\tTargetInstanceCount: to.Int32Ptr(1),\n\t\t\t\t\t\tHardwareProfile: &hdinsight.HardwareProfile{\n\t\t\t\t\t\t\tVMSize: to.StringPtr(\"Large\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tOsProfile: &hdinsight.OsProfile{\n\t\t\t\t\t\t\tLinuxOperatingSystemProfile: &hdinsight.LinuxOperatingSystemProfile{\n\t\t\t\t\t\t\t\tUsername: to.StringPtr(\"clusteruser\"),\n\t\t\t\t\t\t\t\tPassword: to.StringPtr(\"Thisisalamepasswordthatwillberemoved1.\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\thdinsight.Role{\n\t\t\t\t\t\tName: to.StringPtr(\"zookeepernode\"),\n\t\t\t\t\t\tTargetInstanceCount: to.Int32Ptr(3),\n\t\t\t\t\t\tHardwareProfile: &hdinsight.HardwareProfile{\n\t\t\t\t\t\t\tVMSize: to.StringPtr(\"Small\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tOsProfile: &hdinsight.OsProfile{\n\t\t\t\t\t\t\tLinuxOperatingSystemProfile: &hdinsight.LinuxOperatingSystemProfile{\n\t\t\t\t\t\t\t\tUsername: to.StringPtr(\"clusteruser\"),\n\t\t\t\t\t\t\t\tPassword: to.StringPtr(\"Thisisalamepasswordthatwillberemoved1.\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tStorageProfile: &hdinsight.StorageProfile{\n\t\t\t\tStorageaccounts: &[]hdinsight.StorageAccount{\n\t\t\t\t\thdinsight.StorageAccount{\n\t\t\t\t\t\tName: &info.Name,\n\t\t\t\t\t\tContainer: &info.Container,\n\t\t\t\t\t\tIsDefault: to.BoolPtr(true),\n\t\t\t\t\t\tKey: &info.Key,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to create cluster\")\n\t}\n\tutil.PrintAndLog(\"waiting for hadoop cluster to finish deploying, this will take a while...\")\n\terr = future.WaitForCompletionRef(context.Background(), client.Client)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed waiting for cluster creation\")\n\t}\n\tc, err := future.Result(*client)\n\treturn &c, err\n}", "func (s *BasePlSqlParserListener) 
ExitCreate_cluster(ctx *Create_clusterContext) {}", "func (client *Client) CreateClusterWithHostPool(request *CreateClusterWithHostPoolRequest) (response *CreateClusterWithHostPoolResponse, err error) {\n\tresponse = CreateCreateClusterWithHostPoolResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func CreateK3SRKE2Cluster(client *rancher.Client, rke2Cluster *apisV1.Cluster) (*v1.SteveAPIObject, error) {\n\tcluster, err := client.Steve.SteveType(ProvisioningSteveResouceType).Create(rke2Cluster)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = kwait.Poll(500*time.Millisecond, 2*time.Minute, func() (done bool, err error) {\n\t\tclient, err = client.ReLogin()\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\t_, err = client.Steve.SteveType(ProvisioningSteveResouceType).ByID(cluster.ID)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t} else {\n\t\t\treturn true, nil\n\t\t}\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient.Session.RegisterCleanupFunc(func() error {\n\t\tadminClient, err := rancher.NewClient(client.RancherConfig.AdminToken, client.Session)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tprovKubeClient, err := adminClient.GetKubeAPIProvisioningClient()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\twatchInterface, err := provKubeClient.Clusters(cluster.ObjectMeta.Namespace).Watch(context.TODO(), metav1.ListOptions{\n\t\t\tFieldSelector: \"metadata.name=\" + cluster.ObjectMeta.Name,\n\t\t\tTimeoutSeconds: &defaults.WatchTimeoutSeconds,\n\t\t})\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tclient, err = client.ReLogin()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = client.Steve.SteveType(ProvisioningSteveResouceType).Delete(cluster)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn wait.WatchWait(watchInterface, func(event watch.Event) (ready bool, err error) {\n\t\t\tcluster := event.Object.(*apisV1.Cluster)\n\t\t\tif event.Type == watch.Error {\n\t\t\t\treturn false, fmt.Errorf(\"there was an error deleting cluster\")\n\t\t\t} else if event.Type == watch.Deleted {\n\t\t\t\treturn true, nil\n\t\t\t} else if cluster == nil {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t\treturn false, nil\n\t\t})\n\t})\n\n\treturn cluster, nil\n}", "func CreateZKCluster(t *testing.T, k8client client.Client, z *zkapi.ZookeeperCluster) (*zkapi.ZookeeperCluster, error) {\n\tlog.Printf(\"creating zookeeper cluster: %s\", z.Name)\n\terr := k8client.Create(goctx.TODO(), z)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create CR: %v\", err)\n\t}\n\n\tzookeeper := &zkapi.ZookeeperCluster{}\n\terr = k8client.Get(goctx.TODO(), types.NamespacedName{Namespace: z.Namespace, Name: z.Name}, zookeeper)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to obtain created CR: %v\", err)\n\t}\n\tlog.Printf(\"created zookeeper cluster: %s\", z.Name)\n\treturn zookeeper, nil\n}", "func CreateRKE1Cluster(client *rancher.Client, rke1Cluster *management.Cluster) (*management.Cluster, error) {\n\tcluster, err := client.Management.Cluster.Create(rke1Cluster)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient, err = client.ReLogin()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient.Session.RegisterCleanupFunc(func() error {\n\t\tadminClient, err := rancher.NewClient(client.RancherConfig.AdminToken, client.Session)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tclusterResp, err := client.Management.Cluster.ByID(cluster.ID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\twatchInterface, err := 
adminClient.GetManagementWatchInterface(management.ClusterType, metav1.ListOptions{\n\t\t\tFieldSelector: \"metadata.name=\" + clusterResp.ID,\n\t\t\tTimeoutSeconds: &defaults.WatchTimeoutSeconds,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tclient, err = client.ReLogin()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = client.Management.Cluster.Delete(clusterResp)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn wait.WatchWait(watchInterface, func(event watch.Event) (ready bool, err error) {\n\t\t\tif event.Type == watch.Error {\n\t\t\t\treturn false, fmt.Errorf(\"there was an error deleting cluster\")\n\t\t\t} else if event.Type == watch.Deleted {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t\treturn false, nil\n\t\t})\n\t})\n\n\treturn cluster, nil\n}", "func GenerateCreateClusterInput(cr *svcapitypes.Cluster) *svcsdk.CreateClusterInput {\n\tres := &svcsdk.CreateClusterInput{}\n\n\tif cr.Spec.ForProvider.CapacityProviders != nil {\n\t\tf0 := []*string{}\n\t\tfor _, f0iter := range cr.Spec.ForProvider.CapacityProviders {\n\t\t\tvar f0elem string\n\t\t\tf0elem = *f0iter\n\t\t\tf0 = append(f0, &f0elem)\n\t\t}\n\t\tres.SetCapacityProviders(f0)\n\t}\n\tif cr.Spec.ForProvider.ClusterName != nil {\n\t\tres.SetClusterName(*cr.Spec.ForProvider.ClusterName)\n\t}\n\tif cr.Spec.ForProvider.Configuration != nil {\n\t\tf2 := &svcsdk.ClusterConfiguration{}\n\t\tif cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration != nil {\n\t\t\tf2f0 := &svcsdk.ExecuteCommandConfiguration{}\n\t\t\tif cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.KMSKeyID != nil {\n\t\t\t\tf2f0.SetKmsKeyId(*cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.KMSKeyID)\n\t\t\t}\n\t\t\tif cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.LogConfiguration != nil {\n\t\t\t\tf2f0f1 := &svcsdk.ExecuteCommandLogConfiguration{}\n\t\t\t\tif cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.LogConfiguration.CloudWatchEncryptionEnabled != nil {\n\t\t\t\t\tf2f0f1.SetCloudWatchEncryptionEnabled(*cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.LogConfiguration.CloudWatchEncryptionEnabled)\n\t\t\t\t}\n\t\t\t\tif cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.LogConfiguration.CloudWatchLogGroupName != nil {\n\t\t\t\t\tf2f0f1.SetCloudWatchLogGroupName(*cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.LogConfiguration.CloudWatchLogGroupName)\n\t\t\t\t}\n\t\t\t\tif cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.LogConfiguration.S3BucketName != nil {\n\t\t\t\t\tf2f0f1.SetS3BucketName(*cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.LogConfiguration.S3BucketName)\n\t\t\t\t}\n\t\t\t\tif cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.LogConfiguration.S3EncryptionEnabled != nil {\n\t\t\t\t\tf2f0f1.SetS3EncryptionEnabled(*cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.LogConfiguration.S3EncryptionEnabled)\n\t\t\t\t}\n\t\t\t\tif cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.LogConfiguration.S3KeyPrefix != nil {\n\t\t\t\t\tf2f0f1.SetS3KeyPrefix(*cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.LogConfiguration.S3KeyPrefix)\n\t\t\t\t}\n\t\t\t\tf2f0.SetLogConfiguration(f2f0f1)\n\t\t\t}\n\t\t\tif cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.Logging != nil 
{\n\t\t\t\tf2f0.SetLogging(*cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.Logging)\n\t\t\t}\n\t\t\tf2.SetExecuteCommandConfiguration(f2f0)\n\t\t}\n\t\tres.SetConfiguration(f2)\n\t}\n\tif cr.Spec.ForProvider.DefaultCapacityProviderStrategy != nil {\n\t\tf3 := []*svcsdk.CapacityProviderStrategyItem{}\n\t\tfor _, f3iter := range cr.Spec.ForProvider.DefaultCapacityProviderStrategy {\n\t\t\tf3elem := &svcsdk.CapacityProviderStrategyItem{}\n\t\t\tif f3iter.Base != nil {\n\t\t\t\tf3elem.SetBase(*f3iter.Base)\n\t\t\t}\n\t\t\tif f3iter.CapacityProvider != nil {\n\t\t\t\tf3elem.SetCapacityProvider(*f3iter.CapacityProvider)\n\t\t\t}\n\t\t\tif f3iter.Weight != nil {\n\t\t\t\tf3elem.SetWeight(*f3iter.Weight)\n\t\t\t}\n\t\t\tf3 = append(f3, f3elem)\n\t\t}\n\t\tres.SetDefaultCapacityProviderStrategy(f3)\n\t}\n\tif cr.Spec.ForProvider.Settings != nil {\n\t\tf4 := []*svcsdk.ClusterSetting{}\n\t\tfor _, f4iter := range cr.Spec.ForProvider.Settings {\n\t\t\tf4elem := &svcsdk.ClusterSetting{}\n\t\t\tif f4iter.Name != nil {\n\t\t\t\tf4elem.SetName(*f4iter.Name)\n\t\t\t}\n\t\t\tif f4iter.Value != nil {\n\t\t\t\tf4elem.SetValue(*f4iter.Value)\n\t\t\t}\n\t\t\tf4 = append(f4, f4elem)\n\t\t}\n\t\tres.SetSettings(f4)\n\t}\n\tif cr.Spec.ForProvider.Tags != nil {\n\t\tf5 := []*svcsdk.Tag{}\n\t\tfor _, f5iter := range cr.Spec.ForProvider.Tags {\n\t\t\tf5elem := &svcsdk.Tag{}\n\t\t\tif f5iter.Key != nil {\n\t\t\t\tf5elem.SetKey(*f5iter.Key)\n\t\t\t}\n\t\t\tif f5iter.Value != nil {\n\t\t\t\tf5elem.SetValue(*f5iter.Value)\n\t\t\t}\n\t\t\tf5 = append(f5, f5elem)\n\t\t}\n\t\tres.SetTags(f5)\n\t}\n\n\treturn res\n}", "func NewCluster(ctx *pulumi.Context,\n\tname string, args *ClusterArgs, opts ...pulumi.ResourceOption) (*Cluster, error) {\n\tif args == nil {\n\t\targs = &ClusterArgs{}\n\t}\n\n\tvar resource Cluster\n\terr := ctx.RegisterRemoteComponentResource(\"eks:index:Cluster\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func CreateClusterRole(name string) *rbacv1.ClusterRole {\n\treturn &rbacv1.ClusterRole{\n\t\tTypeMeta: genTypeMeta(gvk.ClusterRole),\n\t\tObjectMeta: genObjectMeta(name, false),\n\t\tRules: []rbacv1.PolicyRule{\n\t\t\t{\n\t\t\t\tAPIGroups: []string{\n\t\t\t\t\t\"stable.example.com\",\n\t\t\t\t},\n\t\t\t\tResources: []string{\n\t\t\t\t\t\"crontabs\",\n\t\t\t\t},\n\t\t\t\tVerbs: []string{\n\t\t\t\t\t\"get\",\n\t\t\t\t\t\"list\",\n\t\t\t\t\t\"watch\",\n\t\t\t\t\t\"create\",\n\t\t\t\t\t\"update\",\n\t\t\t\t\t\"patch\",\n\t\t\t\t\t\"delete\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func Create(client *golangsdk.ServiceClient, opts CreateOptsBuilder, clusterId string) (r CreateResult) {\n\tb, err := opts.ToSnapshotCreateMap()\n\tif err != nil {\n\t\tr.Err = err\n\t\treturn\n\t}\n\t_, r.Err = client.Post(createURL(client, clusterId), b, &r.Body, &golangsdk.RequestOpts{\n\t\tOkCodes: []int{201},\n\t})\n\treturn\n}", "func (api *clusterAPI) SyncCreate(obj *cluster.Cluster) error {\n\tnewObj := obj\n\tevtType := kvstore.Created\n\tvar writeErr error\n\tif api.ct.resolver != nil {\n\t\tapicl, err := api.ct.apiClient()\n\t\tif err != nil {\n\t\t\tapi.ct.logger.Errorf(\"Error creating API server clent. 
Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\n\t\tnewObj, writeErr = apicl.ClusterV1().Cluster().Create(context.Background(), obj)\n\t\tif writeErr != nil && strings.Contains(writeErr.Error(), \"AlreadyExists\") {\n\t\t\tnewObj, writeErr = apicl.ClusterV1().Cluster().Update(context.Background(), obj)\n\t\t\tevtType = kvstore.Updated\n\t\t}\n\t}\n\n\tif writeErr == nil {\n\t\tapi.ct.handleClusterEvent(&kvstore.WatchEvent{Object: newObj, Type: evtType})\n\t}\n\treturn writeErr\n}", "func (sqlStore *SQLStore) CreateClusterInstallation(clusterInstallation *model.ClusterInstallation) error {\n\treturn sqlStore.createClusterInstallation(sqlStore.db, clusterInstallation)\n}", "func (c *Controller) processClusterNew(ecs *ecsv1.KubernetesCluster) error {\n\tdeployMode := ecs.Spec.Cluster.DeployMode\n\n\tvar err error\n\tswitch deployMode {\n\tcase ecsv1.BinaryDeployMode:\n\t\terr = c.sshInstaller.ClusterNew(ecs)\n\tcase ecsv1.ContainerDeployMode:\n\t\terr = c.grpcInstaller.ClusterNew(ecs)\n\t}\n\n\tif err != nil {\n\t\tglog.Errorf(\"install cluster %s failed with %v\", ecs.Name, err)\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func NewCluster() *Cluster {\n\treturn &Cluster{}\n}", "func NewCluster() *Cluster {\n\treturn &Cluster{}\n}", "func CreateBKCluster(t *testing.T, k8client client.Client, b *bkapi.BookkeeperCluster) (*bkapi.BookkeeperCluster, error) {\n\tlog.Printf(\"creating bookkeeper cluster: %s\", b.Name)\n\tb.Spec.EnvVars = \"bookkeeper-configmap\"\n\tb.Spec.ZookeeperUri = \"zookeeper-client:2181\"\n\tb.Spec.Probes.LivenessProbe.PeriodSeconds = 10\n\tb.Spec.Probes.ReadinessProbe.PeriodSeconds = 10\n\tb.Spec.Probes.LivenessProbe.TimeoutSeconds = 15\n\tb.Spec.Probes.ReadinessProbe.TimeoutSeconds = 15\n\terr := k8client.Create(goctx.TODO(), b)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create CR: %v\", err)\n\t}\n\n\tbookkeeper := &bkapi.BookkeeperCluster{}\n\terr = k8client.Get(goctx.TODO(), types.NamespacedName{Namespace: b.Namespace, Name: b.Name}, bookkeeper)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to obtain created CR: %v\", err)\n\t}\n\tlog.Printf(\"created bookkeeper cluster: %s\", b.Name)\n\treturn bookkeeper, nil\n}", "func EncodeClusterCreateRequest(m map[string]interface{}) map[string]interface{} {\n\treq := make(map[string]interface{})\n\t// Create requests involving a master version have to be sent under the \"initialMasterVersion\" key.\n\tif val, ok := m[\"currentMasterVersion\"]; ok {\n\t\tm[\"initialClusterVersion\"] = val\n\t\tdelete(m, \"currentMasterVersion\")\n\t}\n\n\tdcl.PutMapEntry(req, []string{\"cluster\"}, m)\n\treturn req\n}", "func (m *InstallManager) provisionCluster() error {\n\n\tm.log.Info(\"running openshift-install create cluster\")\n\n\tif err := m.runOpenShiftInstallCommand(\"create\", \"cluster\"); err != nil {\n\t\tif m.isBootstrapComplete() {\n\t\t\tm.log.WithError(err).Warn(\"provisioning cluster failed after completing bootstrapping, waiting longer for install to complete\")\n\t\t\terr = m.runOpenShiftInstallCommand(\"wait-for\", \"install-complete\")\n\t\t}\n\t\tif err != nil {\n\t\t\tm.log.WithError(err).Error(\"error provisioning cluster\")\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (client RoverClusterClient) CreateRoverCluster(ctx context.Context, request CreateRoverClusterRequest) (response CreateRoverClusterResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.DefaultRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif 
request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\n\tif !(request.OpcRetryToken != nil && *request.OpcRetryToken != \"\") {\n\t\trequest.OpcRetryToken = common.String(common.RetryToken())\n\t}\n\n\tociResponse, err = common.Retry(ctx, request, client.createRoverCluster, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = CreateRoverClusterResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = CreateRoverClusterResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(CreateRoverClusterResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into CreateRoverClusterResponse\")\n\t}\n\treturn\n}", "func NewCluster() *ClusterBuilder {\n\treturn &ClusterBuilder{}\n}", "func NewCluster(name string, nameSpaces, chartName, chartVersion, values string) (*Cluster, error) {\n\tvar spec MapStringInterface\n\terr := yaml.Unmarshal([]byte(values), &spec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcluster := &Cluster{\n\t\tUuid: uuid.NewV4().String(),\n\t\tName: name,\n\t\tNameSpace: nameSpaces,\n\t\tRevision: 0,\n\t\tStatus: ClusterStatusPending,\n\t\tChartName: chartName,\n\t\tChartVersion: chartVersion,\n\t\tValues: values,\n\t\tSpec: spec,\n\t}\n\n\treturn cluster, nil\n}", "func CreateClusterRole(\n\tk8sClient *kubernetes.Clientset,\n\tappName string,\n\tclusterRoleName string,\n\trules []PolicyRule,\n) error {\n\n\tlabels := map[string]string{\n\t\t\"app\": appName,\n\t}\n\n\trbacRules, err := generateRbacRules(rules)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%v\", err)\n\t}\n\n\tclusterRole := rbacv1.ClusterRole{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: \"rbac.authorization.k8s.io/v1\",\n\t\t\tKind: \"ClusterRole\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: clusterRoleName,\n\t\t\tLabels: labels,\n\t\t},\n\t\tRules: rbacRules,\n\t}\n\n\tclient := k8sClient.RbacV1().ClusterRoles()\n\t_, err = client.Create(&clusterRole)\n\tif err != nil {\n\t\tif !apierr.IsAlreadyExists(err) {\n\t\t\treturn fmt.Errorf(\"Failed to create ClusterRole %q: %v\", clusterRoleName, err)\n\t\t}\n\t\t_, err = client.Update(&clusterRole)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to update ClusterRole %q: %v\", clusterRoleName, err)\n\t\t}\n\t\tfmt.Printf(\"ClusterRole %q updated\\n\", clusterRoleName)\n\t} else {\n\t\tfmt.Printf(\"ClusterRole %q created\\n\", clusterRoleName)\n\t}\n\treturn nil\n}", "func (mr *MockEKSServiceInterfaceMockRecorder) CreateCluster(input interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreateCluster\", reflect.TypeOf((*MockEKSServiceInterface)(nil).CreateCluster), input)\n}", "func NewCluster(ctx *pulumi.Context,\n\tname string, args *ClusterArgs, opts ...pulumi.ResourceOption) (*Cluster, error) {\n\tif args == nil {\n\t\targs = &ClusterArgs{}\n\t}\n\n\tvar resource Cluster\n\terr := ctx.RegisterResource(\"aws:elasticache/cluster:Cluster\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (c *clusterNetwork) Create(cn *sdnapi.ClusterNetwork) (result *sdnapi.ClusterNetwork, err error) {\n\tresult = &sdnapi.ClusterNetwork{}\n\terr = 
c.r.Post().Resource(\"clusterNetworks\").Body(cn).Do().Into(result)\n\treturn\n}", "func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {\n\tb, err := opts.ToClusterCreateMap()\n\tif err != nil {\n\t\tr.Err = err\n\t\treturn\n\t}\n\tvar result *http.Response\n\tresult, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{\n\t\tOkCodes: []int{201},\n\t})\n\n\tif r.Err == nil {\n\t\tr.Header = result.Header\n\t}\n\n\treturn\n}", "func (ch *ClusterHost) Create() error {\n\tLogf(\"%s adding host to %s\\n\", ch.ID(), ch.Path)\n\n\tobj, err := ch.finder.ClusterComputeResource(ch.ctx, ch.Path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tspec := types.HostConnectSpec{\n\t\tHostName: ch.Name,\n\t\tPort: ch.Port,\n\t\tSslThumbprint: ch.SslThumbprint,\n\t\tUserName: ch.EsxiUsername,\n\t\tPassword: ch.EsxiPassword,\n\t\tForce: ch.Force,\n\t\tLockdownMode: \"\",\n\t}\n\n\ttask, err := obj.AddHost(ch.ctx, spec, true, &ch.License, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn task.Wait(ch.ctx)\n}", "func createClusterServer(opts *NodeOptions) *clusterServer {\n\treturn &clusterServer{workers: sync.Map{}, opts: opts}\n}", "func (store *Store) CreateAndFetchCluster(s *model.SessionContext, event model.Event) *config.Cluster {\n\tcluster := &config.Cluster{}\n\tcluster.UUID = event.UUID\n\tcluster.Name = event.UUID\n\tcluster.CppmVersion = event.CPPMVersion\n\tcluster.TenantID = event.TenantID\n\tcluster.AddedAt = time.Now()\n\tcluster.UpdatedAt = cluster.AddedAt\n\terr := store.DB().Insert(cluster)\n\tif err != nil {\n\t\tlogutil.Errorf(s, \"error in cluster insert:%v\", err)\n\t}\n\treturn cluster\n}", "func (c *ECS) CreateClusterRequest(input *CreateClusterInput) (req *aws.Request, output *CreateClusterOutput) {\n\toprw.Lock()\n\tdefer oprw.Unlock()\n\n\tif opCreateCluster == nil {\n\t\topCreateCluster = &aws.Operation{\n\t\t\tName: \"CreateCluster\",\n\t\t\tHTTPMethod: \"POST\",\n\t\t\tHTTPPath: \"/\",\n\t\t}\n\t}\n\n\treq = c.newRequest(opCreateCluster, input, output)\n\toutput = &CreateClusterOutput{}\n\treq.Data = output\n\treturn\n}", "func NewCreateClusterDefault(code int) *CreateClusterDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &CreateClusterDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (af *flight) NewCluster(rconf *platform.RuntimeConfig) (platform.Cluster, error) {\n\tbc, err := platform.NewBaseCluster(af.BaseFlight, rconf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tac := &cluster{\n\t\tBaseCluster: bc,\n\t\tflight: af,\n\t}\n\n\tif !rconf.NoSSHKeyInMetadata {\n\t\tac.sshKey = af.SSHKey\n\t}\n\n\tac.ResourceGroup, err = af.api.CreateResourceGroup(\"kola-cluster\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tac.StorageAccount, err = af.api.CreateStorageAccount(ac.ResourceGroup)\n\tif err != nil {\n\t\tif e := af.api.TerminateResourceGroup(ac.ResourceGroup); e != nil {\n\t\t\tplog.Errorf(\"Deleting resource group %v: %v\", ac.ResourceGroup, e)\n\t\t}\n\t\treturn nil, err\n\t}\n\n\t_, err = af.api.PrepareNetworkResources(ac.ResourceGroup)\n\tif err != nil {\n\t\tif e := af.api.TerminateResourceGroup(ac.ResourceGroup); e != nil {\n\t\t\tplog.Errorf(\"Deleting resource group %v: %v\", ac.ResourceGroup, e)\n\t\t}\n\t\treturn nil, err\n\t}\n\n\taf.AddCluster(ac)\n\n\treturn ac, nil\n}", "func (client *Client) CreateClusterWithCallback(request *CreateClusterRequest, callback func(response *CreateClusterResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := 
client.AddAsyncTask(func() {\n\t\tvar response *CreateClusterResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.CreateCluster(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func ExampleRDS_CreateDBCluster_shared00() {\n\tsvc := rds.New(session.New())\n\tinput := &rds.CreateDBClusterInput{\n\t\tDBClusterIdentifier: aws.String(\"sample-cluster\"),\n\t\tDBSubnetGroupName: aws.String(\"default\"),\n\t\tEngine: aws.String(\"aurora-mysql\"),\n\t\tEngineVersion: aws.String(\"5.7.12\"),\n\t\tMasterUserPassword: aws.String(\"mypassword\"),\n\t\tMasterUsername: aws.String(\"admin\"),\n\t\tVpcSecurityGroupIds: []*string{\n\t\t\taws.String(\"sg-0b91305example\"),\n\t\t},\n\t}\n\n\tresult, err := svc.CreateDBCluster(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase rds.ErrCodeDBClusterAlreadyExistsFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBClusterAlreadyExistsFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInsufficientStorageClusterCapacityFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInsufficientStorageClusterCapacityFault, aerr.Error())\n\t\t\tcase rds.ErrCodeDBClusterQuotaExceededFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBClusterQuotaExceededFault, aerr.Error())\n\t\t\tcase rds.ErrCodeStorageQuotaExceededFault:\n\t\t\t\tfmt.Println(rds.ErrCodeStorageQuotaExceededFault, aerr.Error())\n\t\t\tcase rds.ErrCodeDBSubnetGroupNotFoundFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBSubnetGroupNotFoundFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidVPCNetworkStateFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidVPCNetworkStateFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidDBClusterStateFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidDBClusterStateFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidDBSubnetGroupStateFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidDBSubnetGroupStateFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidSubnet:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidSubnet, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidDBInstanceStateFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidDBInstanceStateFault, aerr.Error())\n\t\t\tcase rds.ErrCodeDBClusterParameterGroupNotFoundFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBClusterParameterGroupNotFoundFault, aerr.Error())\n\t\t\tcase rds.ErrCodeKMSKeyNotAccessibleFault:\n\t\t\t\tfmt.Println(rds.ErrCodeKMSKeyNotAccessibleFault, aerr.Error())\n\t\t\tcase rds.ErrCodeDBClusterNotFoundFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBClusterNotFoundFault, aerr.Error())\n\t\t\tcase rds.ErrCodeDBInstanceNotFoundFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBInstanceNotFoundFault, aerr.Error())\n\t\t\tcase rds.ErrCodeDBSubnetGroupDoesNotCoverEnoughAZs:\n\t\t\t\tfmt.Println(rds.ErrCodeDBSubnetGroupDoesNotCoverEnoughAZs, aerr.Error())\n\t\t\tcase rds.ErrCodeGlobalClusterNotFoundFault:\n\t\t\t\tfmt.Println(rds.ErrCodeGlobalClusterNotFoundFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidGlobalClusterStateFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidGlobalClusterStateFault, aerr.Error())\n\t\t\tcase rds.ErrCodeDomainNotFoundFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDomainNotFoundFault, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (flags Etcd) CreateInitialCluster(sctx 
*ServiceContext) string {\n\treturn flags.createEndpoints(sctx, defaultEtcdPeerPort, func(node Node) string {\n\t\treturn node.Name + \"=\"\n\t})\n}", "func NewCluster(MyCluster []Barebone) Cluster {\n\tvar retCluster Cluster\n\tretCluster.Machines = &MyCluster\n\treturn retCluster\n}", "func NewCluster(driverName, name, addr string, configGetter ConfigGetter, persistStore PersistentStore) (*Cluster, error) {\n\trpcClient, err := types.NewClient(driverName, addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Cluster{\n\t\tDriver: rpcClient,\n\t\tDriverName: driverName,\n\t\tName: name,\n\t\tConfigGetter: configGetter,\n\t\tPersistStore: persistStore,\n\t}, nil\n}", "func (s *Session) CreateClusterModule(ctx context.Context) (string, error) {\n\tlog.Info(\"Creating clusterModule\")\n\n\trestClient := s.Client.RestClient()\n\tmoduleId, err := cluster.NewManager(restClient).CreateModule(ctx, s.cluster)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tlog.Info(\"Created clusterModule\", \"moduleId\", moduleId)\n\treturn moduleId, nil\n}" ]
[ "0.8185021", "0.78084165", "0.7425226", "0.7330281", "0.7253956", "0.723301", "0.7183776", "0.71564835", "0.7125142", "0.70509756", "0.7045551", "0.7010462", "0.69699454", "0.69615054", "0.69457465", "0.68583614", "0.68540156", "0.6843783", "0.6820751", "0.68091303", "0.6807314", "0.6793698", "0.6777245", "0.6776395", "0.6766535", "0.6763911", "0.6749805", "0.673112", "0.6711841", "0.67063326", "0.6687199", "0.66770184", "0.6658003", "0.66326046", "0.66298336", "0.66253114", "0.65733355", "0.6564436", "0.648836", "0.6438566", "0.6408279", "0.639541", "0.63874114", "0.63796514", "0.63313586", "0.6330957", "0.6311818", "0.63037395", "0.630113", "0.6293084", "0.6274829", "0.6264436", "0.6244175", "0.6239229", "0.6225662", "0.6213236", "0.6196764", "0.6176646", "0.61606634", "0.61337787", "0.61251384", "0.6119584", "0.61151826", "0.60618037", "0.6034891", "0.6027865", "0.6027373", "0.6023808", "0.59923506", "0.59912026", "0.59887904", "0.5985118", "0.5970037", "0.59616655", "0.59477836", "0.59437466", "0.59437466", "0.59407145", "0.5923381", "0.59095734", "0.5898378", "0.58953685", "0.58876836", "0.58801645", "0.5877085", "0.5862014", "0.5859095", "0.5851745", "0.5846198", "0.5840839", "0.5831111", "0.5830067", "0.5829756", "0.58265644", "0.58165276", "0.57961565", "0.57961327", "0.5795745", "0.5789958", "0.57772166" ]
0.7769736
2
DeleteCluster deletes the cluster from the database
func (p PGSQLConnection) DeleteCluster(cluster *ClusterModel) error {
	tx, err := p.connection.Beginx()
	if err != nil {
		return err
	}
	_, err = tx.NamedExec("DELETE FROM clusters WHERE cluster_name = :cluster_name", cluster)
	if err != nil {
		// Roll back so a failed delete does not leave the transaction open.
		tx.Rollback()
		return err
	}
	return tx.Commit()
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *client) DeleteCluster() error {\n\t_, err := c.runCmd(\"cluster\", \"delete\", c.clusterName)\n\treturn err\n}", "func DeleteCluster(c *gin.Context) {\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagDeleteCluster, \"Delete cluster start\")\n\n\tcl, err := cloud.GetClusterFromDB(c)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif cloud.DeleteCluster(cl, c) {\n\t\t// cluster delete success, delete from db\n\t\tif cloud.DeleteFromDb(cl, c) {\n\t\t\tupdatePrometheus()\n\t\t}\n\t}\n\n}", "func (k Kind) DeleteCluster() error {\n\tcmd := kindCommand(\"kind delete cluster\")\n\treturn cmd.Run()\n}", "func (c *Client) DeleteCluster(projectID string, seed string, clusterID string) error {\n\treq, err := c.newRequest(\"DELETE\", projectPath+\"/\"+projectID+datacenterSubPath+\"/\"+seed+clustersSubPath+\"/\"+clusterID, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresp, err := c.do(req, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// StatusCodes 401 and 403 mean empty response and should be treated as such\n\tif resp.StatusCode == 401 || resp.StatusCode == 403 {\n\t\treturn nil\n\t}\n\n\tif resp.StatusCode >= 299 {\n\t\treturn errors.New(\"Got non-2xx return code: \" + strconv.Itoa(resp.StatusCode))\n\t}\n\n\treturn nil\n}", "func (m *CDatabase) Delete(cluster Cluster) error {\n\terr := db.C(COLLECTION).Remove(&cluster)\n\treturn err\n}", "func (a *ClusterAPI) DeleteCluster(c *gin.Context) {\n\tcommonCluster, ok := a.clusterGetter.GetClusterFromRequest(c)\n\tif ok != true {\n\t\treturn\n\t}\n\n\tforce, _ := strconv.ParseBool(c.DefaultQuery(\"force\", \"false\"))\n\n\t// DeleteCluster deletes the underlying model, so we get this data here\n\tclusterID, clusterName := commonCluster.GetID(), commonCluster.GetName()\n\n\tctx := ginutils.Context(c.Request.Context(), c)\n\n\t// delete cluster from cluster group\n\terr := a.clusterGroupManager.RemoveClusterFromGroup(ctx, clusterID)\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, pkgCommon.ErrorResponse{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tMessage: err.Error(),\n\t\t\tError: err.Error(),\n\t\t})\n\t\treturn\n\t}\n\n\tswitch {\n\tcase commonCluster.GetDistribution() == pkgCluster.PKE && commonCluster.GetCloud() == pkgCluster.Azure:\n\t\tif err := a.clusterDeleters.PKEOnAzure.DeleteByID(ctx, commonCluster.GetID(), force); err != nil {\n\t\t\tpkgCommon.ErrorResponseWithStatus(c, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\tdefault:\n\t\t_ = a.clusterManager.DeleteCluster(ctx, commonCluster, force)\n\t}\n\n\tif anchore.AnchoreEnabled && commonCluster.GetSecurityScan() {\n\t\tanchore.RemoveAnchoreUser(commonCluster.GetOrganizationId(), commonCluster.GetUID())\n\t}\n\n\tc.JSON(http.StatusAccepted, DeleteClusterResponse{\n\t\tStatus: http.StatusAccepted,\n\t\tName: clusterName,\n\t\tResourceID: clusterID,\n\t})\n}", "func DeleteCluster(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, z *api.ZookeeperCluster) error {\n\tt.Logf(\"deleting zookeeper cluster: %s\", z.Name)\n\terr := f.Client.Delete(goctx.TODO(), z)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to delete CR: %v\", err)\n\t}\n\n\tt.Logf(\"deleted zookeeper cluster: %s\", z.Name)\n\treturn nil\n}", "func (svc ServerlessClusterService) Delete(ctx context.Context,\n\tinput *models.ClusterDeleteInput) (*models.ClusterId, *Response, error) {\n\tvar clusterId models.ClusterId\n\tgraphqlRequest := models.GraphqlRequest{\n\t\tName: \"deleteCluster\",\n\t\tOperation: models.Mutation,\n\t\tInput: nil,\n\t\tArgs: *input,\n\t\tResponse: clusterId,\n\t}\n\treq, 
err := svc.client.NewRequest(&graphqlRequest)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := svc.client.Do(ctx, req, &clusterId)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn &clusterId, resp, err\n}", "func (c *AKSCluster) DeleteCluster() error {\n\tlog := logger.WithFields(logrus.Fields{\"action\": constants.TagDeleteCluster})\n\tclient, err := c.GetAKSClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient.With(log.Logger)\n\n\t// set azure props\n\tdatabase := model.GetDB()\n\tdatabase.Where(model.AzureClusterModel{ClusterModelId: c.modelCluster.ID}).First(&c.modelCluster.Azure)\n\n\terr = azureClient.DeleteCluster(client, c.modelCluster.Name, c.modelCluster.Azure.ResourceGroup)\n\tif err != nil {\n\t\tlog.Info(\"Delete succeeded\")\n\t\treturn nil\n\t}\n\t// todo status code !?\n\treturn err\n}", "func (s *Server) DeleteCluster(id uint64) {\n\ts.doDeleteCluster(id)\n}", "func DeleteCluster(name string, orgID string) {\n\n\tStep(fmt.Sprintf(\"Delete cluster [%s] in org [%s]\", name, orgID), func() {\n\t\tbackupDriver := Inst().Backup\n\t\tclusterDeleteReq := &api.ClusterDeleteRequest{\n\t\t\tOrgId: orgID,\n\t\t\tName: name,\n\t\t}\n\t\tctx, err := backup.GetPxCentralAdminCtx()\n\t\texpect(err).NotTo(haveOccurred(),\n\t\t\tfmt.Sprintf(\"Failed to fetch px-central-admin ctx: [%v]\",\n\t\t\t\terr))\n\t\tbackupDriver.DeleteCluster(ctx, clusterDeleteReq)\n\t\t// Best effort cleanup, dont fail test, if deletion fails\n\t\t//expect(err).NotTo(haveOccurred(),\n\t\t//\tfmt.Sprintf(\"Failed to delete cluster [%s] in org [%s]\", name, orgID))\n\t})\n}", "func (c starterClusterServiceOp) Delete(ctx context.Context, input *models.ClusterDeleteInput) (*models.ClusterId, *Response, error) {\n\tvar clusterId models.ClusterId\n\tgraphqlRequest := models.GraphqlRequest{\n\t\tName: \"deleteCluster\",\n\t\tOperation: models.Mutation,\n\t\tInput: nil,\n\t\tArgs: *input,\n\t\tResponse: clusterId,\n\t}\n\treq, err := c.client.NewRequest(&graphqlRequest)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := c.client.Do(ctx, req, &clusterId)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn &clusterId, resp, err\n}", "func DeleteCluster(c echo.Context) error {\n\tcblog.Info(\"call DeleteCluster()\")\n\n\tvar req struct {\n\t\tNameSpace string\n\t\tConnectionName string\n\t}\n\n\tif err := c.Bind(&req); err != nil {\n\t\treturn echo.NewHTTPError(http.StatusInternalServerError, err.Error())\n\t}\n\n\tclusterName := c.Param(\"Name\")\n\t// Resource Name has namespace prefix when from Tumblebug\n\tif req.NameSpace != \"\" {\n\t\tnameSpace := req.NameSpace + \"-\"\n\n\t\t// Cluster's Name\n\t\tclusterName = nameSpace + clusterName\n\t}\n\t// Call common-runtime API\n\tresult, err := cmrt.DeleteCluster(req.ConnectionName, rsCluster, clusterName, c.QueryParam(\"force\"))\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusInternalServerError, err.Error())\n\t}\n\n\tresultInfo := BooleanInfo{\n\t\tResult: strconv.FormatBool(result),\n\t}\n\n\treturn c.JSON(http.StatusOK, &resultInfo)\n}", "func (a *Actuator) Delete(cluster *clusterv1.Cluster) error {\n\ta.log.Info(\"Deleting cluster\", \"cluster-name\", cluster.Name, \"cluster-namespace\", cluster.Namespace)\n\n\tscope, err := scope.NewClusterScope(scope.ClusterScopeParams{\n\t\tCluster: cluster,\n\t\tClient: a.Client,\n\t\tLogger: a.log,\n\t})\n\tif err != nil {\n\t\treturn errors.Errorf(\"failed to create scope: %+v\", err)\n\t}\n\n\tdefer scope.Close()\n\n\tec2svc := 
ec2.NewService(scope)\n\telbsvc := elb.NewService(scope)\n\n\tif err := elbsvc.DeleteLoadbalancers(); err != nil {\n\t\treturn errors.Errorf(\"unable to delete load balancers: %+v\", err)\n\t}\n\n\tif err := ec2svc.DeleteBastion(); err != nil {\n\t\treturn errors.Errorf(\"unable to delete bastion: %+v\", err)\n\t}\n\n\tif err := ec2svc.DeleteNetwork(); err != nil {\n\t\ta.log.Error(err, \"Error deleting cluster\", \"cluster-name\", cluster.Name, \"cluster-namespace\", cluster.Namespace)\n\t\treturn &controllerError.RequeueAfterError{\n\t\t\tRequeueAfter: 5 * time.Second,\n\t\t}\n\t}\n\n\treturn nil\n}", "func (w *worker) deleteCluster(cluster *chop.ChiCluster) error {\n\tw.a.V(2).M(cluster).S().P()\n\tdefer w.a.V(2).M(cluster).E().P()\n\n\tw.a.V(1).\n\t\tWithEvent(cluster.CHI, eventActionDelete, eventReasonDeleteStarted).\n\t\tWithStatusAction(cluster.CHI).\n\t\tM(cluster).F().\n\t\tInfo(\"Delete cluster %s/%s - started\", cluster.Address.Namespace, cluster.Name)\n\n\t// Delete all shards\n\tcluster.WalkShards(func(index int, shard *chop.ChiShard) error {\n\t\treturn w.deleteShard(shard)\n\t})\n\n\t// Delete Cluster Service\n\t_ = w.c.deleteServiceCluster(cluster)\n\n\tw.a.V(1).\n\t\tWithEvent(cluster.CHI, eventActionDelete, eventReasonDeleteCompleted).\n\t\tWithStatusAction(cluster.CHI).\n\t\tM(cluster).F().\n\t\tInfo(\"Delete cluster %s/%s - completed\", cluster.Address.Namespace, cluster.Name)\n\n\treturn nil\n}", "func (base *SLXOrcaBase) DeleteCluster(client *client.NetconfClient, clusterName string, clusterID string) (string, error) {\n\tvar mctCluster = map[string]interface{}{\"cluster_name\": clusterName, \"cluster_id\": clusterID}\n\n\tconfig, templateError := base.GetStringFromTemplate(mctClusterDelete, mctCluster)\n\tif templateError != nil {\n\t\treturn \"\", templateError\n\t}\n\n\tresp, err := client.EditConfig(config)\n\n\treturn resp, err\n}", "func DeleteCluster(c *cli.Context) error {\n\n\tclusters, err := getClusters(c.Bool(\"all\"), c.String(\"name\"))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(clusters) == 0 {\n\t\tif !c.IsSet(\"all\") && !c.IsSet(\"name\") {\n\t\t\treturn fmt.Errorf(\"No cluster with name '%s' found (You can add `--all` and `--name <CLUSTER-NAME>` to delete other clusters)\", c.String(\"name\"))\n\t\t}\n\t\treturn fmt.Errorf(\"No cluster(s) found\")\n\t}\n\n\t// remove clusters one by one instead of appending all names to the docker command\n\t// this allows for more granular error handling and logging\n\tfor _, cluster := range clusters {\n\t\tlog.Printf(\"Removing cluster [%s]\", cluster.name)\n\t\tif len(cluster.workers) > 0 {\n\t\t\t// TODO: this could be done in goroutines\n\t\t\tlog.Printf(\"...Removing %d workers\\n\", len(cluster.workers))\n\t\t\tfor _, worker := range cluster.workers {\n\t\t\t\tif err := removeContainer(worker.ID); err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tdeleteClusterDir(cluster.name)\n\t\tlog.Println(\"...Removing server\")\n\t\tif err := removeContainer(cluster.server.ID); err != nil {\n\t\t\treturn fmt.Errorf(\" Couldn't remove server for cluster %s\\n%+v\", cluster.name, err)\n\t\t}\n\n\t\tif err := disconnectRegistryFromNetwork(cluster.name, c.IsSet(\"keep-registry-volume\")); err != nil {\n\t\t\tlog.Warningf(\"Couldn't disconnect Registry from network %s\\n%+v\", cluster.name, err)\n\t\t}\n\n\t\tif c.IsSet(\"prune\") {\n\t\t\t// disconnect any other container that is connected to the k3d network\n\t\t\tnid, err := getClusterNetwork(cluster.name)\n\t\t\tif 
err != nil {\n\t\t\t\tlog.Warningf(\"Couldn't get the network for cluster %q\\n%+v\", cluster.name, err)\n\t\t\t}\n\t\t\tcids, err := getContainersInNetwork(nid)\n\t\t\tif err != nil {\n\t\t\t\tlog.Warningf(\"Couldn't get the list of containers connected to network %q\\n%+v\", nid, err)\n\t\t\t}\n\t\t\tfor _, cid := range cids {\n\t\t\t\terr := disconnectContainerFromNetwork(cid, nid)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Warningf(\"Couldn't disconnect container %q from network %q\", cid, nid)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tlog.Printf(\"...%q has been forced to disconnect from %q's network\", cid, cluster.name)\n\t\t\t}\n\t\t}\n\n\t\tif err := deleteClusterNetwork(cluster.name); err != nil {\n\t\t\tlog.Warningf(\"Couldn't delete cluster network for cluster %s\\n%+v\", cluster.name, err)\n\t\t}\n\n\t\tlog.Println(\"...Removing docker image volume\")\n\t\tif err := deleteImageVolume(cluster.name); err != nil {\n\t\t\tlog.Warningf(\"Couldn't delete image docker volume for cluster %s\\n%+v\", cluster.name, err)\n\t\t}\n\n\t\tlog.Infof(\"Removed cluster [%s]\", cluster.name)\n\t}\n\n\treturn nil\n}", "func DeleteCluster(ctx context.Context, clusterClient containerservice.ManagedClustersClient, resourceName string) (result string, err error) {\n\tresourceGroupName := resourceName + \"-group\"\n\n\tfuture, err := clusterClient.Delete(ctx, resourceGroupName, resourceName)\n\tif err != nil {\n\t\treturn result, fmt.Errorf(\"error deleting cluster: %v\", err)\n\t}\n\n\tresult = future.Status()\n\tif result != \"InProgress\" {\n\t\treturn \"\", fmt.Errorf(\"current status of delete: %v\", result)\n\t}\n\n\tmsg := \"Deleting \" + resourceName + \" cluster\"\n\n\treturn msg, err\n\n\t// TODO: delete resource group also, if nothing else is in it\n}", "func (adm Admin) DropCluster(cluster string) error {\n\tconn := newConnection(adm.ZkSvr)\n\terr := conn.Connect()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Disconnect()\n\n\tkb := KeyBuilder{cluster}\n\tc := kb.cluster()\n\n\treturn conn.DeleteTree(c)\n}", "func DeleteTestCluster() {\n\tmg.Deps(tools.EnsureKind)\n\n\tmust.RunE(\"kind\", \"delete\", \"cluster\", \"--name\", getKindClusterName())\n\n\tif isOnDockerNetwork(getRegistryName(), \"kind\") {\n\t\tmust.RunE(\"docker\", \"network\", \"disconnect\", \"kind\", getRegistryName())\n\t}\n}", "func DeleteBKCluster(t *testing.T, k8client client.Client, b *bkapi.BookkeeperCluster) error {\n\tlog.Printf(\"deleting bookkeeper cluster: %s\", b.Name)\n\terr := k8client.Delete(goctx.TODO(), b)\n\tif err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"failed to delete CR: %v\", err)\n\t}\n\n\tlog.Printf(\"deleted bookkeeper cluster: %s\", b.Name)\n\treturn nil\n}", "func (us *ClusterStore) Delete(u *model.Cluster) error {\n\treturn us.db.Unscoped().Delete(u).Error\n}", "func (adm Admin) DropCluster(cluster string) error {\n\tkb := KeyBuilder{cluster}\n\tc := kb.cluster()\n\n\treturn adm.zkClient.DeleteTree(c)\n}", "func (m *Monitor) deleteCluster(managedCluster *clusterv1.ManagedCluster) {\n\tglog.V(2).Info(\"Processing Cluster Delete.\")\n\n\tclusterToDelete := managedCluster.GetName()\n\tfor clusterIdx, cluster := range m.ManagedClusterInfo {\n\t\tif clusterToDelete == cluster.Namespace {\n\t\t\tglog.Infof(\"Removing %s from Insights cluster list\", clusterToDelete)\n\t\t\tm.ManagedClusterInfo = append(m.ManagedClusterInfo[:clusterIdx], m.ManagedClusterInfo[clusterIdx+1:]...)\n\t\t}\n\t}\n}", "func (mcr *MiddlewareClusterRepo) Delete(id int) 
error {\n\ttx, err := mcr.Transaction()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\terr = tx.Close()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"metadata MiddlewareClusterRepo.Delete(): close database connection failed.\\n%s\", err.Error())\n\t\t}\n\t}()\n\n\terr = tx.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsql := `delete from t_meta_middleware_cluster_info where id = ?;`\n\tlog.Debugf(\"metadata MiddlewareClusterRepo.Delete() update sql: %s\", sql)\n\t_, err = mcr.Execute(sql, id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn tx.Commit()\n}", "func SyncDeletePassCCCluster(taskID string, cluster *proto.Cluster) {\n\terr := passcc.GetCCClient().DeletePassCCCluster(cluster.ProjectID, cluster.ClusterID)\n\tif err != nil {\n\t\tblog.Errorf(\"CleanClusterDBInfoTask[%s]: DeletePassCCCluster[%s] failed: %v\", taskID, cluster.ClusterID, err)\n\t} else {\n\t\tblog.Infof(\"CleanClusterDBInfoTask[%s]: DeletePassCCCluster[%s] successful\", taskID, cluster.ClusterID)\n\t}\n}", "func (p *ClusterProvider) Delete() error {\n\t// This return nil for existing cluster\n\treturn nil\n}", "func (c *AKSCluster) DeleteFromDatabase() error {\n\terr := c.modelCluster.Delete()\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.modelCluster = nil\n\treturn nil\n}", "func (m *K3dClusterManager) Delete(ctx context.Context) error {\n\tclusterConfig := &types.Cluster{\n\t\tName: ClusterName,\n\t}\n\n\tclusterConfig, err := m.get(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn k3dcluster.ClusterDelete(ctx, m.runtime, clusterConfig)\n}", "func (m *Multicluster) deleteCluster(clusterID cluster.ID) {\n\tm.opts.MeshServiceController.UnRegisterHandlersForCluster(clusterID)\n\tm.opts.MeshServiceController.DeleteRegistry(clusterID, provider.Kubernetes)\n\tkc, ok := m.remoteKubeControllers[clusterID]\n\tif !ok {\n\t\tlog.Infof(\"cluster %s does not exist, maybe caused by invalid kubeconfig\", clusterID)\n\t\treturn\n\t}\n\tif kc.workloadEntryController != nil {\n\t\tm.opts.MeshServiceController.DeleteRegistry(clusterID, provider.External)\n\t}\n\tif err := kc.Cleanup(); err != nil {\n\t\tlog.Warnf(\"failed cleaning up services in %s: %v\", clusterID, err)\n\t}\n\tdelete(m.remoteKubeControllers, clusterID)\n}", "func Delete(ctx context.Context, client *v1.ServiceClient, clusterID, nodegroupID string) (*v1.ResponseResult, error) {\n\turl := strings.Join([]string{client.Endpoint, v1.ResourceURLCluster, clusterID, v1.ResourceURLNodegroup, nodegroupID}, \"/\")\n\tresponseResult, err := client.DoRequest(ctx, http.MethodDelete, url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif responseResult.Err != nil {\n\t\terr = responseResult.Err\n\t}\n\n\treturn responseResult, err\n}", "func (api *clusterAPI) Delete(obj *cluster.Cluster) error {\n\tif api.ct.resolver != nil {\n\t\tapicl, err := api.ct.apiClient()\n\t\tif err != nil {\n\t\t\tapi.ct.logger.Errorf(\"Error creating API server clent. 
Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = apicl.ClusterV1().Cluster().Delete(context.Background(), &obj.ObjectMeta)\n\t\treturn err\n\t}\n\n\tapi.ct.handleClusterEvent(&kvstore.WatchEvent{Object: obj, Type: kvstore.Deleted})\n\treturn nil\n}", "func (sqlStore *SQLStore) DeleteClusterInstallation(id string) error {\n\t_, err := sqlStore.execBuilder(sqlStore.db, sq.\n\t\tUpdate(\"ClusterInstallation\").\n\t\tSet(\"DeleteAt\", model.GetMillis()).\n\t\tWhere(\"ID = ?\", id).\n\t\tWhere(\"DeleteAt = 0\"),\n\t)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to mark cluster installation as deleted\")\n\t}\n\n\treturn nil\n}", "func (a ClustersAPI) Delete(clusterID string) error {\n\treturn a.Terminate(clusterID)\n}", "func (elementConfiguration *ElementConfiguration) DeleteCluster(version string) {\n\telementConfiguration.ClustersX.Lock()\n\tdelete(elementConfiguration.Clusters, version)\n\telementConfiguration.ClustersX.Unlock()\n}", "func (j *Juju) DestroyCluster() error {\n\tcontroller := \"\"\n\tif j.Kind == Aws {\n\t\tcontroller = j.AwsCl.Region\n\t} else if j.Kind == Maas {\n\t\tcontroller = j.Name\n\t} else {\n\t\treturn errors.New(\"DestroyCluster: Juju.Kind must be a supported cloud\")\n\t}\n\tcontroller = strings.Replace(controller, \"/\", \"-\", -1)\n\n\ttmp := \"JUJU_DATA=\" + JujuDataPrefix + j.Name\n\tcmd := exec.Command(\"juju\", \"destroy-controller\", \"--destroy-all-models\", controller, \"-y\")\n\tcmd.Env = append(os.Environ(), tmp)\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"DestroyCluster error: %v: %s\", err, err.(*exec.ExitError).Stderr)\n\t}\n\tlog.Debug(string(out))\n\treturn nil\n}", "func (s *Server) DeleteCluster(ctx context.Context, name string) {\n\ts.Clusters.Delete(ctx, name)\n}", "func (a *Controller) Delete(ctx context.Context, cluster *clustersv1.Cluster) (reconcile.Result, error) {\n\ta.logger.Debug(\"attempting to delete the cluster from the api\")\n\n\tfinalizer := kubernetes.NewFinalizer(a.mgr.GetClient(), finalizerName)\n\tif !finalizer.IsDeletionCandidate(cluster) {\n\t\ta.logger.Debug(\"not ready for deletion yet\")\n\n\t\treturn reconcile.Result{}, nil\n\t}\n\toriginal := cluster.DeepCopyObject()\n\n\tcomponents, err := NewComponents()\n\tif err != nil {\n\t\ta.logger.WithError(err).Error(\"trying to create the components\")\n\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tresult, err := func() (reconcile.Result, error) {\n\t\tp, err := a.Provider(cluster.Spec.Kind)\n\t\tif err != nil {\n\t\t\treturn reconcile.Result{}, controllers.NewCriticalError(err)\n\t\t}\n\n\t\treturn controllers.DefaultEnsureHandler.Run(ctx,\n\t\t\t[]controllers.EnsureFunc{\n\t\t\t\ta.Deleting(cluster),\n\t\t\t\tp.Components(cluster, components),\n\t\t\t\ta.Components(cluster, components),\n\t\t\t\ta.Load(cluster, components),\n\t\t\t\ta.Remove(cluster, components),\n\t\t\t\ta.RemoveFinalizer(cluster),\n\t\t\t},\n\t\t)\n\t}()\n\tif err != nil {\n\t\ta.logger.WithError(err).Error(\"trying to delete the cluster\")\n\n\t\tif controllers.IsCriticalError(err) {\n\t\t\tcluster.Status.Status = corev1.FailureStatus\n\t\t\tcluster.Status.Message = err.Error()\n\t\t}\n\t}\n\n\tif err := a.mgr.GetClient().Status().Patch(ctx, cluster, client.MergeFrom(original)); err != nil {\n\t\tif !kerrors.IsNotFound(err) {\n\t\t\ta.logger.WithError(err).Error(\"failed to update the cluster status\")\n\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t}\n\n\treturn result, err\n}", "func (dm *DMap) deleteOnCluster(hkey uint64, key string, f *fragment) error 
{\n\towners := dm.s.primary.PartitionOwnersByHKey(hkey)\n\tif len(owners) == 0 {\n\t\tpanic(\"partition owners list cannot be empty\")\n\t}\n\n\terr := dm.deleteFromPreviousOwners(key, owners)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif dm.s.config.ReplicaCount != 0 {\n\t\terr := dm.deleteBackupOnCluster(hkey, key)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = f.storage.Delete(hkey)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// DeleteHits is the number of deletion reqs resulting in an item being removed.\n\tDeleteHits.Increase(1)\n\n\treturn nil\n}", "func (client RoverClusterClient) deleteRoverCluster(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodDelete, \"/roverClusters/{roverClusterId}\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response DeleteRoverClusterResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\tapiReferenceLink := \"https://docs.oracle.com/iaas/api/#/en/rover/20201210/RoverCluster/DeleteRoverCluster\"\n\t\terr = common.PostProcessServiceError(err, \"RoverCluster\", \"DeleteRoverCluster\", apiReferenceLink)\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func DeletePravegaCluster(t *testing.T, k8client client.Client, p *api.PravegaCluster) error {\n\tlog.Printf(\"deleting pravega cluster: %s\", p.Name)\n\terr := k8client.Delete(goctx.TODO(), p)\n\tif err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"failed to delete CR: %v\", err)\n\t}\n\n\tlog.Printf(\"deleted pravega cluster: %s\", p.Name)\n\treturn nil\n}", "func DeleteZKCluster(t *testing.T, k8client client.Client, z *zkapi.ZookeeperCluster) error {\n\tlog.Printf(\"deleting zookeeper cluster: %s\", z.Name)\n\terr := k8client.Delete(goctx.TODO(), z)\n\tif err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"failed to delete CR: %v\", err)\n\t}\n\tlog.Printf(\"deleted zookeeper cluster: %s\", z.Name)\n\treturn nil\n}", "func ExampleRDS_DeleteDBCluster_shared00() {\n\tsvc := rds.New(session.New())\n\tinput := &rds.DeleteDBClusterInput{\n\t\tDBClusterIdentifier: aws.String(\"mycluster\"),\n\t\tFinalDBSnapshotIdentifier: aws.String(\"mycluster-final-snapshot\"),\n\t\tSkipFinalSnapshot: aws.Bool(false),\n\t}\n\n\tresult, err := svc.DeleteDBCluster(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase rds.ErrCodeDBClusterNotFoundFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBClusterNotFoundFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidDBClusterStateFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidDBClusterStateFault, aerr.Error())\n\t\t\tcase rds.ErrCodeDBClusterSnapshotAlreadyExistsFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBClusterSnapshotAlreadyExistsFault, aerr.Error())\n\t\t\tcase rds.ErrCodeSnapshotQuotaExceededFault:\n\t\t\t\tfmt.Println(rds.ErrCodeSnapshotQuotaExceededFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidDBClusterSnapshotStateFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidDBClusterSnapshotStateFault, aerr.Error())\n\t\t\tcase 
rds.ErrCodeDBClusterAutomatedBackupQuotaExceededFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBClusterAutomatedBackupQuotaExceededFault, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (a *HyperflexApiService) DeleteHyperflexBackupCluster(ctx context.Context, moid string) ApiDeleteHyperflexBackupClusterRequest {\n\treturn ApiDeleteHyperflexBackupClusterRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func DeleteCSPCluster(c echo.Context) error {\n\tcblog.Info(\"call DeleteCSPCluster()\")\n\n\tvar req struct {\n\t\tConnectionName string\n\t}\n\n\tif err := c.Bind(&req); err != nil {\n\t\treturn echo.NewHTTPError(http.StatusInternalServerError, err.Error())\n\t}\n\n\t// Call common-runtime API\n\tresult, _, err := cmrt.DeleteCSPResource(req.ConnectionName, rsCluster, c.Param(\"Id\"))\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusInternalServerError, err.Error())\n\t}\n\n\tresultInfo := BooleanInfo{\n\t\tResult: strconv.FormatBool(result),\n\t}\n\n\treturn c.JSON(http.StatusOK, &resultInfo)\n}", "func (mcs *MySQLClusterService) Delete(id int) error {\n\treturn mcs.MySQLClusterRepo.Delete(id)\n}", "func (a *Server) DeleteTrustedCluster(ctx context.Context, name string) error {\n\tcn, err := a.GetClusterName()\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\n\t// This check ensures users are not deleting their root/own cluster.\n\tif cn.GetClusterName() == name {\n\t\treturn trace.BadParameter(\"trusted cluster %q is the name of this root cluster and cannot be removed.\", name)\n\t}\n\n\t// Remove all CAs\n\tfor _, caType := range []types.CertAuthType{types.HostCA, types.UserCA, types.DatabaseCA, types.OpenSSHCA} {\n\t\tif err := a.DeleteCertAuthority(ctx, types.CertAuthID{Type: caType, DomainName: name}); err != nil {\n\t\t\tif !trace.IsNotFound(err) {\n\t\t\t\treturn trace.Wrap(err)\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := a.DeleteReverseTunnel(name); err != nil {\n\t\tif !trace.IsNotFound(err) {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\t}\n\n\tif err := a.Services.DeleteTrustedCluster(ctx, name); err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\n\tif err := a.emitter.EmitAuditEvent(ctx, &apievents.TrustedClusterDelete{\n\t\tMetadata: apievents.Metadata{\n\t\t\tType: events.TrustedClusterDeleteEvent,\n\t\t\tCode: events.TrustedClusterDeleteCode,\n\t\t},\n\t\tUserMetadata: authz.ClientUserMetadata(ctx),\n\t\tResourceMetadata: apievents.ResourceMetadata{\n\t\t\tName: name,\n\t\t},\n\t}); err != nil {\n\t\tlog.WithError(err).Warn(\"Failed to emit trusted cluster delete event.\")\n\t}\n\n\treturn nil\n}", "func (gc *GKECluster) Delete() error {\n\tif !gc.NeedCleanup {\n\t\treturn nil\n\t}\n\t// TODO: Perform GKE specific cluster deletion logics\n\treturn nil\n}", "func (m *Multicluster) ClusterDeleted(clusterID cluster.ID) {\n\tm.m.Lock()\n\tm.deleteCluster(clusterID)\n\tm.m.Unlock()\n\tif m.XDSUpdater != nil {\n\t\tm.XDSUpdater.ConfigUpdate(&model.PushRequest{Full: true, Reason: model.NewReasonStats(model.ClusterUpdate)})\n\t}\n}", "func (a *ClustersApiService) DeleteClusterExecute(r ApiDeleteClusterRequest) (DeleteClusterResponse, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodDelete\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes 
[]byte\n\t\tlocalVarReturnValue DeleteClusterResponse\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"ClustersApiService.DeleteCluster\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/spaces/{space}/clusters/{cluster-id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"space\"+\"}\", _neturl.PathEscape(parameterToString(r.space, \"\")), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"cluster-id\"+\"}\", _neturl.PathEscape(parameterToString(r.clusterId, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func Delete(name string) error {\n\tinstance, err := Get(name)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to find a cluster named '%s': %s\", name, err.Error())\n\t}\n\terr = instance.Delete()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to delete infrastructure of cluster '%s': %s\", name, err.Error())\n\t}\n\n\t// Deletes the network and related stuff\n\tutils.DeleteNetwork(instance.GetNetworkID())\n\n\t// Cleanup Object Storage data\n\treturn instance.RemoveDefinition()\n}", "func (ch *ClusterHost) Delete() error {\n\tLogf(\"%s removing host from %s\\n\", ch.ID(), ch.Path)\n\n\tobj, err := ch.finder.HostSystem(ch.ctx, path.Join(ch.Path, ch.Name))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn vSphereRemoveHost(ch.ctx, obj)\n}", "func (c *Client) DeleteAllCluster(ctx context.Context, project, location string, filter 
func(*Cluster) bool) error {\n\tlistObj, err := c.ListCluster(ctx, project, location)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = c.deleteAllCluster(ctx, filter, listObj.Items)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor listObj.HasNext() {\n\t\terr = listObj.Next(ctx, c)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\terr = c.deleteAllCluster(ctx, filter, listObj.Items)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func DeleteClusterNode(clusterName, nodeName string) error {\n\t// get all control plane nodes\n\tallControlPlanes, err := nodes.List(\n\t\tfmt.Sprintf(\"label=%s=%s\", constants.ClusterLabelKey, clusterName),\n\t\tfmt.Sprintf(\"label=%s=%s\", constants.NodeRoleKey, constants.ControlPlaneNodeRoleValue),\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar node nodes.Node\n\t// pick one that doesn't match the node name we are trying to delete\n\tfor _, n := range allControlPlanes {\n\t\tif n.Name() != nodeName {\n\t\t\tnode = n\n\t\t\tbreak\n\t\t}\n\t}\n\tcmd := node.Command(\n\t\t\"kubectl\",\n\t\t\"--kubeconfig\", \"/etc/kubernetes/admin.conf\",\n\t\t\"delete\", \"node\", nodeName,\n\t)\n\tlines, err := exec.CombinedOutputLines(cmd)\n\tif err != nil {\n\t\tfor _, line := range lines {\n\t\t\tfmt.Println(line)\n\t\t}\n\t\treturn errors.Wrap(err, \"failed to delete cluster node\")\n\t}\n\treturn nil\n}", "func (mr *MockRdbClientMockRecorder) DeleteCluster(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{arg0, arg1}, arg2...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteCluster\", reflect.TypeOf((*MockRdbClient)(nil).DeleteCluster), varargs...)\n}", "func (a *Client) TerminateCluster(params *TerminateClusterParams, authInfo runtime.ClientAuthInfoWriter) (*TerminateClusterAccepted, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewTerminateClusterParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"TerminateCluster\",\n\t\tMethod: \"DELETE\",\n\t\tPathPattern: \"/api/v1/clusters/{name}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &TerminateClusterReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*TerminateClusterAccepted), nil\n\n}", "func (m *MockRDSAPI) DeleteDBCluster(arg0 *rds.DeleteDBClusterInput) (*rds.DeleteDBClusterOutput, error) {\n\tret := m.ctrl.Call(m, \"DeleteDBCluster\", arg0)\n\tret0, _ := ret[0].(*rds.DeleteDBClusterOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func deleteClusterTask(ctx context.Context, t *testing.T, c *clients, name string) {\n\tt.Logf(\"Deleting clustertask %s\", name)\n\tif err := c.ClusterTaskClient.Delete(ctx, name, metav1.DeleteOptions{}); err != nil {\n\t\tt.Fatalf(\"Failed to delete clustertask: %v\", err)\n\t}\n}", "func (a *Server) DeleteRemoteCluster(ctx context.Context, clusterName string) error {\n\t// To make sure remote cluster exists - to protect against random\n\t// clusterName requests (e.g. 
when clusterName is set to local cluster name)\n\t_, err := a.GetRemoteCluster(clusterName)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\t// delete cert authorities associated with the cluster\n\terr = a.DeleteCertAuthority(ctx, types.CertAuthID{\n\t\tType: types.HostCA,\n\t\tDomainName: clusterName,\n\t})\n\tif err != nil {\n\t\t// this method could have succeeded on the first call,\n\t\t// but then if the remote cluster resource could not be deleted\n\t\t// it would be impossible to delete the cluster after then\n\t\tif !trace.IsNotFound(err) {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\t}\n\t// there should be no User CA in trusted clusters on the main cluster side\n\t// per standard automation but clean up just in case\n\terr = a.DeleteCertAuthority(ctx, types.CertAuthID{\n\t\tType: types.UserCA,\n\t\tDomainName: clusterName,\n\t})\n\tif err != nil {\n\t\tif !trace.IsNotFound(err) {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\t}\n\treturn a.Services.DeleteRemoteCluster(ctx, clusterName)\n}", "func (client RoverClusterClient) DeleteRoverCluster(ctx context.Context, request DeleteRoverClusterRequest) (response DeleteRoverClusterResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.DefaultRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\n\tif !(request.OpcRetryToken != nil && *request.OpcRetryToken != \"\") {\n\t\trequest.OpcRetryToken = common.String(common.RetryToken())\n\t}\n\n\tociResponse, err = common.Retry(ctx, request, client.deleteRoverCluster, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = DeleteRoverClusterResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = DeleteRoverClusterResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(DeleteRoverClusterResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into DeleteRoverClusterResponse\")\n\t}\n\treturn\n}", "func (a *Controller) Deleting(cluster *clustersv1.Cluster) controllers.EnsureFunc {\n\treturn func(ctx context.Context) (reconcile.Result, error) {\n\n\t\tswitch cluster.Status.Status {\n\t\tcase corev1.SuccessStatus, corev1.FailureStatus, corev1.PendingStatus, \"\":\n\t\t\tcluster.Status.Status = corev1.DeletingStatus\n\n\t\t\treturn reconcile.Result{Requeue: true}, nil\n\n\t\tcase corev1.DeletingStatus, corev1.DeletedStatus:\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\n\t\t// else the cluster is not in a state to delete yet\n\t\treturn reconcile.Result{RequeueAfter: 30 * time.Second}, nil\n\t}\n}", "func Delete(client *golangsdk.ServiceClient, clusterId, id string) (r ErrorResult) {\n\t_, r.Err = client.Delete(deleteURL(client, clusterId, id), &golangsdk.RequestOpts{\n\t\tOkCodes: []int{200},\n\t\tMoreHeaders: map[string]string{\"Content-Type\": \"application/json\"},\n\t})\n\treturn\n}", "func Delete(c *golangsdk.ServiceClient, id, cluster_id string) (r DeleteResult) {\n\treqOpt := &golangsdk.RequestOpts{OkCodes: []int{200}}\n\t_, r.Err = c.Delete(resourceURL(c, id, cluster_id), reqOpt)\n\treturn\n}", "func (api *clusterAPI) SyncDelete(obj *cluster.Cluster) error {\n\tvar writeErr error\n\tif api.ct.resolver != nil {\n\t\tapicl, err := api.ct.apiClient()\n\t\tif 
err != nil {\n\t\t\tapi.ct.logger.Errorf(\"Error creating API server clent. Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\n\t\t_, writeErr = apicl.ClusterV1().Cluster().Delete(context.Background(), &obj.ObjectMeta)\n\t}\n\n\tif writeErr == nil {\n\t\tapi.ct.handleClusterEvent(&kvstore.WatchEvent{Object: obj, Type: kvstore.Deleted})\n\t}\n\n\treturn writeErr\n}", "func (c *FakeDaskClusters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {\n\t_, err := c.Fake.\n\t\tInvokes(testing.NewDeleteActionWithOptions(daskclustersResource, c.ns, name, opts), &kubernetesdaskorgv1.DaskCluster{})\n\n\treturn err\n}", "func (sqlStore *SQLStore) DeleteInActiveClusterInstallationByClusterID(clusterID string) (int64, error) {\n\tresult, err := sqlStore.execBuilder(sqlStore.db, sq.\n\t\tUpdate(\"ClusterInstallation\").\n\t\tSet(\"State\", model.ClusterInstallationStateDeletionRequested).\n\t\tWhere(\"ClusterID = ?\", clusterID).\n\t\tWhere(\"IsActive = ?\", false).\n\t\tWhere(\"DeleteAt = 0\"),\n\t)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"failed to mark inactive cluster installation as deleted\")\n\t}\n\trowsUpdated, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"failed to get total affcted rows from updating inactive cluster statement\")\n\t}\n\treturn rowsUpdated, nil\n}", "func (p *ClusterProvider) Delete() error {\n\tprov, err := p.Cmd.BinaryExists(\"helm\")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"please install helm before running the command\")\n\t}\n\tif !prov {\n\t\treturn errors.New(\"please install helm before running the command\")\n\t}\n\tcharts := p.Spec.Charts\n\tfor _, chart := range charts {\n\t\tchartSource, err := source.NewFromMap(chart, p.Cmd, filepath.Join(p.WorkingDir, \"source\"))\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to initialize a source for chart\").WithField(\"chart\", chart)\n\t\t}\n\n\t\tname, err := chartSource.Name()\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"could not get name for chart\").WithField(\"chart\", chart)\n\t\t}\n\n\t\tp.Cmd.Run(\"helm\", \"uninstall\", \"--namespace\", p.NameSpace, name, \"--kubeconfig\", p.KubeConfig)\n\n\t}\n\treturn nil\n}", "func (ca *Adapter) TriggerClusterDel(clusterName string) {\n\tlog.DefaultLogger.Debugf(\"Delete Cluster %s\", clusterName)\n\tca.clusterMng.RemovePrimaryCluster(clusterName)\n}", "func (s *BasecluListener) ExitCluster(ctx *ClusterContext) {}", "func (c *RestClient) DeleteTenant(tenantCluster string) (*models.TcaTask, error) {\n\n\tglog.Infof(\"Deleting tenant cluster %v\", tenantCluster)\n\n\tc.GetClient()\n\tresp, err := c.Client.R().Delete(c.BaseURL + fmt.Sprintf(TcaDeleteTenant, tenantCluster))\n\tif err != nil {\n\t\tglog.Error(err)\n\t\treturn nil, err\n\t}\n\n\tif c.isTrace && resp != nil {\n\t\tfmt.Println(string(resp.Body()))\n\t}\n\n\tif !resp.IsSuccess() {\n\t\treturn nil, c.checkErrors(resp)\n\t}\n\n\tvar task models.TcaTask\n\tif err := json.Unmarshal(resp.Body(), &task); err != nil {\n\t\tglog.Error(\"Failed parse server respond.\")\n\t\treturn nil, err\n\t}\n\n\treturn &task, nil\n}", "func (c *ECS) DeleteClusterRequest(input *DeleteClusterInput) (req *aws.Request, output *DeleteClusterOutput) {\n\toprw.Lock()\n\tdefer oprw.Unlock()\n\n\tif opDeleteCluster == nil {\n\t\topDeleteCluster = &aws.Operation{\n\t\t\tName: \"DeleteCluster\",\n\t\t\tHTTPMethod: \"POST\",\n\t\t\tHTTPPath: \"/\",\n\t\t}\n\t}\n\n\treq = c.newRequest(opDeleteCluster, input, output)\n\toutput = &DeleteClusterOutput{}\n\treq.Data 
= output\n\treturn\n}", "func (cmd *VirtualClusterCmd) Run(cobraCmd *cobra.Command, args []string) error {\n\tbaseClient, err := client.NewClientFromPath(cmd.Config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvirtualClusterName := \"\"\n\tif len(args) > 0 {\n\t\tvirtualClusterName = args[0]\n\t}\n\n\tvirtualClusterName, spaceName, clusterName, err := helper.SelectVirtualClusterAndSpaceAndClusterName(baseClient, virtualClusterName, cmd.Space, cmd.Cluster, cmd.Log)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclusterClient, err := baseClient.Cluster(clusterName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgracePeriod := int64(0)\n\terr = clusterClient.Loft().StorageV1().VirtualClusters(spaceName).Delete(context.TODO(), virtualClusterName, metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"delete virtual cluster\")\n\t}\n\n\tcmd.Log.Donef(\"Successfully deleted virtual cluster %s in space %s in cluster %s\", ansi.Color(virtualClusterName, \"white+b\"), ansi.Color(spaceName, \"white+b\"), ansi.Color(clusterName, \"white+b\"))\n\n\t// update kube config\n\tif cmd.DeleteContext {\n\t\terr = kubeconfig.DeleteContext(kubeconfig.VirtualClusterContextName(clusterName, spaceName, virtualClusterName))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcmd.Log.Donef(\"Successfully deleted kube context for virtual cluster %s\", ansi.Color(virtualClusterName, \"white+b\"))\n\t}\n\n\t// delete space\n\tif cmd.DeleteSpace {\n\t\terr = clusterClient.CoreV1().Namespaces().Delete(context.TODO(), spaceName, metav1.DeleteOptions{})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// wait for termination\n\t\tif cmd.Wait {\n\t\t\tcmd.Log.StartWait(\"Waiting for space to be deleted\")\n\t\t\tfor isSpaceStillThere(clusterClient, spaceName) {\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t}\n\t\t\tcmd.Log.StopWait()\n\t\t}\n\n\t\tcmd.Log.Donef(\"Successfully deleted space %s\", spaceName)\n\t}\n\n\treturn nil\n}", "func UnregisterCluster(c echo.Context) error {\n\tcblog.Info(\"call UnregisterCluster()\")\n\n\tvar req struct {\n\t\tConnectionName string\n\t}\n\n\tif err := c.Bind(&req); err != nil {\n\t\treturn echo.NewHTTPError(http.StatusInternalServerError, err.Error())\n\t}\n\n\t// Call common-runtime API\n\tresult, err := cmrt.UnregisterResource(req.ConnectionName, rsCluster, c.Param(\"Name\"))\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusInternalServerError, err.Error())\n\t}\n\n\tresultInfo := BooleanInfo{\n\t\tResult: strconv.FormatBool(result),\n\t}\n\n\treturn c.JSON(http.StatusOK, &resultInfo)\n}", "func (client OpenShiftManagedClustersClient) Delete(ctx context.Context, resourceGroupName string, resourceName string) (result OpenShiftManagedClustersDeleteFuture, err error) {\n\treq, err := client.DeletePreparer(ctx, resourceGroupName, resourceName)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"containerservice.OpenShiftManagedClustersClient\", \"Delete\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresult, err = client.DeleteSender(req)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"containerservice.OpenShiftManagedClustersClient\", \"Delete\", result.Response(), \"Failure sending request\")\n\t\treturn\n\t}\n\n\treturn\n}", "func DeleteAzureCluster(cs *banzaiSimpleTypes.ClusterSimple, c *gin.Context) bool {\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagGetCluster, \"Start delete azure cluster\")\n\n\tif cs == nil {\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagGetCluster, \"<nil> 
cluster\")\n\t\treturn false\n\t}\n\n\t// set azure props\n\tdatabase.SelectFirstWhere(&cs.Azure, banzaiSimpleTypes.AzureClusterSimple{ClusterSimpleId: cs.ID})\n\tif DeleteClusterAzure(c, cs.Name, cs.Azure.ResourceGroup) {\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagGetCluster, \"Delete succeeded\")\n\t\treturn true\n\t} else {\n\t\tbanzaiUtils.LogWarn(banzaiConstants.TagGetCluster, \"Can't delete cluster from cloud!\")\n\t\tSetResponseBodyJson(c, http.StatusBadRequest, gin.H{\n\t\t\tJsonKeyStatus: http.StatusBadRequest,\n\t\t\tJsonKeyMessage: \"Can't delete cluster!\",\n\t\t\tJsonKeyResourceId: cs.ID,\n\t\t})\n\t\treturn false\n\t}\n}", "func (a *ClustersApiService) DeleteCluster(ctx _context.Context, space string, clusterId string) ApiDeleteClusterRequest {\n\treturn ApiDeleteClusterRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tspace: space,\n\t\tclusterId: clusterId,\n\t}\n}", "func (p *Provider) Delete(name, explicitKubeconfigPath string) error {\n\treturn internaldelete.Cluster(p.logger, p.ic(name), explicitKubeconfigPath)\n}", "func (svc *SSHKeysService) RemoveFromCluster(ctx context.Context, prj, dc, cls, id string) (*http.Response, error) {\n\tpath := clusterSSHKeyPath(prj, dc, cls, id)\n\treturn svc.client.resourceDelete(ctx, path)\n}", "func (c *ClustersController) Delete(ctx *app.DeleteClustersContext) error {\n\terr := c.app.ClusterService().Delete(ctx, ctx.ClusterID)\n\tif err != nil {\n\t\tlog.Error(ctx, map[string]interface{}{\n\t\t\t\"error\": err,\n\t\t}, \"error while deleting a cluster configuration\")\n\t\treturn app.JSONErrorResponse(ctx, err)\n\t}\n\treturn ctx.NoContent()\n}", "func (a ClustersAPI) Terminate(clusterID string) error {\n\tdata := struct {\n\t\tClusterID string `json:\"cluster_id,omitempty\" url:\"cluster_id,omitempty\"`\n\t}{\n\t\tclusterID,\n\t}\n\t_, err := a.Client.performQuery(http.MethodPost, \"/clusters/delete\", data, nil)\n\treturn err\n}", "func DeleteAKSCluster(client autorest.Client, urlParameters map[string]interface{}, apiVersion string) {\r\n\r\n\tqueryParameters := map[string]interface{}{\r\n\t\t\"api-version\": apiVersion,\r\n\t}\r\n\tpreparerDecorators := []autorest.PrepareDecorator{\r\n\t\tautorest.AsContentType(\"application/json; charset=utf-8\"),\r\n\t\tautorest.WithMethod(\"DELETE\"),\r\n\t\tautorest.WithBaseURL(azure.PublicCloud.ResourceManagerEndpoint),\r\n\t\tautorest.WithPathParameters(\r\n\t\t\t\"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.containerservice/managedclusters/{resourceName}\",\r\n\t\t\turlParameters,\r\n\t\t),\r\n\t\tautorest.WithQueryParameters(queryParameters),\r\n\t}\r\n\r\n\tpreparer := autorest.CreatePreparer(preparerDecorators...)\r\n\treq, err := preparer.Prepare((&http.Request{}).WithContext(context.Background()))\r\n\r\n\tif err != nil {\r\n\t\tpanic(err)\r\n\t}\r\n\r\n\tfmt.Println(req.URL)\r\n\r\n\tresp, err := client.Do(req)\r\n\tif err != nil {\r\n\t\tpanic(err)\r\n\t}\r\n\terr = autorest.Respond(\r\n\t\tresp,\r\n\t\tclient.ByInspecting(),\r\n\t)\r\n\r\n\tfmt.Println(resp.Status)\r\n}", "func (m *MockRdbClient) DeleteCluster(arg0 context.Context, arg1 *v1alpha.DeleteClusterRequest, arg2 ...grpc.CallOption) (*emptypb.Empty, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"DeleteCluster\", varargs...)\n\tret0, _ := ret[0].(*emptypb.Empty)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (instance *Host) Delete(ctx context.Context) 
(ferr fail.Error) {\n\tdefer fail.OnPanic(&ferr)\n\n\tdefer func() {\n\t\t// drop the cache when we are done creating the cluster\n\t\tif ka, err := instance.Service().GetCache(context.Background()); err == nil {\n\t\t\tif ka != nil {\n\t\t\t\t_ = ka.Clear(context.Background())\n\t\t\t}\n\t\t}\n\t}()\n\n\tif valid.IsNil(instance) {\n\t\treturn fail.InvalidInstanceError()\n\t}\n\tif ctx == nil {\n\t\treturn fail.InvalidParameterCannotBeNilError(\"ctx\")\n\t}\n\n\txerr := instance.Inspect(ctx, func(clonable data.Clonable, props *serialize.JSONProperties) fail.Error {\n\t\t// Do not remove a Host that is a gateway\n\t\treturn props.Inspect(hostproperty.NetworkV2, func(clonable data.Clonable) fail.Error {\n\t\t\thostNetworkV2, ok := clonable.(*propertiesv2.HostNetworking)\n\t\t\tif !ok {\n\t\t\t\treturn fail.InconsistentError(\"'*propertiesv2.HostNetworking' expected, '%s' provided\", reflect.TypeOf(clonable).String())\n\t\t\t}\n\n\t\t\tif hostNetworkV2.IsGateway {\n\t\t\t\treturn fail.NotAvailableError(\"cannot delete Host, it's a gateway that can only be deleted through its Subnet\")\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t})\n\txerr = debug.InjectPlannedFail(xerr)\n\tif xerr != nil {\n\t\treturn xerr\n\t}\n\n\txerr = instance.RelaxedDeleteHost(cleanupContextFrom(ctx))\n\treturn xerr\n}", "func Delete(c *client.Client, clusterrolename string) error {\n\terr := c.Clientset.RbacV1().ClusterRoles().Delete(\n\t\tcontext.TODO(),\n\t\tclusterrolename,\n\t\tmetav1.DeleteOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (h *Handler) serveDeleteClusterAdmin(w http.ResponseWriter, r *http.Request) {}", "func (c *Cluster) Remove(ctx context.Context, forceRemove bool) error {\n\tif err := c.restore(); errors.IsNotFound(err) {\n\t\treturn nil\n\t} else if err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.Driver.Remove(ctx, toInfo(c)); err != nil {\n\t\t// Persist store removal must take place despite error to prevent cluster from being stuck in remove state\n\t\t// TODO: We should add a \"forceRemove\" action to cluster and then revert this to return an error, so that\n\t\t// the user can see the problem and take appropriate action\n\t\tif !forceRemove {\n\t\t\treturn fmt.Errorf(\"Error removing cluster [%s] with driver [%s]: %v\", c.Name, c.DriverName, err)\n\t\t}\n\t\tlogrus.Errorf(\"Error removing cluster [%s] with driver [%s]. 
Check for stray resources on cloud provider: %v\", c.Name, c.DriverName, err)\n\t}\n\treturn c.PersistStore.Remove(c.Name)\n}", "func ExampleClustersClient_Delete() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armservicefabric.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\t_, err = clientFactory.NewClustersClient().Delete(ctx, \"resRg\", \"myCluster\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n}", "func (nh *NodeHost) StopCluster(clusterID uint64) error {\n\treturn nh.stopNode(clusterID, 0, false)\n}", "func (a *HyperflexApiService) DeleteHyperflexBackupClusterExecute(r ApiDeleteHyperflexBackupClusterRequest) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = http.MethodDelete\n\t\tlocalVarPostBody interface{}\n\t\tformFiles []formFile\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"HyperflexApiService.DeleteHyperflexBackupCluster\")\n\tif err != nil {\n\t\treturn nil, &GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/api/v1/hyperflex/BackupClusters/{Moid}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"Moid\"+\"}\", url.PathEscape(parameterToString(r.moid, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := &GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error 
= err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tvar v Error\n\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\tif err != nil {\n\t\t\tnewErr.error = err.Error()\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tnewErr.model = v\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func (*OktetoClusterHelper) Delete(_ string) error {\n\treturn ErrNotImplemented\n}", "func (ctrler CtrlDefReactor) OnClusterDelete(obj *Cluster) error {\n\tlog.Info(\"OnClusterDelete is not implemented\")\n\treturn nil\n}", "func (r *ClusterDeleteRequest) Send() (result *ClusterDeleteResponse, err error) {\n\treturn r.SendContext(context.Background())\n}", "func deleteServiceAccountFromCluster(unjoiningClusterClientset internalclientset.Interface, cluster *federationapi.Cluster, fedSystemNamespace string) error {\n\tserviceAccountName, ok := cluster.ObjectMeta.Annotations[kubectl.ServiceAccountNameAnnotation]\n\tif !ok {\n\t\t// If there is no service account name annotation, assume that this cluster does not have a federation control plane service account.\n\t\treturn nil\n\t}\n\treturn unjoiningClusterClientset.Core().ServiceAccounts(fedSystemNamespace).Delete(serviceAccountName, &metav1.DeleteOptions{})\n}", "func (o *DeleteClusterParams) WithTimeout(timeout time.Duration) *DeleteClusterParams {\n\to.SetTimeout(timeout)\n\treturn o\n}", "func cleanCluster(req *restful.Request, clusterID string) error {\n\t// 参数\n\tdata := operator.M{\n\t\tclusterIDTag: \"\",\n\t\tupdateTimeTag: time.Now(),\n\t}\n\tcondition := operator.NewLeafCondition(operator.Eq, operator.M{clusterIDTag: clusterID})\n\treturn UpdateMany(req.Request.Context(), tableName, condition, data)\n}", "func (a *Client) V2DeregisterCluster(ctx context.Context, params *V2DeregisterClusterParams) (*V2DeregisterClusterNoContent, error) {\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"v2DeregisterCluster\",\n\t\tMethod: \"DELETE\",\n\t\tPathPattern: \"/v2/clusters/{cluster_id}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &V2DeregisterClusterReader{formats: a.formats},\n\t\tAuthInfo: a.authInfo,\n\t\tContext: ctx,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*V2DeregisterClusterNoContent), nil\n\n}", "func (c *DatabaseNode) Delete(contrailClient contrailclient.ApiClient) error {\n\tdatabaseInfoLog.Printf(\"Deleting %s %s\", c.Hostname, nodeType)\n\tobj, err := contrailclient.GetContrailObjectByName(contrailClient, string(nodeType), 
c.Hostname)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn contrailClient.Delete(obj)\n}", "func (c *krakenClusters) Delete(name string, options *v1.DeleteOptions) error {\n\treturn c.client.Delete().\n\t\tNamespace(c.ns).\n\t\tResource(\"krakenclusters\").\n\t\tName(name).\n\t\tBody(options).\n\t\tDo().\n\t\tError()\n}", "func (s *RedisClusterStore) Delete(ctx context.Context, key interface{}) error {\n\t_, err := s.clusclient.Del(ctx, key.(string)).Result()\n\treturn err\n}", "func deleteClusterRoleBindingFromCluster(unjoiningClusterClientset internalclientset.Interface, cluster *federationapi.Cluster) error {\n\tclusterRoleName, ok := cluster.ObjectMeta.Annotations[kubectl.ClusterRoleNameAnnotation]\n\tif !ok {\n\t\t// If there is no cluster role name annotation, assume that this cluster does not have cluster role bindings.\n\t\treturn nil\n\t}\n\n\terr := unjoiningClusterClientset.Rbac().ClusterRoleBindings().Delete(clusterRoleName, &metav1.DeleteOptions{})\n\tif err != nil && !errors.IsMethodNotSupported(err) && !errors.IsNotFound(err) {\n\t\treturn err\n\t}\n\terr = unjoiningClusterClientset.Rbac().ClusterRoles().Delete(clusterRoleName, &metav1.DeleteOptions{})\n\tif err != nil && !errors.IsMethodNotSupported(err) && !errors.IsNotFound(err) {\n\t\treturn err\n\t}\n\treturn nil\n}" ]
[ "0.77345514", "0.7503085", "0.74316007", "0.73912096", "0.7315511", "0.72915375", "0.72617304", "0.72506875", "0.71797746", "0.71408546", "0.71262103", "0.70984936", "0.70816237", "0.6998919", "0.69962054", "0.69699913", "0.6955641", "0.6786505", "0.67715704", "0.6723802", "0.6691407", "0.66834086", "0.6644122", "0.6626703", "0.66184825", "0.66038305", "0.65800834", "0.65740794", "0.65715647", "0.6551502", "0.6521122", "0.6516382", "0.65134996", "0.6426402", "0.6418197", "0.63979113", "0.6386134", "0.63731956", "0.6372443", "0.63591146", "0.6332806", "0.63253593", "0.63252765", "0.6299922", "0.62765944", "0.6231771", "0.623086", "0.6213522", "0.6147827", "0.6125117", "0.6056566", "0.60229486", "0.6017844", "0.59798706", "0.59751207", "0.59720564", "0.59644455", "0.59268856", "0.58960366", "0.58891535", "0.5886765", "0.58800906", "0.5876714", "0.58546287", "0.5779502", "0.5747941", "0.57348514", "0.573138", "0.5725705", "0.57237256", "0.5723564", "0.57213295", "0.57006824", "0.5692487", "0.5680514", "0.5677206", "0.5673335", "0.56717217", "0.56585574", "0.564058", "0.56227744", "0.5611516", "0.5606864", "0.56013507", "0.5585801", "0.5579964", "0.55739176", "0.55681825", "0.55443203", "0.5543923", "0.5538813", "0.55281025", "0.55174047", "0.5504761", "0.5479913", "0.54729354", "0.5458804", "0.54582256", "0.54541713", "0.5453231" ]
0.7714795
1
GenCerts generates a CA cert, or a server cert signed by the CA cert. If isCA is true, the output is a CA cert/key pair named ca-cert.pem/ca-key.pem; if isCA is false, the output files are named after outname, for example outfile-cert.pem and outfile-key.pem.
func GenCerts(hosts []string, outname string, isCA bool) (err error) {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return fmt.Errorf("GenerateKey: %v", err)
	}
	template := x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject: pkix.Name{
			Organization: []string{"Acme Co"},
		},
		NotBefore: time.Now(),
		NotAfter:  time.Now().Add(time.Hour * 24 * 3650),

		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		BasicConstraintsValid: true,
	}

	var (
		cakey    *ecdsa.PrivateKey
		cacrt    *x509.Certificate
		derBytes []byte
	)

	if isCA {
		// self-signed CA certificate; output name is forced to "ca"
		template.IsCA = true
		template.KeyUsage |= x509.KeyUsageCertSign
		outname = "ca"
		derBytes, err = x509.CreateCertificate(rand.Reader, &template, &template, publicKey(priv), priv)
		if err != nil {
			return fmt.Errorf("Failed to create certificate: %v", err)
		}
	} else {
		// server certificate is valid for these names
		for _, h := range hosts {
			if ip := net.ParseIP(h); ip != nil {
				template.IPAddresses = append(template.IPAddresses, ip)
			} else {
				template.DNSNames = append(template.DNSNames, h)
			}
		}

		// ca key file
		caData, err := os.ReadFile("ca-key.pem")
		if err != nil {
			return fmt.Errorf("Read ca-key.pem: %v", err)
		}
		block, _ := pem.Decode(caData)
		if block == nil {
			return fmt.Errorf("Decode ca-key.pem: no PEM block found")
		}
		cakey, err = x509.ParseECPrivateKey(block.Bytes)
		if err != nil {
			return fmt.Errorf("Parse ca-key.pem: %v", err)
		}

		// ca cert file
		caData, err = os.ReadFile("ca-cert.pem")
		if err != nil {
			return fmt.Errorf("Read ca-cert.pem: %v", err)
		}
		block, _ = pem.Decode(caData)
		if block == nil {
			return fmt.Errorf("Decode ca-cert.pem: no PEM block found")
		}
		cacrt, err = x509.ParseCertificate(block.Bytes)
		if err != nil {
			return fmt.Errorf("Parse ca-cert.pem: %v", err)
		}

		// generate C2 server certificate, signed by our CA
		derBytes, err = x509.CreateCertificate(rand.Reader, &template, cacrt, publicKey(priv), cakey)
		if err != nil {
			return fmt.Errorf("Failed to create certificate: %v", err)
		}
	}

	// output to pem files
	out := &bytes.Buffer{}
	outcert := fmt.Sprintf("%s-cert.pem", outname)
	outkey := fmt.Sprintf("%s-key.pem", outname)

	// cert
	pem.Encode(out, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
	err = os.WriteFile(outcert, out.Bytes(), 0600)
	if err != nil {
		return fmt.Errorf("Write %s: %v", outcert, err)
	}
	out.Reset()

	// key
	pem.Encode(out, pemBlockForKey(priv))
	err = os.WriteFile(outkey, out.Bytes(), 0600)
	if err != nil {
		return fmt.Errorf("Write %s: %v", outkey, err)
	}

	return
}
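A minimal usage sketch, assuming GenCerts and its helpers are in scope (the wrapper name and host list are hypothetical): the CA pair must be generated first, since the isCA=false branch reads ca-cert.pem and ca-key.pem from the working directory.

// genAll shows the required call order for GenCerts.
func genAll() error {
	// 1. Create the self-signed CA; writes ca-cert.pem and ca-key.pem
	//    (outname is forced to "ca" inside GenCerts, so "" is fine here).
	if err := GenCerts(nil, "", true); err != nil {
		return err
	}
	// 2. Issue a server cert signed by that CA; writes server-cert.pem and
	//    server-key.pem, valid for the listed DNS names and IP addresses.
	return GenCerts([]string{"localhost", "127.0.0.1"}, "server", false)
}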
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func genCerts(date time.Time) ([]byte, []byte, error) {\n\t// Create ca signing key\n\tca := &x509.Certificate{\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{\"I Can Haz Expired Certs\"},\n\t\t},\n\t\tSerialNumber: big.NewInt(42),\n\t\tNotBefore: date.Truncate(8760 * time.Hour),\n\t\tNotAfter: date,\n\t\tIsCA: true,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},\n\t\tKeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,\n\t\tBasicConstraintsValid: true,\n\t}\n\n\t// Create a private key\n\tkey, err := rsa.GenerateKey(rand.Reader, 4096)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"Could not generate rsa key - %s\", err)\n\t}\n\n\t// Use ca key to sign a CSR and create a public Cert\n\tcsr := &key.PublicKey\n\tcert, err := x509.CreateCertificate(rand.Reader, ca, ca, csr, key)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"Could not generate certificate - %s\", err)\n\t}\n\n\t// Convert keys into []byte\n\tc := pem.EncodeToMemory(&pem.Block{Type: \"CERTIFICATE\", Bytes: cert})\n\tk := pem.EncodeToMemory(&pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(key)})\n\treturn c, k, nil\n}", "func (am *admissionManager) generateCerts(create bool) (\n\tserverCertificate, serverPrivateKey, caCertificate []byte,\n\terr error) {\n\tvar caPrivateKey []byte\n\tcaPrivateKey, err = utils.SetUpCaKey()\n\tif err != nil {\n\t\tklog.Errorf(\"set up ca key failed %v\", err)\n\t\treturn nil, nil, nil, err\n\t}\n\tcaCertificate, err = utils.SetUpCaCert(webhookconstants.ComponentName, caPrivateKey)\n\tif err != nil {\n\t\tklog.Errorf(\"set up ca cert failed %v\", err)\n\t\treturn nil, nil, nil, err\n\t}\n\tnamespace := utils.GetCurrentNamespace()\n\tdomains, ips := subjectAltNames(namespace, am.externalService)\n\tserverCertificate, serverPrivateKey, err = utils.SetUpSignedCertAndKey(domains, ips,\n\t\twebhookconstants.ComponentName,\n\t\tcaPrivateKey, caCertificate, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth})\n\tif err != nil {\n\t\tklog.Errorf(\"set up server cert error %v\", err)\n\t\treturn nil, nil, nil, err\n\t}\n\tif create {\n\t\t// try to create a new secret to save certificate and privateKey and ca certificate.\n\t\t_, err = am.kubeClient.CoreV1().Secrets(namespace).Create(context.Background(),\n\t\t\t&corev1.Secret{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: certsSecretName,\n\t\t\t\t\tNamespace: namespace,\n\t\t\t\t},\n\t\t\t\tData: map[string][]byte{\n\t\t\t\t\tserverCert: serverCertificate,\n\t\t\t\t\tserverKey: serverPrivateKey,\n\t\t\t\t\tcaCert: caCertificate,\n\t\t\t\t},\n\t\t\t}, metav1.CreateOptions{})\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"create new certificate secret error %v\", err)\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t} else {\n\t\t// try to update an old secret to save certificate and privateKey and ca certificate.\n\t\tif err = utils.UpdateSecret(am.kubeClient, namespace, certsSecretName,\n\t\t\tfunc(secret *corev1.Secret) {\n\t\t\t\tsecret.Data = map[string][]byte{\n\t\t\t\t\tserverCert: serverCertificate,\n\t\t\t\t\tserverKey: serverPrivateKey,\n\t\t\t\t\tcaCert: caCertificate,\n\t\t\t\t}\n\t\t\t}); err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t}\n\treturn caCertificate, serverCertificate, serverPrivateKey, nil\n}", "func GenerateCA(c *cli.Context) error {\n\thost := c.String(\"host\")\n\n\trsaBits := c.Int(\"rsa-bits\")\n\tecdsaCurve := c.String(\"ecdsa-curve\")\n\n\tvalidFrom := c.String(\"start-date\")\n\n\tvalidFor := 
c.Duration(\"duration\")\n\tcert, key, err := Ca(host, rsaBits, ecdsaCurve, validFrom, validFor)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create certificate: %s\", err)\n\t}\n\tvar certname = \"0.cert\"\n\tvar keyname = \"0.key\"\n\n\tcertout, err := os.Create(certname)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to open \"+certname+\" for writing: %s\", err)\n\t}\n\tpem.Encode(certout, &cert)\n\tcertout.Close()\n\tlog.Print(\"written \" + certname + \"\\n\")\n\n\tkeyout, err := os.OpenFile(keyname, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)\n\tif err != nil {\n\t\tlog.Print(\"failed to open \"+keyname+\" for writing:\", err)\n\t\treturn nil\n\t}\n\tpem.Encode(keyout, &key)\n\tkeyout.Close()\n\tlog.Print(\"written \" + keyname + \"\\n\")\n\treturn nil\n}", "func genCertsIfMIssing(\n\tt *testing.T,\n\tcapem string,\n\tcakey string,\n) error {\n\t_, err := os.Stat(capem)\n\tif err == nil {\n\t\t_, err = os.Stat(cakey)\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tserialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)\n\tserialNumber, err := rand.Int(rand.Reader, serialNumberLimit)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to generate serial number: %s\", err)\n\t}\n\n\tcaTemplate := x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tCountry: []string{\"US\"},\n\t\t\tOrganization: []string{\"elastic\"},\n\t\t\tOrganizationalUnit: []string{\"beats\"},\n\t\t},\n\t\tIssuer: pkix.Name{\n\t\t\tCountry: []string{\"US\"},\n\t\t\tOrganization: []string{\"elastic\"},\n\t\t\tOrganizationalUnit: []string{\"beats\"},\n\t\t\tLocality: []string{\"locality\"},\n\t\t\tProvince: []string{\"province\"},\n\t\t\tStreetAddress: []string{\"Mainstreet\"},\n\t\t\tPostalCode: []string{\"12345\"},\n\t\t\tSerialNumber: \"23\",\n\t\t\tCommonName: \"*\",\n\t\t},\n\t\tIPAddresses: []net.IP{\n\t\t\tnet.IP{127, 0, 0, 1},\n\t\t},\n\n\t\tSignatureAlgorithm: x509.SHA512WithRSA,\n\t\tPublicKeyAlgorithm: x509.ECDSA,\n\t\tNotBefore: time.Now(),\n\t\tNotAfter: time.Now().AddDate(10, 0, 0),\n\t\tSubjectKeyId: []byte(\"12345\"),\n\t\tBasicConstraintsValid: true,\n\t\tIsCA: true,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{\n\t\t\tx509.ExtKeyUsageClientAuth,\n\t\t\tx509.ExtKeyUsageServerAuth},\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment |\n\t\t\tx509.KeyUsageDigitalSignature |\n\t\t\tx509.KeyUsageCertSign,\n\t}\n\n\t// generate keys\n\tpriv, err := rsa.GenerateKey(rand.Reader, 4096)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to generate ca private key: %v\", err)\n\t}\n\tpub := &priv.PublicKey\n\n\t// generate certificate\n\tcaBytes, err := x509.CreateCertificate(\n\t\trand.Reader,\n\t\t&caTemplate,\n\t\t&caTemplate,\n\t\tpub, priv)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to generate ca certificate: %v\", err)\n\t}\n\n\t// write key file\n\tkeyOut, err := os.OpenFile(cakey, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to open key file for writing: %v\", err)\n\t}\n\tpem.Encode(keyOut, &pem.Block{\n\t\tType: \"RSA PRIVATE KEY\",\n\t\tBytes: x509.MarshalPKCS1PrivateKey(priv)})\n\tkeyOut.Close()\n\n\t// write certificate\n\tcertOut, err := os.Create(capem)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to open cert.pem for writing: %s\", err)\n\t}\n\tpem.Encode(certOut, &pem.Block{Type: \"CERTIFICATE\", Bytes: caBytes})\n\tcertOut.Close()\n\n\treturn nil\n}", "func OutputCerts(config *certgenConfig, kubeclient *kubernetes.Clientset, certs *certs.Certificates) error {\n\tvar secrets []*corev1.Secret\n\tvar errs []error\n\n\tforce := certgen.NoOverwrite\n\tif 
config.Overwrite {\n\t\tforce = certgen.Overwrite\n\t}\n\n\tif config.OutputYAML || config.OutputKube {\n\t\tswitch config.Format {\n\t\tcase \"legacy\":\n\t\t\tsecrets, errs = certgen.AsLegacySecrets(config.Namespace, config.NameSuffix, certs)\n\t\tcase \"compact\":\n\t\t\tsecrets, errs = certgen.AsSecrets(config.Namespace, config.NameSuffix, certs)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unsupported Secrets format %q\", config.Format)\n\t\t}\n\n\t\tif len(errs) > 0 {\n\t\t\treturn utilerrors.NewAggregate(errs)\n\t\t}\n\t}\n\n\tif config.OutputPEM {\n\t\tfmt.Printf(\"Writing certificates to PEM files in %s/\\n\", config.OutputDir)\n\t\tif err := certgen.WriteCertsPEM(config.OutputDir, certs, force); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to write certificates to %q: %w\", config.OutputDir, err)\n\t\t}\n\t}\n\n\tif config.OutputYAML {\n\t\tfmt.Printf(\"Writing %q format Secrets to YAML files in %s/\\n\", config.Format, config.OutputDir)\n\t\tif err := certgen.WriteSecretsYAML(config.OutputDir, secrets, force); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to write Secrets to %q: %w\", config.OutputDir, err)\n\t\t}\n\t}\n\n\tif config.OutputKube {\n\t\tfmt.Printf(\"Writing %q format Secrets to namespace %q\\n\", config.Format, config.Namespace)\n\t\tif err := certgen.WriteSecretsKube(kubeclient, secrets, force); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to write certificates to %q: %w\", config.Namespace, err)\n\t\t}\n\t}\n\treturn nil\n}", "func GenerateCert(hosts []string, certFile, keyFile, caFile, caKeyFile, org string, bits int) error {\n\ttemplate, err := newCertificate(org)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// client\n\tif len(hosts) == 1 && hosts[0] == \"\" {\n\t\ttemplate.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}\n\t\ttemplate.KeyUsage = x509.KeyUsageDigitalSignature\n\t} else { // server\n\t\ttemplate.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}\n\t\tfor _, h := range hosts {\n\t\t\tif ip := net.ParseIP(h); ip != nil {\n\t\t\t\ttemplate.IPAddresses = append(template.IPAddresses, ip)\n\n\t\t\t} else {\n\t\t\t\ttemplate.DNSNames = append(template.DNSNames, h)\n\t\t\t}\n\t\t}\n\t}\n\n\ttlsCert, err := tls.LoadX509KeyPair(caFile, caKeyFile)\n\tif err != nil {\n\t\treturn err\n\n\t}\n\n\tpriv, err := rsa.GenerateKey(rand.Reader, bits)\n\tif err != nil {\n\t\treturn err\n\n\t}\n\n\tx509Cert, err := x509.ParseCertificate(tlsCert.Certificate[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tderBytes, err := x509.CreateCertificate(rand.Reader, template, x509Cert, &priv.PublicKey, tlsCert.PrivateKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcertOut, err := os.Create(certFile)\n\tif err != nil {\n\t\treturn err\n\n\t}\n\n\tpem.Encode(certOut, &pem.Block{Type: \"CERTIFICATE\", Bytes: derBytes})\n\tcertOut.Close()\n\n\tkeyOut, err := os.OpenFile(keyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)\n\tif err != nil {\n\t\treturn err\n\n\t}\n\n\tpem.Encode(keyOut, &pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(priv)})\n\tkeyOut.Close()\n\n\treturn nil\n}", "func GenCert(certf string, keyf string, certtype bool, addHosts bool) error {\n\t/* Create the basenames if needed */\n\tdir := filepath.Dir(certf)\n\terr := os.MkdirAll(dir, 0750)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdir = filepath.Dir(keyf)\n\terr = os.MkdirAll(dir, 0750)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcertBytes, keyBytes, err := GenerateMemCert(certtype, addHosts)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\tcertOut, err := os.Create(certf)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to open %s for writing: %w\", certf, err)\n\t}\n\n\t_, err = certOut.Write(certBytes)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to write cert file: %w\", err)\n\t}\n\n\terr = certOut.Close()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to close cert file: %w\", err)\n\t}\n\n\tkeyOut, err := os.OpenFile(keyf, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to open %s for writing: %w\", keyf, err)\n\t}\n\n\t_, err = keyOut.Write(keyBytes)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to write key file: %w\", err)\n\t}\n\n\terr = keyOut.Close()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to close key file: %w\", err)\n\t}\n\n\treturn nil\n}", "func createCertificates(_ *testing.T) error {\n\tvar err error\n\tvar srcCaCrt *os.File\n\tvar srcTLSCrt *os.File\n\tvar srcTLSKey *os.File\n\tvar destCaCrt *os.File\n\tvar destTLSCrt *os.File\n\tvar destTLSKey *os.File\n\n\tdir := \"/tmp/k8s-webhook-server/serving-certs\"\n\n\t// create directory if not existing yet\n\t_ = os.Mkdir(\"/tmp/k8s-webhook-server\", os.ModePerm)\n\t_ = os.Mkdir(dir, os.ModePerm)\n\n\t// open src files\n\tif srcCaCrt, err = os.Open(\"../../test/certs/ca.crt\"); err != nil {\n\t\treturn err\n\t}\n\tdefer srcCaCrt.Close()\n\tif srcTLSCrt, err = os.Open(\"../../test/certs/tls.crt\"); err != nil {\n\t\treturn err\n\t}\n\tdefer srcTLSCrt.Close()\n\tif srcTLSKey, err = os.Open(\"../../test/certs/tls.key\"); err != nil {\n\t\treturn err\n\t}\n\tdefer srcTLSKey.Close()\n\n\t// open dest files\n\tif destCaCrt, err = os.Create(fmt.Sprintf(\"%s/%s\", dir, \"ca.crt\")); err != nil {\n\t\treturn err\n\t}\n\tdefer destCaCrt.Close()\n\tif destTLSCrt, err = os.Create(fmt.Sprintf(\"%s/%s\", dir, \"tls.crt\")); err != nil {\n\t\treturn err\n\t}\n\tdefer destTLSCrt.Close()\n\tif destTLSKey, err = os.Create(fmt.Sprintf(\"%s/%s\", dir, \"tls.key\")); err != nil {\n\t\treturn err\n\t}\n\tdefer destTLSKey.Close()\n\n\t// copy ca.crt\n\tif _, err := io.Copy(destCaCrt, srcCaCrt); err != nil {\n\t\treturn err\n\t}\n\t// copy tls.crt\n\tif _, err := io.Copy(destTLSCrt, srcTLSCrt); err != nil {\n\t\treturn err\n\t}\n\t// copy tls.key\n\tif _, err := io.Copy(destTLSKey, srcTLSKey); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func writeCApem(t *testing.T, srv *httptest.Server, tmpDir string, certName string) *os.File {\n\tcaPEM := new(bytes.Buffer)\n\terr := pem.Encode(caPEM, &pem.Block{\n\t\tType: \"CERTIFICATE\",\n\t\tBytes: srv.Certificate().Raw,\n\t})\n\trequire.NoError(t, err)\n\n\t// Then write the ca.pem to disk\n\tcaPem, err := os.Create(filepath.Join(tmpDir, certName))\n\trequire.NoError(t, err)\n\t_, err = caPem.Write(caPEM.Bytes())\n\trequire.NoError(t, err)\n\treturn caPem\n}", "func GenerateCert() (path string, err error) {\n\tpath, err = ioutil.TempDir(\"\", \"\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// generate a key\n\tprivKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// create a certificate template\n\ttmpl := &x509.Certificate{\n\t\tSerialNumber: big.NewInt(1),\n\t\tSubject: pkix.Name{\n\t\t\tCommonName: \"jsonnet-controller\",\n\t\t\tOrganization: []string{\"pelotech\"},\n\t\t},\n\t\tDNSNames: []string{\n\t\t\t\"jsonnet-controller\",\n\t\t},\n\t\tNotBefore: time.Now(),\n\t\tNotAfter: time.Now().Add(time.Hour * 24 * 365),\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment | 
x509.KeyUsageDigitalSignature,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},\n\t\tBasicConstraintsValid: true,\n\t}\n\n\t// self-sign the certificate\n\tderBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &privKey.PublicKey, privKey)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Write files to disk\n\tcertPath := filepath.Join(path, \"tls.crt\")\n\tkeyPath := filepath.Join(path, \"tls.key\")\n\n\tcf, err := os.Create(certPath)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer cf.Close()\n\tif err = pem.Encode(cf, &pem.Block{Type: \"CERTIFICATE\", Bytes: derBytes}); err != nil {\n\t\treturn\n\t}\n\n\tkf, err := os.Create(keyPath)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer kf.Close()\n\tif err = pem.Encode(kf, &pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(privKey)}); err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func registerCertGen(app *kingpin.Application) (*kingpin.CmdClause, *certgenConfig) {\n\tvar certgenConfig certgenConfig\n\tcertgenApp := app.Command(\"certgen\", \"Generate new TLS certs for bootstrapping gRPC over TLS.\")\n\tcertgenApp.Arg(\"outputdir\", \"Directory to write output files into (default \\\"certs\\\").\").Default(\"certs\").StringVar(&certgenConfig.OutputDir)\n\n\t// NOTE: --certificate-lifetime can be used to accept Duration string once certificate rotation is supported.\n\tcertgenApp.Flag(\"certificate-lifetime\", \"Generated certificate lifetime (in days).\").Default(strconv.Itoa(certs.DefaultCertificateLifetime)).UintVar(&certgenConfig.Lifetime)\n\tcertgenApp.Flag(\"incluster\", \"Use in cluster configuration.\").BoolVar(&certgenConfig.InCluster)\n\tcertgenApp.Flag(\"kube\", \"Apply the generated certs directly to the current Kubernetes cluster.\").BoolVar(&certgenConfig.OutputKube)\n\tcertgenApp.Flag(\"kubeconfig\", \"Path to kubeconfig (if not in running inside a cluster).\").Default(filepath.Join(os.Getenv(\"HOME\"), \".kube\", \"config\")).StringVar(&certgenConfig.KubeConfig)\n\tcertgenApp.Flag(\"namespace\", \"Kubernetes namespace, used for Kube objects.\").Default(certs.DefaultNamespace).Envar(\"CONTOUR_NAMESPACE\").StringVar(&certgenConfig.Namespace)\n\tcertgenApp.Flag(\"overwrite\", \"Overwrite existing files or Secrets.\").BoolVar(&certgenConfig.Overwrite)\n\tcertgenApp.Flag(\"pem\", \"Render the generated certs as individual PEM files to the current directory.\").BoolVar(&certgenConfig.OutputPEM)\n\tcertgenApp.Flag(\"secrets-format\", \"Specify how to format the generated Kubernetes Secrets.\").Default(\"legacy\").StringVar(&certgenConfig.Format)\n\tcertgenApp.Flag(\"secrets-name-suffix\", \"Specify a suffix to be appended to the generated Kubernetes secrets' names.\").StringVar(&certgenConfig.NameSuffix)\n\tcertgenApp.Flag(\"yaml\", \"Render the generated certs as Kubernetes Secrets in YAML form to the current directory.\").BoolVar(&certgenConfig.OutputYAML)\n\n\treturn certgenApp, &certgenConfig\n}", "func GenerateCertificateFiles() (string, string, error) {\n\tcert, certKey, err := GenerateCertificate()\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tcertFile, err := ioutil.TempFile(\"\", \"cert\")\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\t_, err = certFile.Write(cert)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\t_ = certFile.Close()\n\n\tcertKeyFile, err := ioutil.TempFile(\"\", \"certKey\")\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t_, err = certKeyFile.Write(certKey)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\t_ = 
certKeyFile.Close()\n\treturn certFile.Name(), certKeyFile.Name(), err\n}", "func (s *sidecar) writeCerts(file string, data []byte) error {\n\tcerts, err := x509.ParseCertificates(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpemData := []byte{}\n\tfor _, cert := range certs {\n\t\tb := &pem.Block{\n\t\t\tType: \"CERTIFICATE\",\n\t\t\tBytes: cert.Raw,\n\t\t}\n\t\tpemData = append(pemData, pem.EncodeToMemory(b)...)\n\t}\n\n\treturn ioutil.WriteFile(file, pemData, certsFileMode)\n}", "func (c EasyCert) generateCA(caFile string) (*x509.Certificate, crypto.PrivateKey, error) {\n\ttemplate := c.newCertificate()\n\ttemplate.IsCA = true\n\ttemplate.KeyUsage |= x509.KeyUsageCertSign\n\ttemplate.Subject.CommonName = c.org\n\n\tpriv, err := c.newPrivateKey()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tderBytes, err := x509.CreateCertificate(rand.Reader, template, template, priv.(crypto.Signer).Public(), priv)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tca, err := x509.ParseCertificate(derBytes)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tcertOut, err := os.Create(caFile)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer certOut.Close()\n\tif err := pem.Encode(certOut, &pem.Block{Type: \"CERTIFICATE\", Bytes: derBytes}); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn ca, priv, nil\n}", "func genCertPair(certFile, keyFile string) error {\n\tlog.Infof(\"Generating TLS certificates...\")\n\n\t// Create directories for cert and key files if they do not yet exist.\n\tcertDir, _ := filepath.Split(certFile)\n\tkeyDir, _ := filepath.Split(keyFile)\n\tif err := os.MkdirAll(certDir, 0700); err != nil {\n\t\treturn err\n\t}\n\tif err := os.MkdirAll(keyDir, 0700); err != nil {\n\t\treturn err\n\t}\n\n\t// Generate cert pair.\n\torg := \"btcwallet autogenerated cert\"\n\tvalidUntil := time.Now().Add(10 * 365 * 24 * time.Hour)\n\tcert, key, err := btcutil.NewTLSCertPair(org, validUntil, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Write cert and key files.\n\tif err = ioutil.WriteFile(certFile, cert, 0666); err != nil {\n\t\treturn err\n\t}\n\tif err = ioutil.WriteFile(keyFile, key, 0600); err != nil {\n\t\tos.Remove(certFile)\n\t\treturn err\n\t}\n\n\tlog.Infof(\"Done generating TLS certificates\")\n\treturn nil\n}", "func GenerateCACertificate(certFile, keyFile, org string, bits int) error {\n\ttemplate, err := newCertificate(org)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttemplate.IsCA = true\n\ttemplate.KeyUsage |= x509.KeyUsageCertSign\n\n\tpriv, err := rsa.GenerateKey(rand.Reader, bits)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tderBytes, err := x509.CreateCertificate(rand.Reader, template, template, &priv.PublicKey, priv)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcertOut, err := os.Create(certFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpem.Encode(certOut, &pem.Block{Type: \"CERTIFICATE\", Bytes: derBytes})\n\tcertOut.Close()\n\n\tkeyOut, err := os.OpenFile(keyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)\n\tif err != nil {\n\t\treturn err\n\n\t}\n\n\tpem.Encode(keyOut, &pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(priv)})\n\tkeyOut.Close()\n\n\treturn nil\n}", "func OSSFuzzGenerateCerts(w http.ResponseWriter, r *http.Request) {\n\tvar project types.OssFuzzProject\n\tquery := datastore.NewQuery(\"OssFuzzProject\")\n\tit := db.RunQuery(r.Context(), query)\n\n\tfor it.Next(&project) {\n\t\tvar tls types.WorkerTlsCert\n\t\tentityKey := datastore.Key{\n\t\t\tKind: \"WorkerTlsCert\",\n\t\t\tName: 
project.Name,\n\t\t}\n\t\terr := db.Get(r.Context(), &entityKey, &tls)\n\t\tif err == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tlogs.Logf(\"Generating cert for %s.\", project.Name)\n\t\tcertPem, keyPem, err := generateCert(project.Name)\n\t\tif err != nil {\n\t\t\tlogs.Errorf(\"Failed to generate cert for %s: %+v\", project.Name, err)\n\t\t\tcontinue\n\t\t}\n\n\t\ttls.ProjectName = project.Name\n\t\ttls.CertContents = certPem\n\t\ttls.KeyContents = keyPem\n\t\t_, err = db.Put(r.Context(), &entityKey, &tls)\n\t\tif err != nil {\n\t\t\tlogs.Errorf(\"Failed to put cert for %s: %+v\", project.Name, err)\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tif err := it.Err(); err != nil {\n\t\tlogs.Errorf(\"Failed to query projects: %+v\", err)\n\t}\n}", "func GenerateDatabaseCertificates(ctx context.Context, req GenerateDatabaseCertificatesRequest) ([]string, error) {\n\n\tif len(req.Principals) == 0 ||\n\t\t(len(req.Principals) == 1 && req.Principals[0] == \"\" && req.OutputFormat != identityfile.FormatSnowflake) {\n\n\t\treturn nil, trace.BadParameter(\"at least one hostname must be specified\")\n\t}\n\n\t// For CockroachDB node certificates, CommonName must be \"node\":\n\t//\n\t// https://www.cockroachlabs.com/docs/v21.1/cockroach-cert#node-key-and-certificates\n\tif req.OutputFormat == identityfile.FormatCockroach {\n\t\treq.Principals = append([]string{\"node\"}, req.Principals...)\n\t}\n\n\tsubject := pkix.Name{CommonName: req.Principals[0]}\n\n\tif req.OutputFormat == identityfile.FormatMongo {\n\t\t// Include Organization attribute in MongoDB certificates as well.\n\t\t//\n\t\t// When using X.509 member authentication, MongoDB requires O or OU to\n\t\t// be non-empty so this will make the certs we generate compatible:\n\t\t//\n\t\t// https://docs.mongodb.com/manual/core/security-internal-authentication/#x.509\n\t\t//\n\t\t// The actual O value doesn't matter as long as it matches on all\n\t\t// MongoDB cluster members so set it to the Teleport cluster name\n\t\t// to avoid hardcoding anything.\n\n\t\tclusterNameType, err := req.ClusterAPI.GetClusterName()\n\t\tif err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\n\t\tsubject.Organization = []string{clusterNameType.GetClusterName()}\n\t}\n\n\tif req.Key == nil {\n\t\tkey, err := client.GenerateRSAKey()\n\t\tif err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treq.Key = key\n\t}\n\n\tcsr, err := tlsca.GenerateCertificateRequestPEM(subject, req.Key.PrivateKey)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\tresp, err := req.ClusterAPI.GenerateDatabaseCert(ctx,\n\t\t&proto.DatabaseCertRequest{\n\t\t\tCSR: csr,\n\t\t\t// Important to include SANs since CommonName has been deprecated\n\t\t\t// since Go 1.15:\n\t\t\t// https://golang.org/doc/go1.15#commonname\n\t\t\tServerNames: req.Principals,\n\t\t\t// Include legacy ServerName for compatibility.\n\t\t\tServerName: req.Principals[0],\n\t\t\tTTL: proto.Duration(req.TTL),\n\t\t\tRequesterName: proto.DatabaseCertRequest_TCTL,\n\t\t})\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\treq.Key.TLSCert = resp.Cert\n\treq.Key.TrustedCerts = []auth.TrustedCerts{{\n\t\tClusterName: req.Key.ClusterName,\n\t\tTLSCertificates: resp.CACerts,\n\t}}\n\tfilesWritten, err := identityfile.Write(ctx, identityfile.WriteConfig{\n\t\tOutputPath: req.OutputLocation,\n\t\tKey: req.Key,\n\t\tFormat: req.OutputFormat,\n\t\tOverwriteDestination: req.OutputCanOverwrite,\n\t\tWriter: req.IdentityFileWriter,\n\t\tPassword: req.Password,\n\t})\n\tif err != nil {\n\t\treturn nil, 
trace.Wrap(err)\n\t}\n\n\treturn filesWritten, nil\n}", "func (g *CertGenerator) Generate(cert, key string, certType bool) error {\n\tif err := g.fileSystem.MkdirAll(path.Dir(cert), 0750); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tif err := g.fileSystem.MkdirAll(path.Dir(key), 0750); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tcertKey, err := g.GenerateMemCert(certType)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tif err := g.write(func() (fsys.File, error) {\n\t\treturn g.fileSystem.Create(cert)\n\t}, cert, certKey.Cert); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tif err := g.write(func() (fsys.File, error) {\n\t\treturn g.fileSystem.OpenFile(key, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)\n\t}, key, certKey.Key); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\treturn nil\n}", "func getTLScerts(c, k, ca string) ([]byte, []byte, []byte, error) {\n\tres := [][]byte{}\n\tvar err error\n\tvar a []byte\n\tfor _, l := range []string{c, k, ca} {\n\t\ta, err = ioutil.ReadFile(l)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"getTLScerts failed to load file %s: %s\", l, err)\n\t\t\tbreak\n\t\t}\n\t\tres = append(res, a)\n\t}\n\tif err != nil {\n\t\tisX := false\n\t\thost := \"host\"\n\t\trsaBits := 2048\n\t\tecdsaCurve := \"\"\n\t\tvalidFor := 365 * 24 * time.Hour\n\t\tvalidFrom := \"\"\n\t\tisCA := true\n\t\tlog.Println(\"creating CA\")\n\t\tcacert, cakey, err := internal.Ca(host, rsaBits, ecdsaCurve, validFrom, validFor)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to create certificate: %s\", err)\n\t\t}\n\t\tca_key_pair, err := tls.X509KeyPair(pem.EncodeToMemory(&cacert), pem.EncodeToMemory(&cakey))\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to make ca key pair: %s\", err)\n\t\t}\n\t\tlog.Println(\"creating certificate\")\n\t\tisCA = false\n\t\tcert, priv, err := internal.CaSignedCert(cert_common_name, host, rsaBits, ecdsaCurve, validFrom, validFor, isCA, isX, &ca_key_pair)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to make signed cert %s\", err)\n\t\t}\n\t\treturn pem.EncodeToMemory(&cert), pem.EncodeToMemory(&priv), pem.EncodeToMemory(&cacert), nil\n\t}\n\treturn res[0], res[1], res[2], nil\n}", "func (c EasyCert) generateCert(certFile, keyFile string, ca *x509.Certificate, caKey crypto.PrivateKey) error {\n\ttemplate := c.newCertificate()\n\tfor _, h := range c.hosts {\n\t\tif ip := net.ParseIP(h); ip != nil {\n\t\t\ttemplate.IPAddresses = append(template.IPAddresses, ip)\n\t\t} else {\n\t\t\ttemplate.DNSNames = append(template.DNSNames, h)\n\t\t\tif template.Subject.CommonName == \"\" {\n\t\t\t\ttemplate.Subject.CommonName = h\n\t\t\t}\n\t\t}\n\t}\n\n\tpriv, err := c.newPrivateKey()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn c.generateFromTemplate(certFile, keyFile, template, ca, priv, caKey)\n}", "func generateSSCert() []byte {\n\t//create root CA\n\tserialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)\n\tserialNumber, _ := rand.Int(rand.Reader, serialNumberLimit)\n\n\ttmpl := &x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{Organization: []string{\"Yhat, Inc.\"}},\n\t\tSignatureAlgorithm: x509.SHA256WithRSA,\n\t\tNotBefore: time.Now(),\n\t\tNotAfter: time.Now().Add(time.Hour), // valid for an hour\n\t\tBasicConstraintsValid: true,\n\t}\n\trootKey, _ := rsa.GenerateKey(rand.Reader, 2048)\n\n\ttmpl.IsCA = true\n\ttmpl.KeyUsage = x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature\n\ttmpl.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, 
x509.ExtKeyUsageClientAuth}\n\ttmpl.IPAddresses = []net.IP{net.ParseIP(\"127.0.0.1\")}\n\n\t_, rootCertPEM, _ := createCert(tmpl, tmpl, &rootKey.PublicKey, rootKey)\n\treturn rootCertPEM\n}", "func (k *K8sutil) generateConfig(configDir, certsDir, namespace, clusterName string) error {\n\tcaConfig := caconfig{\n\t\tSigning: configSigning{\n\t\t\tDefault: configDefault{\n\t\t\t\tUsages: []string{\n\t\t\t\t\t\"signing\",\n\t\t\t\t\t\"key encipherment\",\n\t\t\t\t\t\"server auth\",\n\t\t\t\t\t\"client auth\",\n\t\t\t\t},\n\t\t\t\tExpiry: \"8760h\",\n\t\t\t},\n\t\t},\n\t}\n\n\tcaCSR := csr{\n\t\tHosts: []string{\n\t\t\t\"localhost\",\n\t\t\tfmt.Sprintf(\"elasticsearch-%s\", clusterName),\n\t\t\tfmt.Sprintf(\"%s.%s\", fmt.Sprintf(\"elasticsearch-%s\", clusterName), namespace),\n\t\t\tfmt.Sprintf(\"%s.%s.svc.cluster.local\", fmt.Sprintf(\"elasticsearch-%s\", clusterName), namespace),\n\t\t},\n\t\tKey: key{\n\t\t\tAlgo: \"rsa\",\n\t\t\tSize: 2048,\n\t\t},\n\t\tNames: []names{\n\t\t\tnames{\n\t\t\t\tC: \"US\",\n\t\t\t\tL: \"Pittsburgh\",\n\t\t\t\tO: \"elasticsearch-operator\",\n\t\t\t\tOU: \"k8s\",\n\t\t\t\tST: \"Pennsylvania\",\n\t\t\t},\n\t\t},\n\t}\n\n\tcaConfigJSON, err := json.Marshal(caConfig)\n\tif err != nil {\n\t\tlogrus.Error(\"json Marshal error : \", err)\n\t\treturn err\n\t}\n\tf, err := os.Create(fmt.Sprintf(\"%s/ca-config.json\", configDir))\n\tif err != nil {\n\t\tlogrus.Error(\"Error creating ca-config.json: \", err)\n\t\treturn err\n\t}\n\t_, err = f.Write(caConfigJSON)\n\tif err != nil {\n\t\tlogrus.Error(\"Error writing ca-config.json: \", err)\n\t\treturn err\n\t}\n\n\treqCACSRJSON, _ := json.Marshal(caCSR)\n\tf, err = os.Create(fmt.Sprintf(\"%s/ca-csr.json\", configDir))\n\tif err != nil {\n\t\tlogrus.Error(\"Error creating ca-csr.json: \", err)\n\t\treturn err\n\t}\n\t_, err = f.Write(reqCACSRJSON)\n\tif err != nil {\n\t\tlogrus.Error(\"Error writing ca-csr.json: \", err)\n\t\treturn err\n\t}\n\n\tfor k, v := range map[string]string{\n\t\t\"node\": \"req-node-csr.json\",\n\t\t\"sgadmin\": \"req-sgadmin-csr.json\",\n\t\t\"kibana\": \"req-kibana-csr.json\",\n\t\t\"cerebro\": \"req-cerebro-csr.json\",\n\t} {\n\n\t\treq := csr{\n\t\t\tCN: k,\n\t\t\tHosts: []string{\n\t\t\t\t\"localhost\",\n\t\t\t\tfmt.Sprintf(\"%s-%s\", k, clusterName),\n\t\t\t\tfmt.Sprintf(\"%s.%s\", fmt.Sprintf(\"%s-%s\", k, clusterName), namespace),\n\t\t\t\tfmt.Sprintf(\"%s.%s.svc.cluster.local\", fmt.Sprintf(\"%s-%s\", k, clusterName), namespace),\n\t\t\t\tfmt.Sprintf(\"elasticsearch-%s\", clusterName),\n\t\t\t},\n\t\t\tKey: key{\n\t\t\t\tAlgo: \"rsa\",\n\t\t\t\tSize: 2048,\n\t\t\t},\n\t\t\tNames: []names{\n\t\t\t\tnames{\n\t\t\t\t\tO: \"autogenerated\",\n\t\t\t\t\tOU: \"elasticsearch cluster\",\n\t\t\t\t\tL: \"operator\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tconfigJSON, _ := json.Marshal(req)\n\t\tf, err := os.Create(fmt.Sprintf(\"%s/%s\", configDir, v))\n\t\tif err != nil {\n\t\t\tlogrus.Error(err)\n\t\t\treturn err\n\t\t}\n\t\t_, err = f.Write(configJSON)\n\t\tif err != nil {\n\t\t\tlogrus.Error(err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (scg *SDKCertGenerator) GenerateCert(cr runtime.Object, service *v1.Service, config *CertConfig) (*v1.Secret, *v1.ConfigMap, *v1.Secret, error) {\n\tif err := verifyConfig(config); err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tk, n, ns, err := toKindNameNamespace(cr)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tappSecretName := ToAppSecretName(k, n, config.CertName)\n\tappSecret, err := getAppSecretInCluster(scg.KubeClient, appSecretName, ns)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tcaSecretAndConfigMapName := ToCASecretAndConfigMapName(k, n)\n\n\tvar (\n\t\tcaSecret *v1.Secret\n\t\tcaConfigMap *v1.ConfigMap\n\t)\n\n\tcaSecret, caConfigMap, err = 
getCASecretAndConfigMapInCluster(scg.KubeClient, caSecretAndConfigMapName, ns)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tif config.CAKey != \"\" && config.CACert != \"\" {\n\t\t// custom CA provided by the user.\n\t\tcustomCAKeyData, err := ioutil.ReadFile(config.CAKey)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, fmt.Errorf(\"error reading CA Key from the given file name: %v\", err)\n\t\t}\n\n\t\tcustomCACertData, err := ioutil.ReadFile(config.CACert)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, fmt.Errorf(\"error reading CA Cert from the given file name: %v\", err)\n\t\t}\n\n\t\tcustomCAKey, err := parsePEMEncodedPrivateKey(customCAKeyData)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, fmt.Errorf(\"error parsing CA Key from the given file name: %v\", err)\n\t\t}\n\n\t\tcustomCACert, err := parsePEMEncodedCert(customCACertData)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, fmt.Errorf(\"error parsing CA Cert from the given file name: %v\", err)\n\t\t}\n\t\tcaSecret, caConfigMap = toCASecretAndConfigmap(customCAKey, customCACert, caSecretAndConfigMapName)\n\t} else if config.CAKey != \"\" || config.CACert != \"\" {\n\t\t// if only one of the custom CA Key or Cert is provided\n\t\treturn nil, nil, nil, ErrCAKeyAndCACertReq\n\t}\n\n\thasAppSecret := appSecret != nil\n\thasCASecretAndConfigMap := caSecret != nil && caConfigMap != nil\n\n\tswitch {\n\tcase hasAppSecret && hasCASecretAndConfigMap:\n\t\treturn appSecret, caConfigMap, caSecret, nil\n\n\tcase hasAppSecret && !hasCASecretAndConfigMap:\n\t\treturn nil, nil, nil, ErrCANotFound\n\n\tcase !hasAppSecret && hasCASecretAndConfigMap:\n\t\t// Note: if a custom CA is passed in by the user it takes preference over an already\n\t\t// generated CA secret and CA configmap that might exist in the cluster\n\t\tcaKey, err := parsePEMEncodedPrivateKey(caSecret.Data[TLSPrivateCAKeyKey])\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\tcaCert, err := parsePEMEncodedCert([]byte(caConfigMap.Data[TLSCACertKey]))\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\tkey, err := newPrivateKey()\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\tcert, err := newSignedCertificate(config, service, key, caCert, caKey)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\tappSecret, err := scg.KubeClient.CoreV1().Secrets(ns).Create(toTLSSecret(key, cert, appSecretName))\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\treturn appSecret, caConfigMap, caSecret, nil\n\n\tcase !hasAppSecret && !hasCASecretAndConfigMap:\n\t\t// If no custom CAKey and CACert are provided we have to generate them\n\t\tcaKey, err := newPrivateKey()\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\tcaCert, err := newSelfSignedCACertificate(caKey)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\n\t\tcaSecret, caConfigMap := toCASecretAndConfigmap(caKey, caCert, caSecretAndConfigMapName)\n\t\tcaSecret, err = scg.KubeClient.CoreV1().Secrets(ns).Create(caSecret)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\tcaConfigMap, err = scg.KubeClient.CoreV1().ConfigMaps(ns).Create(caConfigMap)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\tkey, err := newPrivateKey()\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\tcert, err := newSignedCertificate(config, service, key, caCert, caKey)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\tappSecret, err := 
scg.KubeClient.CoreV1().Secrets(ns).Create(toTLSSecret(key, cert, appSecretName))\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\treturn appSecret, caConfigMap, caSecret, nil\n\tdefault:\n\t\treturn nil, nil, nil, ErrInternal\n\t}\n}", "func GenKeyCertK8sCA(client clientset.Interface, dnsName,\n\tcaFilePath string, signerName string, approveCsr bool, requestedLifetime time.Duration,\n) ([]byte, []byte, []byte, error) {\n\t// 1. Generate a CSR\n\toptions := util.CertOptions{\n\t\tHost: dnsName,\n\t\tRSAKeySize: keySize,\n\t\tIsDualUse: false,\n\t\tPKCS8Key: false,\n\t}\n\tcsrPEM, keyPEM, err := util.GenCSR(options)\n\tif err != nil {\n\t\tlog.Errorf(\"CSR generation error (%v)\", err)\n\t\treturn nil, nil, nil, err\n\t}\n\tusages := []certv1.KeyUsage{\n\t\tcertv1.UsageDigitalSignature,\n\t\tcertv1.UsageKeyEncipherment,\n\t\tcertv1.UsageServerAuth,\n\t\tcertv1.UsageClientAuth,\n\t}\n\tif signerName == \"\" {\n\t\tsignerName = \"kubernetes.io/legacy-unknown\"\n\t}\n\tcertChain, caCert, err := SignCSRK8s(client, csrPEM, signerName, usages, dnsName, caFilePath, approveCsr, true, requestedLifetime)\n\n\treturn certChain, keyPEM, caCert, err\n}", "func GenCARoot() (*x509.Certificate, *rsa.PrivateKey) {\n\tvar rootTemplate = x509.Certificate{\n\t\tSerialNumber: big.NewInt(1),\n\t\tSubject: pkix.Name{\n\t\t\tCountry: []string{defaults.DefaultX509Country},\n\t\t\tOrganization: []string{defaults.DefaultX509Company},\n\t\t\tCommonName: \"Root CA\",\n\t\t},\n\t\tNotBefore: time.Now().Add(-10 * time.Second),\n\t\tNotAfter: time.Now().AddDate(10, 0, 0),\n\t\tKeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},\n\t\tBasicConstraintsValid: true,\n\t\tIsCA: true,\n\t\tMaxPathLen: 2,\n\t\tIPAddresses: []net.IP{net.ParseIP(\"127.0.0.1\")},\n\t}\n\tpriv, err := rsa.GenerateKey(rand.Reader, 4096)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\trootCert := genCert(&rootTemplate, &rootTemplate, &priv.PublicKey, priv)\n\treturn rootCert, priv\n}", "func WriteCerts(w io.Writer, certs []*x509.Certificate) error {\n\tfor _, cert := range certs {\n\t\tif err := WriteCert(w, cert); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func GenerateSignedCertificate(c *cli.Context) error {\n\thost := c.String(\"host\")\n\n\trsaBits := c.Int(\"rsa-bits\")\n\tecdsaCurve := c.String(\"ecdsa-curve\")\n\n\tvalidFrom := c.String(\"start-date\")\n\n\tvalidFor := c.Duration(\"duration\")\n\tisCA := c.Bool(\"ca\")\n\tisX := c.Bool(\"x\")\n\tcname := c.String(\"cname\")\n\n\tcert, priv, err := CaSignedCert(cname, host, rsaBits, ecdsaCurve, validFrom, validFor, isCA, isX, nil)\n\tkeyno := getMaxName()\n\tvar certname = keyno + \".cert\"\n\tvar keyname = keyno + \".key\"\n\n\tcertout, err := os.Create(certname)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to open \"+certname+\" for writing: %s\", err)\n\t}\n\tpem.Encode(certout, &cert)\n\tcertout.Close()\n\tlog.Print(\"written \" + certname + \"\\n\")\n\n\tkeyout, err := os.OpenFile(keyname, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)\n\tif err != nil {\n\t\tlog.Print(\"failed to open \"+keyname+\" for writing:\", err)\n\t\treturn nil\n\t}\n\tpem.Encode(keyout, &priv)\n\tkeyout.Close()\n\tlog.Print(\"written \" + keyname + \"\\n\")\n\treturn nil\n}", "func GenerateCA(commonName string,\n serialNumber int64,\n countryCode string,\n organizationalUnit string,\n algo string,\n ecCurve string) (rootCADER []byte, rootPrivateKeyDER []byte, err error) {\n\n notBefore := time.Now().UTC()\n notAfter := 
notBefore.AddDate(CAValidity, 0, 0) // (years, months, days)\n\n // Hashing algorithm should match the private key type that signs the certificate.\n // In this case we are self-signing so the key generation algorithm and signature hashing algorithm are both of the same type\n hashingAlgorithm := x509.SHA256WithRSA\n switch strings.ToUpper(algo) {\n case \"RSA\":\n // pass\n case \"ECDSA\":\n hashingAlgorithm = x509.ECDSAWithSHA256\n default:\n return nil, nil, errors.New(\"Unrecognized algorithm, valid options are RSA and ECDSA\")\n }\n\n // https://golang.org/pkg/crypto/x509/#Certificate\n myCACertTemplate := x509.Certificate{\n\n // https://golang.org/pkg/crypto/x509/pkix/#Name\n Subject: pkix.Name{\n CommonName: commonName,\n Country: []string{countryCode},\n Organization: []string{organizationalUnit},\n },\n\n NotBefore: notBefore,\n NotAfter: notAfter,\n SerialNumber: big.NewInt(serialNumber), // returns *big.Int\n KeyUsage: RootCAKeyUsage,\n\n // For CAs we at least want []x509.ExtKeyUsage{x509.ExtKeyUsageAny | x509.KeyUsageCertSign}\n // More info: https://golang.org/pkg/crypto/x509/#ExtKeyUsage\n ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, // ExtKeyUsageAny covers every extended key usage\n BasicConstraintsValid: true,\n IsCA: true,\n MaxPathLen: 3, // allows up to 3 levels of intermediates, more than enough for our purpose\n SignatureAlgorithm: hashingAlgorithm, // other options are at https://golang.org/pkg/crypto/x509/#SignatureAlgorithm\n }\n\n privKey, pubKey, err := generateKeyPair(algo, ecCurve)\n if err != nil {\n return nil, nil, err\n }\n\n // https://golang.org/pkg/crypto/x509/#CreateCertificate\n // Both the signee and signer are the same template because rootCAs are always self-signed\n rootCADER, err = x509.CreateCertificate(rand.Reader, &myCACertTemplate, &myCACertTemplate, pubKey, privKey)\n if err != nil {\n return nil, nil, err\n }\n\n rootPrivateKeyDER, err = MarshalPrivateKey(privKey)\n\n return rootCADER, rootPrivateKeyDER, err\n}", "func (s *TestServer) CreateCert() error {\n\tserialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)\n\tserialNumber, err := rand.Int(rand.Reader, serialNumberLimit)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to generate serial number: %s\", err)\n\t}\n\ttemplate := &x509.Certificate{\n\t\tIsCA: true,\n\t\tSubjectKeyId: []byte{1, 2, 3},\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tCountry: []string{\"Acme Co\"},\n\t\t},\n\t\tNotBefore: time.Now(),\n\t\tNotAfter: time.Now().AddDate(5, 5, 5),\n\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},\n\t\tBasicConstraintsValid: true,\n\t}\n\n\tpriv, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tderBytes, err := x509.CreateCertificate(rand.Reader, template, template, &priv.PublicKey, priv)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to generate cert: %s\", err)\n\t}\n\n\tcertOut, err := ioutil.TempFile(\"\", \"test\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer certOut.Close()\n\n\tif err = pem.Encode(certOut, &pem.Block{Type: \"CERTIFICATE\", Bytes: derBytes}); err != nil {\n\t\tos.Remove(certOut.Name())\n\t\treturn err\n\t}\n\n\tkeyOut, err := ioutil.TempFile(\"\", \"test\")\n\tif err != nil {\n\t\tos.Remove(certOut.Name())\n\t\treturn err\n\t}\n\tdefer keyOut.Close()\n\n\tif err = pem.Encode(keyOut, &pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil 
{\n\t\tos.Remove(certOut.Name())\n\t\tos.Remove(keyOut.Name())\n\t\treturn err\n\t}\n\n\ts.certFile = certOut.Name()\n\ts.keyFile = keyOut.Name()\n\n\treturn nil\n}", "func (a *AdminKubeConfigCABundle) Generate(deps asset.Parents) error {\n\tvar certs []CertInterface\n\tfor _, asset := range a.Dependencies() {\n\t\tdeps.Get(asset)\n\t\tcerts = append(certs, asset.(CertInterface))\n\t}\n\treturn a.CertBundle.Generate(\"admin-kubeconfig-ca-bundle\", certs...)\n}", "func genCertPair(certFile, keyFile string) error {\n\tlog.Infof(\"Generating TLS certificates...\")\n\n\torg := \"dcrdex autogenerated cert\"\n\tvalidUntil := time.Now().Add(10 * 365 * 24 * time.Hour)\n\tcert, key, err := certgen.NewTLSCertPair(elliptic.P521(), org,\n\t\tvalidUntil, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Write cert and key files.\n\tif err = ioutil.WriteFile(certFile, cert, 0644); err != nil {\n\t\treturn err\n\t}\n\tif err = ioutil.WriteFile(keyFile, key, 0600); err != nil {\n\t\tos.Remove(certFile)\n\t\treturn err\n\t}\n\n\tlog.Infof(\"Done generating TLS certificates\")\n\treturn nil\n}", "func (c *IdentityConfig) CAServerCerts(org string) ([][]byte, error) {\n\tnetworkConfig, err := c.networkConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcaConfig, err := c.getCAConfig(networkConfig, org)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar serverCerts [][]byte\n\t//check for pems first\n\tpems := caConfig.TLSCACerts.Pem\n\tif len(pems) > 0 {\n\t\tserverCerts = make([][]byte, len(pems))\n\t\tfor i, pem := range pems {\n\t\t\tserverCerts[i] = []byte(pem)\n\t\t}\n\t\treturn serverCerts, nil\n\t}\n\n\t//check for files if pems not found\n\tcertFiles := strings.Split(caConfig.TLSCACerts.Path, \",\")\n\tserverCerts = make([][]byte, len(certFiles))\n\tfor i, certPath := range certFiles {\n\t\tbytes, err := ioutil.ReadFile(pathvar.Subst(certPath))\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to load pem bytes from path %s\", certPath)\n\t\t}\n\t\tserverCerts[i] = bytes\n\t}\n\treturn serverCerts, nil\n}", "func (c *MTLSCerts) generate(length int) error {\n\tcaPK, caCertBytes, err := c.genCertAndPK(length, &c.caCert, nil, nil)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tc.CACert = &keyPair{caPK, caCertBytes}\n\n\tserverPK, serverCertBytes, err := c.genCertAndPK(length, &c.serverCert, &c.caCert, caPK)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tc.ServerCert = &keyPair{serverPK, serverCertBytes}\n\n\tclientPK, clientCertBytes, err := c.genCertAndPK(length, &c.clientCert, &c.caCert, caPK)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tc.ClientCert = &keyPair{clientPK, clientCertBytes}\n\n\treturn nil\n}", "func MakeIntermediateCertificate(config *Config, inCertPath string, inKeyPath string, outPath string, outKeyPath string) (uint16, error) {\n\tsigner, err := getSigner(config.rand(), config.SignatureAlgorithm, config.EndEntity || config.ForDC)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tpriv, pub, err := signer.GenerateKey()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tserialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)\n\tserialNumber, err := rand.Int(config.rand(), serialNumberLimit)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\ttpl := &x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{\"tls-interop-runner development certificate\"},\n\t\t\tOrganizationalUnit: []string{outPath},\n\t\t},\n\n\t\tNotBefore: config.ValidFrom,\n\t\tNotAfter: 
config.ValidFrom.Add(config.ValidFor),\n\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,\n\t}\n\n\tfor _, h := range config.Hostnames {\n\t\tif ip := net.ParseIP(h); ip != nil {\n\t\t\ttpl.IPAddresses = append(tpl.IPAddresses, ip)\n\t\t} else if email, err := mail.ParseAddress(h); err == nil && email.Address == h {\n\t\t\ttpl.EmailAddresses = append(tpl.EmailAddresses, h)\n\t\t} else if uriName, err := url.Parse(h); err == nil && uriName.Scheme != \"\" && uriName.Host != \"\" {\n\t\t\ttpl.URIs = append(tpl.URIs, uriName)\n\t\t} else {\n\t\t\ttpl.DNSNames = append(tpl.DNSNames, h)\n\t\t}\n\t}\n\n\tif config.ForClient {\n\t\ttpl.ExtKeyUsage = append(tpl.ExtKeyUsage, x509.ExtKeyUsageClientAuth)\n\t}\n\n\tif config.ForDC {\n\t\ttpl.ExtraExtensions = append(tpl.ExtraExtensions, pkix.Extension{\n\t\t\tId: asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 44363, 44},\n\t\t\tCritical: false,\n\t\t\tValue: nil,\n\t\t})\n\t}\n\n\tparentCertPEMBlock, err := ioutil.ReadFile(filepath.Join(inCertPath))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tparentCertDERBlock, _ := pem.Decode(parentCertPEMBlock)\n\tif parentCertDERBlock == nil || parentCertDERBlock.Type != \"CERTIFICATE\" {\n\t\treturn 0, errors.New(\"failed to read input certificate: unexpected content\")\n\t}\n\tparentCert, err := x509.ParseCertificate(parentCertDERBlock.Bytes)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tparentKeyPEMBlock, err := ioutil.ReadFile(filepath.Join(inKeyPath))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tparentKeyDERBlock, _ := pem.Decode(parentKeyPEMBlock)\n\tif parentKeyDERBlock == nil || parentKeyDERBlock.Type != \"PRIVATE KEY\" {\n\t\treturn 0, errors.New(\"failed to read input key: unexpected content\")\n\t}\n\tparentKey, err := x509.ParsePKCS8PrivateKey(parentKeyDERBlock.Bytes)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tcert, err := x509.CreateCertificate(config.rand(), tpl, parentCert, pub, parentKey)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tcertPEM := pem.EncodeToMemory(&pem.Block{Type: \"CERTIFICATE\", Bytes: cert})\n\tprivDER, err := x509.MarshalPKCS8PrivateKey(priv)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tprivPEM := pem.EncodeToMemory(&pem.Block{Type: \"PRIVATE KEY\", Bytes: privDER})\n\n\tif outPath == outKeyPath {\n\t\terr = ioutil.WriteFile(outKeyPath, append(certPEM, privPEM...), 0600)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t} else {\n\t\terr = ioutil.WriteFile(outPath, certPEM, 0644)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\n\t\terr = ioutil.WriteFile(outKeyPath, privPEM, 0600)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\treturn signer.algorithmID, nil\n}", "func getCertificates(certFile string, keyFile string) (certFilePath string, keyFilePath string) {\n\tif runtime.GOOS == \"windows\" {\n\t\tcertFilePath, _ = filepath.Abs(filepath.Join(\"https-server\", certFile))\n\t\tkeyFilePath, _ = filepath.Abs(filepath.Join(\"https-server\", keyFile))\n\t} else {\n\t\tcertFilePath, _ = filepath.Abs(certFile)\n\t\tkeyFilePath, _ = filepath.Abs(keyFile)\n\t}\n\treturn\n}", "func (c *Certinator) GenerateCaCert(name string, cn string, exported bool) (secret *api.Secret, err error) {\n\tdata := map[string]interface{}{\n\t\t\"ttl\": DEFAULT_CA_MAX_LEASE,\n\t\t\"common_name\": cn,\n\t}\n\n\tvar path string\n\tif exported {\n\t\tpath = fmt.Sprintf(\"%s/root/generate/exported\", name)\n\t} else {\n\t\tpath = fmt.Sprintf(\"%s/root/generate/internal\", name)\n\t}\n\n\tsecret, err = c.Client.Logical().Write(path, data)\n\tif err != nil {\n\t\terr 
= errors.Wrapf(err, \"failed creating root cert for CA %q\", name)\n\t\treturn secret, err\n\t}\n\n\treturn secret, err\n}", "func makeCert(t *testing.T, domains []string, expiry time.Time) []byte {\n\ttc := makeTLSCert(t, domains, expiry)\n\n\tcertPEM := &bytes.Buffer{}\n\tif err := pem.Encode(certPEM, &pem.Block{Type: \"CERTIFICATE\", Bytes: tc.Leaf.Raw}); err != nil {\n\t\tt.Fatalf(\"Failed to write data to cert.pem: %s\", err)\n\t}\n\n\treturn certPEM.Bytes()\n}", "func GenerateCertFile(appId string, teamId string, bundlerId string) {\n\t// mutex to lock\n\tiOSLock.Lock()\n\tdefer iOSLock.Unlock()\n\n\tcurDir := getcurdir()\n\t/*\n\t\tfile, err := ioutil.ReadFile(curDir + \"/../app_site/apple-app-site-association-unsigned\")\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"[Cert Resource] Find file <apple-app-site-association-unsigned> failed! Err Msg=%s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tvar universallinkJson UniversalLinkJson\n\t\tjson.Unmarshal(file, &universallinkJson)\n\n\t\t// add new element\n\t\tcomboId := teamId + \".\" + bundlerId\n\t\tpath := \"/d/\" + appId + \"/*\"\n\t\ts := Detail{AppID: comboId, Paths: []string{path}}\n\t\tuniversallinkJson.Applinks.Details = append(universallinkJson.Applinks.Details, s)\n\n\t\tnewResult, err := json.Marshal(universallinkJson)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"[Cert Resource] Marshal upsert info failed! Err Msg=%v\", err)\n\t\t\treturn\n\t\t}\n\t*/\n\n\t// FIXME: when too many apps\n\tresult, err := storage.MongoAS.GetAppleAppSiteAssociationInfo()\n\t/*\n\t\thttperror := errorutil.ProcessMongoError(f, err)\n\t\tif httperror {\n\t\t\treturn\n\t\t}*/\n\tr, err := json.Marshal(result)\n\tlog.Debugf(\"db result: %s\\n\", r)\n\t// Merge multiple paths of the same app (same teamid & bundleid)\n\trawlist := map[string]interface{}{}\n\n\tapplist := []map[string]interface{}{}\n\n\tfor i, _ := range result {\n\t\tkey := result[i][\"iosteamid\"] + \".\" + result[i][\"iosbundler\"]\n\t\tfullPath := \"/d/\" + result[i][\"appid\"] + \"/*\"\n\t\tminiPath := \"/d/\" + result[i][\"shortid\"] + \"/*\"\n\n\t\tif _, ok := rawlist[key]; !ok {\n\t\t\trawlist[key] = []string{}\n\t\t}\n\n\t\tif result[i][\"shortid\"] != \"\" {\n\t\t\trawlist[key] = append(rawlist[key].([]string), miniPath)\n\t\t}\n\t\trawlist[key] = append(rawlist[key].([]string), fullPath)\n\t}\n\n\tfor k, v := range rawlist {\n\t\tapp := map[string]interface{}{}\n\t\tapp[\"appID\"] = k\n\t\tapp[\"paths\"] = v\n\t\tapplist = append(applist, app)\n\t}\n\tujson := map[string]interface{}{\n\t\t\"applinks\": map[string]interface{}{\n\t\t\t\"apps\": []string{},\n\t\t\t\"details\": applist,\n\t\t},\n\t}\n\tnewResult, err := json.Marshal(ujson)\n\tlog.Infof(\"The generated apple-app-site-association file: %s\\n\", newResult)\n\terr = ioutil.WriteFile(curDir+\"/../app_site/apple-app-site-association-unsigned\", newResult, 0666)\n\tif err != nil {\n\t\tlog.Errorf(\"[Cert Resource] Failed to generate new json file, error: %v\\n\", err)\n\t\treturn\n\t}\n\n\t// write with plain json file\n\terr = ioutil.WriteFile(curDir+\"/../app_site/apple-app-site-association\", newResult, 0666)\n\tif err != nil {\n\t\tlog.Errorf(\"[Cert Resource] Failed to generate apple-app-site-association file, error: %v\\n\", err)\n\t\treturn\n\t}\n\t/*\n\t\t* No need to sign:\n\t\t* https://developer.apple.com/library/ios/documentation/General/Conceptual/AppSearch/UniversalLinks.html\n\t\tarv := []string{\"smime\", \"-sign\", \"-nodetach\", \"-in\", curDir + \"/../app_site/apple-app-site-association-unsigned\", \"-out\", curDir + 
\"/../app_site/apple-app-site-association\", \"-outform\", \"DER\", \"-inkey\", curDir + \"/fds.so.key\", \"-signer\", curDir + \"/fds.so.crt\"}\n\t\tcmd := exec.Command(\"openssl\", arv...)\n\n\t\tif err := cmd.Run(); err != nil {\n\t\t\tlog.Errorf(\"[Cert Resource] Failed to sign the json file! Err Msg=%v\", err)\n\t\t} else {\n\t\t\tlog.Infof(\"[Cert Resource] Success to sign the json file!\")\n\t\t}\n\t*/\n}", "func GenerateCertificate() ([]byte, []byte, error) {\n\tprivKey, pubKey, err := GenerateKeyPair()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tserialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)\n\tserialNumber, err := rand.Int(rand.Reader, serialNumberLimit)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\ttemplate := x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{\"Vecosy test\"},\n\t\t},\n\t\tNotBefore: time.Now(),\n\t\tNotAfter: time.Now().Add(1 * time.Hour),\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},\n\t\tBasicConstraintsValid: true,\n\t\tDNSNames: []string{\"localhost\"},\n\t\tIPAddresses: []net.IP{net.IPv4(127, 0, 0, 1)},\n\t}\n\tcerBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, pubKey, privKey)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tpemPrivKey := pem.EncodeToMemory(\n\t\t&pem.Block{\n\t\t\tType: \"RSA PRIVATE KEY\",\n\t\t\tBytes: x509.MarshalPKCS1PrivateKey(privKey),\n\t\t},\n\t)\n\tpemCert := pem.EncodeToMemory(\n\t\t&pem.Block{\n\t\t\tType: \"CERTIFICATE\",\n\t\t\tBytes: cerBytes,\n\t\t},\n\t)\n\n\treturn pemCert, pemPrivKey, nil\n}", "func WriteToFiles(crt *x509.Certificate, key interface{}, certFile string, keyFile string) (err error) {\n\tcertOut, err := os.Create(certFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := pem.Encode(certOut, &pem.Block{Type: \"CERTIFICATE\", Bytes: crt.Raw}); err != nil {\n\t\treturn err\n\t}\n\tif err := certOut.Close(); err != nil {\n\t\treturn err\n\t}\n\n\tkeyOut, err := os.OpenFile(keyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar privBytes []byte\n\tswitch keyTyped := key.(type) {\n\tcase *rsa.PrivateKey:\n\t\tprivBytes, err = x509.MarshalPKCS8PrivateKey(keyTyped)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := pem.Encode(keyOut, &pem.Block{Type: \"PRIVATE KEY\", Bytes: privBytes}); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *ecdsa.PrivateKey:\n\t\tprivBytes, err = x509.MarshalECPrivateKey(keyTyped)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsecp256r1, err := asn1.Marshal(asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := pem.Encode(keyOut, &pem.Block{Type: \"EC PARAMETERS\", Bytes: secp256r1}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := pem.Encode(keyOut, &pem.Block{Type: \"EC PRIVATE KEY\", Bytes: privBytes}); err != nil {\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\treturn errors.New(\"unknown key format\")\n\t}\n\treturn keyOut.Close()\n}", "func generateNewCertsForPeer(network *nwo.Network, tempCryptoDir string, peer *nwo.Peer) {\n\tsess, err := network.Cryptogen(commands.Generate{\n\t\tConfig: network.CryptoConfigPath(),\n\t\tOutput: tempCryptoDir,\n\t})\n\tExpect(err).NotTo(HaveOccurred())\n\tEventually(sess, network.EventuallyTimeout).Should(gexec.Exit(0))\n\n\tBy(\"copying the new msp certs for the peer to the original crypto dir\")\n\toldPeerMSPPath := 
network.PeerLocalMSPDir(peer)\n\torg := network.Organization(peer.Organization)\n\ttempPeerMSPPath := filepath.Join(\n\t\ttempCryptoDir,\n\t\t\"peerOrganizations\",\n\t\torg.Domain,\n\t\t\"peers\",\n\t\tfmt.Sprintf(\"%s.%s\", peer.Name, org.Domain),\n\t\t\"msp\",\n\t)\n\tos.RemoveAll(oldPeerMSPPath)\n\terr = exec.Command(\"cp\", \"-r\", tempPeerMSPPath, oldPeerMSPPath).Run()\n\tExpect(err).NotTo(HaveOccurred())\n\n\t// This lets us keep the old user certs for the org for any peers still remaining in the org\n\t// using the old certs\n\tBy(\"copying the new Admin user cert to the original user certs dir as Admin2\")\n\toldAdminUserPath := filepath.Join(\n\t\tnetwork.RootDir,\n\t\t\"crypto\",\n\t\t\"peerOrganizations\",\n\t\torg.Domain,\n\t\t\"users\",\n\t\tfmt.Sprintf(\"Admin2@%s\", org.Domain),\n\t)\n\ttempAdminUserPath := filepath.Join(\n\t\ttempCryptoDir,\n\t\t\"peerOrganizations\",\n\t\torg.Domain,\n\t\t\"users\",\n\t\tfmt.Sprintf(\"Admin@%s\", org.Domain),\n\t)\n\tos.RemoveAll(oldAdminUserPath)\n\terr = exec.Command(\"cp\", \"-r\", tempAdminUserPath, oldAdminUserPath).Run()\n\tExpect(err).NotTo(HaveOccurred())\n\t// We need to rename the signcert from Admin to Admin2 as well\n\terr = os.Rename(\n\t\tfilepath.Join(oldAdminUserPath, \"msp\", \"signcerts\", fmt.Sprintf(\"Admin@%s-cert.pem\", org.Domain)),\n\t\tfilepath.Join(oldAdminUserPath, \"msp\", \"signcerts\", fmt.Sprintf(\"Admin2@%s-cert.pem\", org.Domain)),\n\t)\n\tExpect(err).NotTo(HaveOccurred())\n}", "func generateCert(t *testing.T) (string, string) {\n\tt.Helper()\n\n\tpriv, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tnotBefore := time.Now().Add(-5 * time.Minute)\n\tnotAfter := notBefore.Add(time.Hour)\n\n\tserialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)\n\n\tserialNumber, err := rand.Int(rand.Reader, serialNumberLimit)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttemplate := x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{\"Acme Co\"},\n\t\t},\n\t\tNotBefore: notBefore,\n\t\tNotAfter: notAfter,\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},\n\t\tBasicConstraintsValid: true,\n\t}\n\n\tderBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar certOut bytes.Buffer\n\tif err := pem.Encode(&certOut, &pem.Block{Type: \"CERTIFICATE\", Bytes: derBytes}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar keyOut bytes.Buffer\n\tif err := pem.Encode(&keyOut, &pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treturn certOut.String(), keyOut.String()\n}", "func (rcc *rotateCertsCmd) distributeCerts() (err error) {\n\tupload := func(files fileMap, node *ssh.RemoteHost) error {\n\t\tfor _, file := range files {\n\t\t\tvar co string\n\t\t\tif co, err = ssh.CopyToRemote(context.Background(), node, file); err != nil {\n\t\t\t\tlog.Debugf(\"Remote command output: %s\", co)\n\t\t\t\treturn errors.Wrap(err, \"uploading certificate\")\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\tmasterCerts, linuxCerts, windowsCerts, e := getFilesToDistribute(rcc.cs, \"/etc/kubernetes/rotate-certs/certs\")\n\tif e != nil {\n\t\treturn errors.Wrap(e, \"collecting files to distribute\")\n\t}\n\tfor _, node := range rcc.nodes {\n\t\tlog.Debugf(\"Uploading certificates to node %s\", node.URI)\n\t\tif 
isMaster(node) {\n\t\t\terr = upload(masterCerts, node)\n\t\t} else if isLinuxAgent(node) {\n\t\t\terr = upload(linuxCerts, node)\n\t\t} else if isWindowsAgent(node) {\n\t\t\terr = upload(windowsCerts, node)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func NewCert(pemcsr []byte, pemCA []byte, caKey *PrivateKey, opts *CertOption) ([]byte, error) {\n\tcertOpts, err := opts.Validate()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// parse the CA's certificate\n\tcaCert, err := ParseCert(pemCA)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !caCert.IsCA {\n\t\treturn nil, fmt.Errorf(\"only CA can sign certificates\")\n\t}\n\n\t// parse and validate CSR\n\tcsr, err := ParseCSR(pemcsr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = csr.CheckSignature(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// random serial\n\tserialNumber, err := newRandomCertSerial()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// create client certificate template\n\tclientTemplate := x509.Certificate{\n\t\tSignature: csr.Signature,\n\t\tSignatureAlgorithm: csr.SignatureAlgorithm,\n\t\tPublicKeyAlgorithm: csr.PublicKeyAlgorithm,\n\t\tPublicKey: csr.PublicKey,\n\n\t\tSerialNumber: serialNumber,\n\t\tIssuer: caCert.Subject,\n\t\tSubject: csr.Subject,\n\t\tNotBefore: certOpts.notBefore,\n\t\tNotAfter: certOpts.notAfter,\n\n\t\tExtKeyUsage: []x509.ExtKeyUsage{},\n\n\t\tDNSNames: []string{},\n\t\tEmailAddresses: []string{},\n\t\tURIs: []*url.URL{},\n\t\tIPAddresses: []net.IP{},\n\t}\n\n\t// CA must have these\n\tif certOpts.isCA {\n\t\t// ECDSA, ED25519 and RSA subject keys should have the DigitalSignature\n\t\t// KeyUsage bits set in the x509.Certificate template\n\t\tcaKeyUsage := x509.KeyUsageDigitalSignature\n\t\tfor _, v := range certOpts.keyUsage {\n\t\t\tif v == x509.KeyUsageDigitalSignature || v == x509.KeyUsageCertSign {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcaKeyUsage |= v\n\t\t}\n\t\tcaKeyUsage |= x509.KeyUsageCertSign\n\n\t\tclientTemplate.IsCA = true\n\t\tclientTemplate.BasicConstraintsValid = true\n\t\tclientTemplate.KeyUsage = caKeyUsage\n\t} else {\n\t\tvar keyUsage x509.KeyUsage\n\t\tif len(certOpts.keyUsage) == 0 {\n\t\t\t// default key usage\n\t\t\tkeyUsage = x509.KeyUsageDigitalSignature\n\t\t} else {\n\t\t\tfor _, v := range certOpts.keyUsage {\n\t\t\t\tkeyUsage |= v\n\t\t\t}\n\t\t}\n\t\tclientTemplate.KeyUsage = keyUsage\n\t}\n\n\t// add any custom eku\n\tif len(certOpts.extKeyUsage) != 0 {\n\t\tfor _, v := range certOpts.extKeyUsage {\n\t\t\tclientTemplate.ExtKeyUsage = append(clientTemplate.ExtKeyUsage, v)\n\t\t}\n\t} else {\n\t\t// default eku\n\t\tclientTemplate.ExtKeyUsage = []x509.ExtKeyUsage{\n\t\t\tx509.ExtKeyUsageServerAuth,\n\t\t\tx509.ExtKeyUsageClientAuth,\n\t\t}\n\t}\n\n\t// add SAN, email, IP and URLs if any\n\tif len(certOpts.san) != 0 {\n\t\tclientTemplate.DNSNames = certOpts.san\n\t}\n\tif len(certOpts.emails) != 0 {\n\t\tclientTemplate.EmailAddresses = certOpts.emails\n\t}\n\tif len(certOpts.urls) != 0 {\n\t\tclientTemplate.URIs = certOpts.urls\n\t}\n\tif len(certOpts.ipaddrs) != 0 {\n\t\tclientTemplate.IPAddresses = certOpts.ipaddrs\n\t}\n\n\tvar derBytes []byte\n\tif caKey.Algorithm().IsEC() {\n\t\tpriv, err := caKey.EC()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tderBytes, err = x509.CreateCertificate(rand.Reader, &clientTemplate, caCert, priv.Public(), priv)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else if caKey.Algorithm().IsRSA() {\n\t\tpriv, err := caKey.RSA()\n\t\tif err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t\tderBytes, err = x509.CreateCertificate(rand.Reader, &clientTemplate, caCert, priv.Public(), priv)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else if caKey.Algorithm().IsEd25519() {\n\t\tpriv, err := caKey.Ed25519()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tderBytes, err = x509.CreateCertificate(rand.Reader, &clientTemplate, caCert, priv.Public(), priv)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tpanic(\"internal consistency: private_key_unexpected_algo\")\n\t}\n\n\tcertPEM := new(bytes.Buffer)\n\terr = pem.Encode(certPEM, &pem.Block{\n\t\tType: \"CERTIFICATE\",\n\t\tBytes: derBytes,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn certPEM.Bytes(), nil\n}", "func GenerateSecretForCerts(name string, namespace string) (*v1.Secret, error) {\n\tsecretTemplate, err := template.New(\"\").Funcs(sprig.TxtFuncMap()).Parse(certsSecretTemplate)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuffer := bytes.NewBuffer(nil)\n\n\terr = secretTemplate.Execute(buffer, map[string]string{\"cn\": name, \"externalDNS\": name + \".\" + namespace})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar secret v1.Secret\n\n\terr = yaml.Unmarshal(buffer.Bytes(), &secret)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsecret.Name = name + \"-tls\"\n\tsecret.Namespace = namespace\n\n\treturn &secret, nil\n}", "func (o BackendCredentialsOutput) Certificates() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v BackendCredentials) []string { return v.Certificates }).(pulumi.StringArrayOutput)\n}", "func MakeRootCertificate(config *Config, outPath string, outKeyPath string) (uint16, error) {\n\tsigner, err := getSigner(config.rand(), config.SignatureAlgorithm, config.EndEntity || config.ForDC)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tpriv, pub, err := signer.GenerateKey()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tspkiASN1, err := x509.MarshalPKIXPublicKey(pub)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar spki struct {\n\t\tAlgorithm pkix.AlgorithmIdentifier\n\t\tSubjectPublicKey asn1.BitString\n\t}\n\t_, err = asn1.Unmarshal(spkiASN1, &spki)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tskid := sha1.Sum(spki.SubjectPublicKey.Bytes)\n\n\tserialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)\n\tserialNumber, err := rand.Int(config.rand(), serialNumberLimit)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\ttpl := &x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{\"tls-interop-runner development CA\"},\n\t\t\tOrganizationalUnit: []string{\"tls-interop-runner\"},\n\n\t\t\t// The CommonName is required by iOS to show the certificate in the\n\t\t\t// \"Certificate Trust Settings\" menu.\n\t\t\t// https://github.com/FiloSottile/mkcert/issues/47\n\t\t\tCommonName: \"tls-interop-runner\",\n\t\t},\n\t\tSubjectKeyId: skid[:],\n\n\t\tNotBefore: config.ValidFrom,\n\t\tNotAfter: config.ValidFrom.Add(config.ValidFor),\n\n\t\tKeyUsage: x509.KeyUsageCertSign,\n\n\t\tBasicConstraintsValid: true,\n\t\tIsCA: true,\n\t\tMaxPathLenZero: true,\n\t}\n\n\tcert, err := x509.CreateCertificate(config.rand(), tpl, tpl, pub, priv)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tprivDER, err := x509.MarshalPKCS8PrivateKey(priv)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\terr = ioutil.WriteFile(outKeyPath, pem.EncodeToMemory(\n\t\t&pem.Block{Type: \"PRIVATE KEY\", Bytes: privDER}), 0600)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\terr = ioutil.WriteFile(outPath, 
pem.EncodeToMemory(\n\t\t&pem.Block{Type: \"CERTIFICATE\", Bytes: cert}), 0644)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn signer.algorithmID, nil\n}", "func (p *OAuthProxy) Certs(rw http.ResponseWriter, _ *http.Request) {\n\trw.Write(p.publicCertsJSON)\n}", "func templateFunctionGenerateCertificate(key, cn, lifetime, usage string, sans []interface{}, caKey, caCert interface{}) (string, error) {\n\tglog.V(log.LevelDebug).Infof(\"generateCertificate: key '%s', cn '%s', lifetime '%s', usage '%s', sans %v, ca key '%s', ca cert '%s'\", key, cn, lifetime, usage, sans, caKey, caCert)\n\n\tduration, err := time.ParseDuration(lifetime)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar caKeyTyped []byte\n\n\tif caKey != nil {\n\t\tt, ok := caKey.(string)\n\t\tif !ok {\n\t\t\treturn \"\", errors.NewConfigurationError(\"CA key not a string\")\n\t\t}\n\n\t\tcaKeyTyped = []byte(t)\n\t}\n\n\tvar caCertTyped []byte\n\n\tif caCert != nil {\n\t\tt, ok := caCert.(string)\n\t\tif !ok {\n\t\t\treturn \"\", errors.NewConfigurationError(\"CA certificate not a string\")\n\t\t}\n\n\t\tcaCertTyped = []byte(t)\n\t}\n\n\tsansTyped := make([]string, len(sans))\n\n\tfor index, san := range sans {\n\t\tt, ok := san.(string)\n\t\tif !ok {\n\t\t\treturn \"\", errors.NewConfigurationError(\"SAN %v not a string\", san)\n\t\t}\n\n\t\tsansTyped[index] = t\n\t}\n\n\tcert, err := util.GenerateCertificate([]byte(key), cn, duration, util.CertificateUsage(usage), sansTyped, caKeyTyped, caCertTyped)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvalue := string(cert)\n\n\tglog.V(log.LevelDebug).Infof(\"generateCertificate: value '%v'\", value)\n\n\treturn value, nil\n}", "func makeCertChain() (eePrivKey *ecdsa.PrivateKey, chain []*x509.Certificate, err error) {\n\trootKeyName := []byte(fmt.Sprintf(\"root%d\", time.Now().UnixNano()))\n\trootPriv, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)\n\tif err != nil {\n\t\treturn\n\t}\n\tcaTpl := &x509.Certificate{\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{\"Mozilla\"},\n\t\t\tCountry: []string{\"US\"},\n\t\t\tProvince: []string{\"CA\"},\n\t\t\tLocality: []string{\"Mountain View\"},\n\t\t},\n\t\tNotBefore: time.Now().AddDate(0, -2, -2), // start 2 months and 2 days ago\n\t\tNotAfter: time.Now().AddDate(30, 0, 0), // valid for 30 years\n\t\tSignatureAlgorithm: x509.ECDSAWithSHA384,\n\t\tIsCA: true,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},\n\t\tKeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign | x509.KeyUsageDigitalSignature,\n\t\tBasicConstraintsValid: true,\n\t}\n\tcaTpl.SerialNumber = big.NewInt(time.Now().UnixNano())\n\tcaTpl.Subject.CommonName = string(rootKeyName)\n\trootCertBytes, err := x509.CreateCertificate(\n\t\trand.Reader, caTpl, caTpl, rootPriv.Public(), rootPriv)\n\tif err != nil {\n\t\treturn\n\t}\n\trootCert, err := x509.ParseCertificate(rootCertBytes)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tinterKeyName := []byte(fmt.Sprintf(\"inter%d\", time.Now().UnixNano()))\n\tinterPriv, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)\n\tif err != nil {\n\t\treturn\n\t}\n\tcaTpl.SerialNumber = big.NewInt(time.Now().UnixNano())\n\tcaTpl.Subject.CommonName = string(interKeyName)\n\tcaTpl.NotBefore = time.Now().AddDate(0, -2, -1) // start 2 months and 1 day ago\n\tcaTpl.NotAfter = time.Now().AddDate(10, 0, 0) // valid for 10 years\n\tinterCertBytes, err := x509.CreateCertificate(\n\t\trand.Reader, caTpl, rootCert, interPriv.Public(), rootPriv)\n\tif err != nil {\n\t\treturn\n\t}\n\tinterCert, err := 
x509.ParseCertificate(interCertBytes)\n\tif err != nil {\n\t\treturn\n\t}\n\n\teeKeyName := []byte(fmt.Sprintf(\"endentity%d\", time.Now().UnixNano()))\n\teePrivKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)\n\tif err != nil {\n\t\treturn\n\t}\n\tcaTpl.SerialNumber = big.NewInt(time.Now().UnixNano())\n\tcaTpl.Subject.CommonName = string(eeKeyName)\n\tcaTpl.NotBefore = time.Now().AddDate(0, -2, -1) // start 2 months and 1 day ago\n\tcaTpl.NotAfter = time.Now().AddDate(1, 0, 0) // valid for 1 year\n\teeCertBytes, err := x509.CreateCertificate(\n\t\trand.Reader, caTpl, interCert, eePrivKey.Public(), interPriv)\n\tif err != nil {\n\t\treturn\n\t}\n\teeCert, err := x509.ParseCertificate(eeCertBytes)\n\tif err != nil {\n\t\treturn\n\t}\n\tchain = []*x509.Certificate{eeCert, interCert, rootCert}\n\treturn\n}", "func Exec(opts ...Opt) (Cert, error) {\n\tvar p params\n\tfor _, o := range opts {\n\t\to(&p)\n\t}\n\tif len(p.domains) == 0 {\n\t\treturn Cert{}, ErrNoDomains\n\t}\n\n\t// Ask mkcert to generate the certificates.\n\tvar args []string\n\tif p.certFile != \"\" {\n\t\targs = append(args, \"-cert-file\", p.certFile)\n\t}\n\tif p.keyFile != \"\" {\n\t\targs = append(args, \"-key-file\", p.keyFile)\n\t}\n\tcmd := exec.Command(\"mkcert\", append(args, p.domains...)...)\n\tcmd.Dir = p.dir\n\tout, err := cmd.CombinedOutput()\n\n\tif err != nil {\n\t\tif perr, ok := err.(*exec.ExitError); ok {\n\t\t\tperr.Stderr = out\n\t\t}\n\t\treturn Cert{}, fmt.Errorf(\"mkcert: %w\", err)\n\t}\n\n\tcertFile, keyFile := parseFiles(out)\n\tcert := Cert{\n\t\tCARoot: parseCA(out),\n\t\tTrusted: parseTrusted(out),\n\t\tDomains: p.domains,\n\t\tFile: certFile,\n\t\tKeyFile: keyFile,\n\t}\n\tif cmd.Dir != \"\" {\n\t\tif !filepath.IsAbs(cert.File) {\n\t\t\tcert.File = filepath.Join(cmd.Dir, cert.File)\n\t\t}\n\t\tif !filepath.IsAbs(cert.KeyFile) {\n\t\t\tcert.KeyFile = filepath.Join(cmd.Dir, cert.KeyFile)\n\t\t}\n\t}\n\tif !cert.Trusted && p.requireTrust {\n\t\terr = fmt.Errorf(\"mkcert: CA at %s not trusted, run mkcert -install\", cert.CARoot)\n\t}\n\treturn cert, err\n}", "func (a *ACMEInstance) ExportCert(filenameCertPEM, filenamePrivateKey, filenameCert string) {\n\n\t//cert.pem\n\tfmt.Println(\"Your new cert is here:\")\n\tfmt.Println(a.certificate)\n\tf, err := os.Create(filenameCertPEM)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Send: %v\\n\", err)\n\t\treturn\n\t}\n\tdefer f.Close()\n\tf.WriteString(a.certificate)\n\n\t//privateKey.pem\n\tfmt.Println(\"The private key used was:\")\n\tfmt.Println(a.clientKey.PrivateKeyPEM)\n\tf, err = os.Create(filenamePrivateKey)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Send: %v\\n\", err)\n\t\treturn\n\t}\n\tdefer f.Close()\n\tf.Write(a.clientKey.PrivateKeyPEM.Bytes())\n\n\t//cert.crt\n\tf, err = os.Create(filenameCert)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Send: %v\\n\", err)\n\t\treturn\n\t}\n\tdefer f.Close()\n\tf.WriteString(a.certificate)\n\n\treturn\n}", "func TemplateCA(cn string) *x509.Certificate {\n\tca := Template(cn)\n\tca.KeyUsage = x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign | x509.KeyUsageCRLSign\n\tca.IsCA = true\n\tca.MaxPathLen = certMaxPathLen\n\tca.MaxPathLenZero = true\n\treturn ca\n}", "func mkCert(a *PkiArgs) (pem.Block, pem.Block, error) {\n\tisX := a.isX\n\tisCA := a.isCA\n\tvalidFor := a.validFor\n\tvalidFrom := a.validFrom\n\tecdsaCurve := a.ecdsaCurve\n\thost := a.host\n\trsaBits := a.rsaBits\n\tparent := a.parent\n\tif len(host) == 0 {\n\t\tlog.Fatalf(\"Missing required --host 
parameter\")\n\t}\n\n\tpriv, err := mkKey(ecdsaCurve, rsaBits)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to generate private key: %s\", err)\n\t}\n\n\tvar notBefore time.Time\n\tif len(validFrom) == 0 {\n\t\tnotBefore = time.Now()\n\t} else {\n\t\tnotBefore, err = time.Parse(\"Jan 2 15:04:05 2006\", validFrom)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Failed to parse creation date: %s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\tnotAfter := notBefore.Add(validFor)\n\n\tserialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)\n\tserialNumber, err := rand.Int(rand.Reader, serialNumberLimit)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to generate serial number: %s\", err)\n\t}\n\n\ttemplate := &x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{\"xyz\"},\n\t\t},\n\t\tNotBefore: notBefore,\n\t\tNotAfter: notAfter,\n\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},\n\t\tBasicConstraintsValid: true,\n\t}\n\tif isX {\n\t\ttemplate.Subject.OrganizationalUnit = []string{\"X\"}\n\t}\n\n\thosts := strings.Split(host, \",\")\n\tfor _, h := range hosts {\n\t\tif ip := net.ParseIP(h); ip != nil {\n\t\t\ttemplate.IPAddresses = append(template.IPAddresses, ip)\n\t\t} else {\n\t\t\ttemplate.DNSNames = append(template.DNSNames, h)\n\t\t}\n\t}\n\n\tif isCA {\n\t\ttemplate.IsCA = true\n\t\ttemplate.KeyUsage |= x509.KeyUsageCertSign\n\t}\n\n\tif parent == nil {\n\t\tparent = template\n\t}\n\tderBytes, err := x509.CreateCertificate(rand.Reader, template, parent, publicKey(priv), priv)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to create certificate: %s\", err)\n\t}\n\n\treturn pem.Block{Type: \"CERTIFICATE\", Bytes: derBytes}, *pemBlockForKey(priv), nil\n\n}", "func generateTestCert(host string) error {\n\tcertPath := getPublicCertFile()\n\tkeyPath := getPrivateKeyFile()\n\tpriv, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tserialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)\n\tserialNumber, err := rand.Int(rand.Reader, serialNumberLimit)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttemplate := x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{\"Minio Test Cert\"},\n\t\t},\n\t\tNotBefore: UTCNow(),\n\t\tNotAfter: UTCNow().Add(time.Minute * 1),\n\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},\n\t\tBasicConstraintsValid: true,\n\t}\n\n\tif ip := net.ParseIP(host); ip != nil {\n\t\ttemplate.IPAddresses = append(template.IPAddresses, ip)\n\t}\n\n\ttemplate.IsCA = true\n\ttemplate.KeyUsage |= x509.KeyUsageCertSign\n\n\tderBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcertOut, err := os.Create(certPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpem.Encode(certOut, &pem.Block{Type: \"CERTIFICATE\", Bytes: derBytes})\n\tcertOut.Close()\n\n\tkeyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpem.Encode(keyOut, &pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(priv)})\n\tkeyOut.Close()\n\treturn nil\n}", "func WriteCertFile(chain *ChainOfTrust, perm os.FileMode) error {\n\t// Attempt to create and open file. 
Error if file already exists.\n\tfileFlag := os.O_WRONLY | os.O_CREATE | os.O_EXCL\n\tfile, err := os.OpenFile(chain.Filename, fileFlag, perm)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to open %v: %w\", chain.Filename, err)\n\t}\n\t// Create certificate serial number.\n\tserialNumber, err := CreateSerialNumber()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to generate serial number for %v: %w\", chain.Filename, err)\n\t}\n\n\t// Split the hosts list into DNS names and IP addresses.\n\tdnsNames := []string{}\n\tipAddresses := []net.IP{}\n\tfor _, host := range chain.Hosts {\n\t\tif ip := net.ParseIP(host); ip != nil {\n\t\t\tipAddresses = append(ipAddresses, ip)\n\t\t} else {\n\t\t\tdnsNames = append(dnsNames, host)\n\t\t}\n\t}\n\n\t// Define the certificate template.\n\tcertTemplate := &x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tNotBefore: time.Now(),\n\t\tNotAfter: time.Now().AddDate(0, 0, chain.Days),\n\t\tSubject: *chain.Subject,\n\t\tDNSNames: dnsNames,\n\t\tIPAddresses: ipAddresses,\n\t\tBasicConstraintsValid: !chain.leaf,\n\t\tIsCA: !chain.leaf,\n\t\tExtKeyUsage: extKeyUsage(chain.leaf),\n\t\tKeyUsage: keyUsage(chain.leaf),\n\t}\n\t// Read the RSA private key file.\n\tkey, err := ReadKeyFile(chain.KeyFilename, chain.KeyPassword)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to read RSA private key file %v: %w\", chain.KeyFilename, err)\n\t}\n\t// Use the key and certificate of the given chain level also as the parent key\n\t// and certificate in case the given chain level doesn't have a parent.\n\tparentKey := key\n\tparentCertTemplate := certTemplate\n\t// If the given chain level does have a parent then get its key and certificate template.\n\tif chain.Parent != nil {\n\t\tparentKey, err = ReadKeyFile(chain.Parent.KeyFilename, chain.Parent.KeyPassword)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tparentCertTemplate, err = ReadCertFile(chain.Parent.Filename)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t// Create a X.509v3 certificate.\n\tcertBytes, err := x509.CreateCertificate(\n\t\trand.Reader,\n\t\tcertTemplate,\n\t\tparentCertTemplate,\n\t\t&key.PublicKey,\n\t\tparentKey,\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create certificate %v: %w\", chain.Filename, err)\n\t}\n\t// Encode the certificate in a PEM block.\n\tpemBlock := &pem.Block{\n\t\tType: \"CERTIFICATE\",\n\t\tBytes: certBytes,\n\t}\n\t// Write PEM block to file.\n\tif _, err = file.Write(pem.EncodeToMemory(pemBlock)); err != nil {\n\t\treturn fmt.Errorf(\"failed to write %v: %w\", chain.Filename, err)\n\t}\n\t// Close file.\n\tif err := file.Close(); err != nil {\n\t\treturn fmt.Errorf(\"failed to close %v: %w\", chain.Filename, err)\n\t}\n\treturn nil\n}", "func GenerateCertificate(\n\tconfig BasicCertificateConfig, issuer *x509.Certificate, key *rsa.PrivateKey) ([]byte, error) {\n\tvar pubKey *rsa.PublicKey\n\n\tserialNumber, err := generateSerialNumber()\n\tif err != nil {\n\t\tlog.Printf(\"failed to generate serial number: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tnotBefore := time.Now()\n\tnotAfter := notBefore.Add(validFor)\n\n\ttemplate := x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tSubject: config.name,\n\t\tEmailAddresses: config.emailAddresses,\n\n\t\tNotBefore: notBefore,\n\t\tNotAfter: notAfter,\n\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},\n\t\tBasicConstraintsValid: true,\n\n\t\tIsCA: config.isCA,\n\t}\n\n\tfor _, h := range 
config.hosts {\n\t\tif ip := net.ParseIP(h); ip != nil {\n\t\t\ttemplate.IPAddresses = append(template.IPAddresses, ip)\n\t\t} else {\n\t\t\ttemplate.DNSNames = append(template.DNSNames, h)\n\t\t}\n\t}\n\n\tif template.IsCA {\n\t\ttemplate.KeyUsage |= x509.KeyUsageCertSign\n\t}\n\n\t// Self-signed\n\tif issuer == nil {\n\t\tissuer = &template\n\t\tpubKey = key.Public().(*rsa.PublicKey)\n\t} else {\n\t\tpubKey = issuer.PublicKey.(*rsa.PublicKey)\n\t}\n\n\tlog.Printf(\"Generating certificate - SN: %x\", template.SerialNumber)\n\treturn x509.CreateCertificate(rand.Reader, &template, issuer, pubKey, key)\n}", "func CreateCertificate(opt Options) ([]byte, *ecdsa.PrivateKey, error) {\n\tif opt.SignWithCert == nil && !opt.IsCA {\n\t\treturn nil, nil, errors.New(\"illegal options: must either be a CA or be signed\")\n\t}\n\tif opt.Host == \"\" {\n\t\treturn nil, nil, errors.New(\"must specify a host\")\n\t}\n\tif opt.ValidDuration < time.Hour*24 {\n\t\treturn nil, nil, errors.New(\"absurdly small expiration time\")\n\t}\n\tif opt.ValidFrom.IsZero() {\n\t\topt.ValidFrom = time.Now().Add(-time.Hour * 24)\n\t}\n\n\tpriv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tnotAfter := opt.ValidFrom.Add(opt.ValidDuration)\n\n\tserialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)\n\tserialNumber, err := rand.Int(rand.Reader, serialNumberLimit)\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar dnsnames []string\n\tvar ipaddrs []net.IP\n\n\tfor _, h := range opt.Addresses {\n\t\tif ip := net.ParseIP(h); ip != nil {\n\t\t\tipaddrs = append(ipaddrs, ip)\n\t\t} else {\n\t\t\tdnsnames = append(dnsnames, h)\n\t\t}\n\t}\n\n\ttemplate := &x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{opt.Organization},\n\t\t\tCountry: []string{\"GO\"},\n\t\t\tProvince: []string{\"reign\"},\n\t\t\tCommonName: opt.CommonName,\n\t\t},\n\t\tNotBefore: opt.ValidFrom,\n\t\tNotAfter: notAfter,\n\t\tBasicConstraintsValid: true,\n\t}\n\n\tif opt.IsCA {\n\t\ttemplate.IsCA = true\n\t\ttemplate.KeyUsage |= x509.KeyUsageCertSign\n\t}\n\n\tif opt.SignWithCert == nil {\n\t\topt.SignWithCert = template\n\t\topt.SignWithPrivateKey = priv\n\t}\n\n\tderBytes, err := x509.CreateCertificate(\n\t\trand.Reader,\n\t\ttemplate,\n\t\topt.SignWithCert,\n\t\t&priv.PublicKey,\n\t\topt.SignWithPrivateKey,\n\t)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn derBytes, priv, nil\n}", "func (r *templateRouter) shouldWriteCerts(cfg *ServiceAliasConfig) bool {\n\n\t// The cert is already written\n\tif cfg.Status == ServiceAliasConfigStatusSaved {\n\t\treturn false\n\t}\n\n\tif cfg.Certificates == nil {\n\t\treturn false\n\t}\n\n\tif cfg.TLSTermination == routev1.TLSTerminationEdge || cfg.TLSTermination == routev1.TLSTerminationReencrypt {\n\t\tif hasRequiredEdgeCerts(cfg) {\n\t\t\treturn true\n\t\t}\n\n\t\tif cfg.TLSTermination == routev1.TLSTerminationReencrypt {\n\t\t\tif hasReencryptDestinationCACert(cfg) {\n\t\t\t\tlog.V(4).Info(\"a reencrypt route does not have an edge certificate, using default router certificate\", \"host\", cfg.Host)\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif len(r.defaultDestinationCAPath) > 0 {\n\t\t\t\tlog.V(4).Info(\"a reencrypt route does not have a destination CA, using default destination CA\", \"host\", cfg.Host)\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\tmsg := fmt.Sprintf(\"a %s terminated route with host %s does not have the required certificates. 
The route will still be created but no certificates will be written\",\n\t\t\tcfg.TLSTermination, cfg.Host)\n\t\t// if a default cert is configured we'll assume it is meant to be a wildcard and only log info\n\t\t// otherwise we'll consider this a warning\n\t\tif len(r.defaultCertificatePath) > 0 {\n\t\t\tlog.V(4).Info(msg)\n\t\t} else {\n\t\t\tlog.V(0).Info(msg)\n\t\t}\n\t\treturn false\n\t}\n\treturn false\n}", "func (r *templateRouter) writeCertificates(cfg *ServiceAliasConfig) error {\n\tif r.shouldWriteCerts(cfg) {\n\t\treturn r.certManager.WriteCertificatesForConfig(cfg)\n\t}\n\treturn nil\n}", "func NewCertGenerator(organization []string, options ...Option) *CertGenerator {\n\topts := newOptions()\n\tfor _, option := range options {\n\t\toption(opts)\n\t}\n\n\treturn &CertGenerator{\n\t\tfileSystem: opts.fileSystem,\n\t\tclock: opts.clock,\n\t\tos: opts.os,\n\t\torganization: organization,\n\t}\n}", "func generateTLSConfig(caCertFile string, clientCertFile string, clientKeyFile string, customVerifyFunc customVerifyFunc) (*tls.Config, error) {\n\tclientKeyPair, err := tls.LoadX509KeyPair(clientCertFile, clientKeyFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcaCertFileBytes, err := ioutil.ReadFile(caCertFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\troots := x509.NewCertPool()\n\troots.AppendCertsFromPEM(caCertFileBytes)\n\n\ttlsConfig := &tls.Config{\n\t\tCertificates: []tls.Certificate{clientKeyPair},\n\t\tRootCAs: roots,\n\t\tMinVersion: tls.VersionTLS12,\n\t\tInsecureSkipVerify: true,\n\t\t// Legacy TLS Verification using the new VerifyConnection callback\n\t\t// important for go version 1.15+ as some certificates in environments\n\t\t// that cause the new standard lib verification to fail.\n\t\t// This isn't really needed if your SSL certificates don't have the Common Name issue.\n\t\t// For more information: https://github.com/golang/go/issues/39568\n\t\tVerifyConnection: func(cs tls.ConnectionState) error {\n\t\t\tcommonName := cs.PeerCertificates[0].Subject.CommonName\n\t\t\tif commonName != cs.ServerName {\n\t\t\t\treturn fmt.Errorf(\"invalid certificate name %q, expected %q\", commonName, cs.ServerName)\n\t\t\t}\n\t\t\topts := x509.VerifyOptions{\n\t\t\t\tRoots: roots,\n\t\t\t\tIntermediates: x509.NewCertPool(),\n\t\t\t}\n\t\t\tfor _, cert := range cs.PeerCertificates[1:] {\n\t\t\t\topts.Intermediates.AddCert(cert)\n\t\t\t}\n\t\t\t_, err := cs.PeerCertificates[0].Verify(opts)\n\t\t\treturn err\n\t\t},\n\t}\n\tif customVerifyFunc != nil {\n\t\ttlsConfig.VerifyPeerCertificate = customVerifyFunc\n\t\ttlsConfig.VerifyConnection = nil\n\t}\n\ttlsConfig.BuildNameToCertificate()\n\n\treturn tlsConfig, nil\n}", "func WithCertFiles(files []string) Opts {\n\treturn func(r *retryable) {\n\t\tfor _, f := range files {\n\t\t\tc, err := ioutil.ReadFile(f)\n\t\t\tif err != nil {\n\t\t\t\tr.log.WithFields(logrus.Fields{\n\t\t\t\t\t\"err\": err,\n\t\t\t\t\t\"file\": f,\n\t\t\t\t}).Warn(\"Failed to read certificate\")\n\t\t\t} else {\n\t\t\t\tr.rootCAPool = append(r.rootCAPool, c)\n\t\t\t}\n\t\t}\n\t}\n}", "func (c EasyCert) generateClient(certFile, keyFile string, ca *x509.Certificate, caKey crypto.PrivateKey) error {\n\ttemplate := c.newCertificate()\n\ttemplate.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}\n\n\tpriv, err := c.newPrivateKey()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn c.generateFromTemplate(certFile, keyFile, template, ca, priv, caKey)\n}", "func (c *IdentityConfig) CAServerCerts(caID string) ([][]byte, bool) {\n\tcfg, ok := 
c.caConfigs[strings.ToLower(caID)]\n\tif ok {\n\t\t//for now, we're only loading the first Cert Authority by default.\n\t\treturn cfg.TLSCAServerCerts, true\n\t}\n\treturn nil, false\n}", "func GenCertPoolUseCA(caFile string) (*x509.CertPool, error) {\n\tif caFile == \"\" {\n\t\treturn nil, errors.New(\"CA file is not set\")\n\t}\n\n\tif _, err := os.Stat(caFile); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, fmt.Errorf(\"CA file(%s) doesn't exist\", caFile)\n\t\t}\n\t\treturn nil, fmt.Errorf(\"fail to stat the CA file(%s): %s\", caFile, err)\n\t}\n\n\tcaData, err := ioutil.ReadFile(caFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcertPool := x509.NewCertPool()\n\tcertPool.AppendCertsFromPEM(caData)\n\treturn certPool, nil\n}", "func (s *Server) createDefaultCAConfigs(cacount int) error {\n\tlog.Debugf(\"Creating %d default CA configuration files\", cacount)\n\n\tcashome, err := util.MakeFileAbs(\"ca\", s.HomeDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tos.Mkdir(cashome, 0755)\n\n\tfor i := 1; i <= cacount; i++ {\n\t\tcahome := fmt.Sprintf(cashome+\"/ca%d\", i)\n\t\tcfgFileName := filepath.Join(cahome, \"fabric-ca-config.yaml\")\n\n\t\tcaName := fmt.Sprintf(\"ca%d\", i)\n\t\tcfg := strings.Replace(defaultCACfgTemplate, \"<<<CANAME>>>\", caName, 1)\n\n\t\tcn := fmt.Sprintf(\"fabric-ca-server-ca%d\", i)\n\t\tcfg = strings.Replace(cfg, \"<<<COMMONNAME>>>\", cn, 1)\n\n\t\tdatasource := dbutil.GetCADataSource(s.CA.Config.DB.Type, s.CA.Config.DB.Datasource, i)\n\t\tcfg = strings.Replace(cfg, \"<<<DATASOURCE>>>\", datasource, 1)\n\n\t\ts.Config.CAfiles = append(s.Config.CAfiles, cfgFileName)\n\n\t\t// Now write the file\n\t\terr := os.MkdirAll(filepath.Dir(cfgFileName), 0755)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = ioutil.WriteFile(cfgFileName, []byte(cfg), 0644)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\treturn nil\n}", "func CreateWithCerts(serverURL, clusterName, userName string, caCert []byte, clientKey []byte, clientCert []byte) *clientcmdapi.Config {\n\tconfig := CreateBasic(serverURL, clusterName, userName, caCert)\n\tconfig.AuthInfos[userName] = &clientcmdapi.AuthInfo{\n\t\tClientKeyData: clientKey,\n\t\tClientCertificateData: clientCert,\n\t}\n\treturn config\n}", "func generateTLSConfig(certfile string) (tls.Config, error) {\n\ttlsConfig := tls.Config{}\n\ttlsConfig.InsecureSkipVerify = true\n\tcertficate, err := ioutil.ReadFile(certfile)\n\tif err != nil {\n\t\treturn tlsConfig, err\n\t}\n\trootCAs := x509.NewCertPool()\n\tif !rootCAs.AppendCertsFromPEM([]byte(certficate)) {\n\t\treturn tlsConfig, errors.New(\"ERROR: failed to parse the server certificate.\\n\")\n\t}\n\treturn tlsConfig, nil\n}", "func (c *AdminKubeConfigSignerCertKey) Generate(parents asset.Parents) error {\n\tcfg := &CertCfg{\n\t\tSubject: pkix.Name{CommonName: \"admin-kubeconfig-signer\", OrganizationalUnit: []string{\"openshift\"}},\n\t\tKeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,\n\t\tValidity: ValidityTenYears,\n\t\tIsCA: true,\n\t}\n\n\treturn c.SelfSignedCertKey.Generate(cfg, \"admin-kubeconfig-signer\")\n}", "func serverkeygen(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\taps := chi.URLParam(r, apsParamName)\n\n\tcsr, err := readCSRRequest(r.Body, true)\n\tif writeOnError(ctx, w, logMsgReadBodyFailed, err) {\n\t\treturn\n\t}\n\n\t// Request certificate from backing CA.\n\tcert, key, err := caFromContext(ctx).ServerKeyGen(ctx, csr, aps, r)\n\tif writeOnError(ctx, w, 
logMsgEnrollFailed, err) {\n\t\treturn\n\t}\n\n\t// Encode and write response.\n\tvar keyContentType string\n\tif _, p8err := x509.ParsePKCS8PrivateKey(key); p8err == nil {\n\t\tkeyContentType = mimeTypePKCS8\n\t} else if _, p7err := pkcs7.Parse(key); p7err == nil {\n\t\tkeyContentType = mimeTypePKCS7GenKey\n\t} else {\n\t\tLoggerFromContext(ctx).Errorf(\"failed to parse private key: %v, %v\", p8err, p7err)\n\t\terrInternal.Write(w)\n\t\treturn\n\t}\n\n\tbuf, contentType, err := encodeMultiPart(\n\t\tserverKeyGenBoundary,\n\t\t[]multipartPart{\n\t\t\t{contentType: keyContentType, data: key},\n\t\t\t{contentType: mimeTypePKCS7CertsOnly, data: cert},\n\t\t},\n\t)\n\tif writeOnError(ctx, w, logMsgMultipartEncodeFailed, err) {\n\t\treturn\n\t}\n\n\twriteResponse(w, contentType, false, buf.Bytes())\n}", "func GenerateAndSignCertificate(root *KeyPair, publicKeyFile, privateKeyFile string) error {\n\tcert := &x509.Certificate{\n\t\tSerialNumber: big.NewInt(1658),\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{\"ayada\"},\n\t\t\tCountry: []string{\"US\"},\n\t\t\tProvince: []string{\"California\"},\n\t\t\tLocality: []string{\"San Francisco\"},\n\t\t\tCommonName: \"localhost\",\n\t\t},\n\t\tDNSNames: []string{\"localhost\", \"ayada.dev\"},\n\t\tIPAddresses: []net.IP{net.IPv4(127, 0, 0, 1), net.IPv6loopback},\n\t\tNotBefore: time.Now(),\n\t\tNotAfter: time.Now().AddDate(10, 0, 0),\n\t\tSubjectKeyId: []byte{1, 2, 3, 4, 6},\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},\n\t\tKeyUsage: x509.KeyUsageDigitalSignature,\n\t}\n\n\tcertPrivKey, err := rsa.GenerateKey(rand.Reader, 4096)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcertBytes, err := x509.CreateCertificate(rand.Reader, cert, root.PublicKey, &certPrivKey.PublicKey, root.PrivateKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcertPEM := new(bytes.Buffer)\n\tpem.Encode(certPEM, &pem.Block{\n\t\tType: \"CERTIFICATE\",\n\t\tBytes: certBytes,\n\t})\n\n\tcertPrivKeyPEM := new(bytes.Buffer)\n\tpem.Encode(certPrivKeyPEM, &pem.Block{\n\t\tType: \"RSA PRIVATE KEY\",\n\t\tBytes: x509.MarshalPKCS1PrivateKey(certPrivKey),\n\t})\n\n\tif err := ioutil.WriteFile(publicKeyFile, certPEM.Bytes(), 0644); err != nil {\n\t\treturn fmt.Errorf(\"failed to write to public key file: %w\", err)\n\t}\n\n\tif err := ioutil.WriteFile(privateKeyFile, certPrivKeyPEM.Bytes(), 0644); err != nil {\n\t\treturn fmt.Errorf(\"failed to write to private key file: %w\", err)\n\t}\n\n\treturn nil\n}", "func GenerateTestCert() (cert, key []byte, expectedDn string, err error) {\n\tpriv, serialNumber, err := generatePrivateKey()\n\tif err != nil {\n\t\treturn cert, key, expectedDn, err\n\t}\n\ttemplate := x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},\n\t\tBasicConstraintsValid: true,\n\t\tIsCA: true,\n\t\tSubject: pkix.Name{\n\t\t\tCommonName: \"test-cn\",\n\t\t\tOrganization: []string{\"test-ou\"},\n\t\t},\n\t}\n\tcert, err = x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)\n\tif err != nil {\n\t\treturn cert, key, expectedDn, err\n\t}\n\tbuf := new(bytes.Buffer)\n\tif err = pem.Encode(buf, &pem.Block{Type: \"CERTIFICATE\", Bytes: cert}); err != nil {\n\t\treturn\n\t}\n\tcert = buf.Bytes()\n\tkey, err = encodePrivateKeyInPemFormat(priv)\n\tif err != nil {\n\t\treturn cert, key, expectedDn, err\n\t}\n\texpectedDn = \"CN=test-cn,O=test-ou\"\n\treturn cert, 
key, expectedDn, err\n}", "func createSyncCertificates(t *testing.T, ip string, useDocker bool) syncCertificates {\n\timage := os.Getenv(\"ARANGODB\")\n\tif image == \"\" && useDocker {\n\t\tt.Fatal(\"Need ARANGODB envvar with name of ArangoDB docker image\")\n\t}\n\n\tdir, err := ioutil.TempDir(\"\", \"starter-test\")\n\tif err != nil {\n\t\tt.Fatalf(\"TempDir failed: %s\", err)\n\t}\n\n\t// Create certificates\n\tcmdLines := []string{\n\t\t\"/usr/sbin/arangosync create tls ca --cert=/data/tls-ca.crt --key=/data/tls-ca.key\",\n\t\t\"/usr/sbin/arangosync create tls keyfile --cacert=/data/tls-ca.crt --cakey=/data/tls-ca.key --keyfile=/data/tls-a.keyfile --host=\" + ip,\n\t\t\"/usr/sbin/arangosync create tls keyfile --cacert=/data/tls-ca.crt --cakey=/data/tls-ca.key --keyfile=/data/tls-b.keyfile --host=\" + ip,\n\t\t\"/usr/sbin/arangosync create client-auth ca --cert=/data/client-auth-ca.crt --key=/data/client-auth-ca.key\",\n\t}\n\n\tfor i, cmdLine := range cmdLines {\n\t\tif useDocker {\n\t\t\tcid := createDockerID(fmt.Sprintf(\"starter-test-cluster-sync-util-%d\", i))\n\t\t\tdockerRun := Spawn(t, strings.Join([]string{\n\t\t\t\t\"docker run -i\",\n\t\t\t\t\"--label starter-test=true\",\n\t\t\t\t\"--name=\" + cid,\n\t\t\t\t\"--rm\",\n\t\t\t\tfmt.Sprintf(\"-v %s:/data\", dir),\n\t\t\t\timage,\n\t\t\t\tcmdLine,\n\t\t\t}, \" \"))\n\t\t\tdefer dockerRun.Close()\n\t\t\tdefer removeDockerContainer(t, cid)\n\n\t\t\tif err := dockerRun.WaitTimeout(time.Minute); err != nil {\n\t\t\t\tt.Fatalf(\"Failed to run '%s': %s\", cmdLine, err)\n\t\t\t}\n\t\t} else {\n\t\t\tdataDir := strings.TrimRight(dir, \"/\") + \"/\"\n\t\t\trun := Spawn(t, strings.Replace(cmdLine, \"/data/\", dataDir, -1))\n\t\t\tdefer run.Close()\n\n\t\t\tif err := run.WaitTimeout(time.Minute); err != nil {\n\t\t\t\tt.Fatalf(\"Failed to run '%s': %s\", cmdLine, err)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Create cluster secret file\n\tvar clusterSecretFile string\n\t{\n\t\tsecret := make([]byte, 32)\n\t\tclusterSecretFile = filepath.Join(dir, \"cluster-secret\")\n\t\trand.Read(secret)\n\t\tsecretEncoded := hex.EncodeToString(secret)\n\t\tif err := ioutil.WriteFile(clusterSecretFile, []byte(secretEncoded), 0644); err != nil {\n\t\t\tt.Fatalf(\"Failed to create cluster secret file: %s\", err)\n\t\t}\n\t}\n\n\t// Create master secret file\n\tvar masterSecretFile string\n\t{\n\t\tsecret := make([]byte, 32)\n\t\tmasterSecretFile = filepath.Join(dir, \"master-secret\")\n\t\trand.Read(secret)\n\t\tsecretEncoded := hex.EncodeToString(secret)\n\t\tif err := ioutil.WriteFile(masterSecretFile, []byte(secretEncoded), 0644); err != nil {\n\t\t\tt.Fatalf(\"Failed to create master secret file: %s\", err)\n\t\t}\n\t}\n\n\tresult := syncCertificates{}\n\tresult.Dir = dir\n\tresult.TLS.CACertificate = filepath.Join(dir, \"tls-ca.crt\")\n\tresult.TLS.CAKey = filepath.Join(dir, \"tls-ca.key\")\n\tresult.TLS.DCA.Keyfile = filepath.Join(dir, \"tls-a.keyfile\")\n\tresult.TLS.DCB.Keyfile = filepath.Join(dir, \"tls-b.keyfile\")\n\tresult.ClientAuth.CACertificate = filepath.Join(dir, \"client-auth-ca.crt\")\n\tresult.ClientAuth.CAKey = filepath.Join(dir, \"client-auth-ca.key\")\n\tresult.ClusterSecret = clusterSecretFile\n\tresult.MasterSecret = masterSecretFile\n\n\treturn result\n}", "func (e *endpoints) getCerts(ctx context.Context) ([]tls.Certificate, *x509.CertPool, error) {\n\tds := e.c.Catalog.DataStores()[0]\n\n\tresp, err := ds.FetchBundle(ctx, &datastore_pb.FetchBundleRequest{\n\t\tTrustDomainId: e.c.TrustDomain.String(),\n\t})\n\tif err != nil {\n\t\treturn nil, 
nil, fmt.Errorf(\"get bundle from datastore: %v\", err)\n\t}\n\tif resp.Bundle == nil {\n\t\treturn nil, nil, errors.New(\"bundle not found\")\n\t}\n\n\tvar caCerts []*x509.Certificate\n\tfor _, rootCA := range resp.Bundle.RootCas {\n\t\trootCACerts, err := x509.ParseCertificates(rootCA.DerBytes)\n\t\tif err != nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"parse bundle: %v\", err)\n\t\t}\n\t\tcaCerts = append(caCerts, rootCACerts...)\n\t}\n\n\tcaPool := x509.NewCertPool()\n\tfor _, c := range caCerts {\n\t\tcaPool.AddCert(c)\n\t}\n\n\te.mtx.RLock()\n\tdefer e.mtx.RUnlock()\n\n\tcertChain := [][]byte{}\n\tfor i, cert := range e.svid {\n\t\tcertChain = append(certChain, cert.Raw)\n\t\t// add the intermediates into the root CA pool since we need to\n\t\t// validate old agents that don't present intermediates with the\n\t\t// certificate request.\n\t\t// TODO: remove this hack in 0.8\n\t\tif i > 0 {\n\t\t\tcaPool.AddCert(cert)\n\t\t}\n\t}\n\n\ttlsCert := tls.Certificate{\n\t\tCertificate: certChain,\n\t\tPrivateKey: e.svidKey,\n\t}\n\n\treturn []tls.Certificate{tlsCert}, caPool, nil\n}", "func generateCertificate(d *schema.ResourceData, m interface{}, appID string) (*okta.JsonWebKey, error) {\n\trequestExecutor := getRequestExecutor(m)\n\tyears := d.Get(\"key_years_valid\").(int)\n\turl := fmt.Sprintf(\"/api/v1/apps/%s/credentials/keys/generate?validityYears=%d\", appID, years)\n\treq, err := requestExecutor.NewRequest(\"POST\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar key *okta.JsonWebKey\n\n\t_, err = requestExecutor.Do(req, &key)\n\n\treturn key, err\n}", "func (ed *ElasticsearchDeployment) CreateCerts() error {\n\terr := extractSecretsToFile(ed.Jaeger, ed.Secrets, masterSecret, esSecret, jaegerSecret, curatorSecret)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to extract certificates from secrets to file\")\n\t}\n\treturn createESCerts(ed.CertScript, ed.Jaeger)\n}", "func GenerateCaddy(ingresses []networkingv1.Ingress, o cmd.Options) (*caddy.Config, error) {\n\tklog.Infof(\"will generate ingresses: %+v\", getDomains(ingresses))\n\n\tappsConfig, err := getAppsConfig(ingresses, o.BackendURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get apps config: %w\", err)\n\t}\n\n\ttlsConfig, err := getTLSConfig(ingresses, o)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get TLS config: %w\", err)\n\t}\n\n\tcaddyHost := net.JoinHostPort(o.CaddyHost, \"2019\")\n\n\tpTrue := true\n\tc := &caddy.Config{\n\t\tAdmin: &caddy.AdminConfig{\n\t\t\tListen: caddyHost,\n\t\t\tDisabled: false,\n\t\t\tConfig: &caddy.ConfigSettings{\n\t\t\t\tPersist: &pTrue,\n\t\t\t},\n\t\t\tOrigins: []string{\n\t\t\t\tcaddyHost,\n\t\t\t},\n\t\t\tEnforceOrigin: true,\n\t\t},\n\t\tAppsRaw: caddy.ModuleMap{\n\t\t\t\"http\": caddyconfig.JSON(appsConfig, nil),\n\t\t\t\"tls\": caddyconfig.JSON(tlsConfig, nil),\n\t\t},\n\t}\n\n\treturn c, nil\n}", "func setCertsArgs(cfg *v3.RancherKubernetesEngineConfig) {\n\tsetArg := func(m map[string]string, key string, value string) {\n\t\tif m[key] == \"\" {\n\t\t\tm[key] = value\n\t\t}\n\t}\n\tif cfg.Services.KubeController.ExtraArgs == nil {\n\t\tcfg.Services.KubeController.ExtraArgs = map[string]string{}\n\t}\n\tsetArg(cfg.Services.KubeController.ExtraArgs, \"client-ca-file\", \"/etc/kubernetes/ssl/kube-ca.pem\")\n\tsetArg(cfg.Services.KubeController.ExtraArgs, \"cluster-signing-cert-file\", \"/etc/kubernetes/ssl/kube-ca.pem\")\n\tsetArg(cfg.Services.KubeController.ExtraArgs, \"cluster-signing-key-file\", 
\"/etc/kubernetes/ssl/kube-ca-key.pem\")\n\tsetArg(cfg.Services.KubeController.ExtraArgs, \"requestheader-client-ca-file\", \"/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem\")\n}", "func WritePEMFiles(dir string) (string, func(), error) {\n\ttempDir, err := ioutil.TempDir(dir, \"go-test-pemfiles\")\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\tdata := `-----BEGIN CERTIFICATE-----\nMIICEjCCAXugAwIBAgIRAK9oivV13n8NjkrxlRObpfQwDQYJKoZIhvcNAQELBQAw\nEjEQMA4GA1UEChMHQWNtZSBDbzAgFw03MDAxMDEwMDAwMDBaGA8yMDg0MDEyOTE2\nMDAwMFowEjEQMA4GA1UEChMHQWNtZSBDbzCBnzANBgkqhkiG9w0BAQEFAAOBjQAw\ngYkCgYEAq03JACUKtgXTKoYNvFPEKmIk5fS4x2MxczPfiT8KLo2gVikfEMqCtoIt\nNcXL+xxYZ8dA2Y26Yk+WjeEzB+/W1qYbei6kZR+GOy3TFINJoqYFZq4sDF6c1Gch\nACqB4oE+4kLdq4hS9cM2IjEUovBQa+Q9frU7ONLLFfOWwJ5Wt0ECAwEAAaNmMGQw\nDgYDVR0PAQH/BAQDAgKkMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEwEB/wQF\nMAMBAf8wLAYDVR0RBCUwI4IJbG9jYWxob3N0hwR/AAABhxAAAAAAAAAAAAAAAAAA\nAAABMA0GCSqGSIb3DQEBCwUAA4GBAHzFYeTxkJdvcahc7C1eKNLkEnus+SBaMeuT\nQSeywW57xhhQ21CgFAZV2yieuBVoZbsZs4+9Nr7Lgx+QuE6xR3ZXOBeZVqx3bVqj\njc5T1srmqkU/gF/3CALuSuwHFyCIdmuYkgmnDUqE8vJ4eStuDaMVWjGvPYmi3am7\nyc1YAUB7\n-----END CERTIFICATE-----`\n\tpath := filepath.Join(tempDir, \"ca.pem\")\n\tif err := ioutil.WriteFile(path, []byte(data), 0644); err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tdata = `-----BEGIN CERTIFICATE-----\nMIICEjCCAXugAwIBAgIRAK9oivV13n8NjkrxlRObpfQwDQYJKoZIhvcNAQELBQAw\nEjEQMA4GA1UEChMHQWNtZSBDbzAgFw03MDAxMDEwMDAwMDBaGA8yMDg0MDEyOTE2\nMDAwMFowEjEQMA4GA1UEChMHQWNtZSBDbzCBnzANBgkqhkiG9w0BAQEFAAOBjQAw\ngYkCgYEAq03JACUKtgXTKoYNvFPEKmIk5fS4x2MxczPfiT8KLo2gVikfEMqCtoIt\nNcXL+xxYZ8dA2Y26Yk+WjeEzB+/W1qYbei6kZR+GOy3TFINJoqYFZq4sDF6c1Gch\nACqB4oE+4kLdq4hS9cM2IjEUovBQa+Q9frU7ONLLFfOWwJ5Wt0ECAwEAAaNmMGQw\nDgYDVR0PAQH/BAQDAgKkMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEwEB/wQF\nMAMBAf8wLAYDVR0RBCUwI4IJbG9jYWxob3N0hwR/AAABhxAAAAAAAAAAAAAAAAAA\nAAABMA0GCSqGSIb3DQEBCwUAA4GBAHzFYeTxkJdvcahc7C1eKNLkEnus+SBaMeuT\nQSeywW57xhhQ21CgFAZV2yieuBVoZbsZs4+9Nr7Lgx+QuE6xR3ZXOBeZVqx3bVqj\njc5T1srmqkU/gF/3CALuSuwHFyCIdmuYkgmnDUqE8vJ4eStuDaMVWjGvPYmi3am7\nyc1YAUB7\n-----END CERTIFICATE-----`\n\tpath = filepath.Join(tempDir, \"cert.pem\")\n\tif err = ioutil.WriteFile(path, []byte(data), 0644); err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\tdata = `-----BEGIN RSA PRIVATE KEY-----\nMIICXQIBAAKBgQCrTckAJQq2BdMqhg28U8QqYiTl9LjHYzFzM9+JPwoujaBWKR8Q\nyoK2gi01xcv7HFhnx0DZjbpiT5aN4TMH79bWpht6LqRlH4Y7LdMUg0mipgVmriwM\nXpzUZyEAKoHigT7iQt2riFL1wzYiMRSi8FBr5D1+tTs40ssV85bAnla3QQIDAQAB\nAoGABtvWcGsLQr549froEeJIuGm1kH975n/SOwqYqKYdgj+pa8m5tLJnCWes57pD\nsIox//W6YvuJuuX04TljEa5Iq7604Ien0x/FCCQshW/3/skEXkKc89+a1eLw9wt/\nc75qow5S5CG01Ht/+AqWCzkSADE/QTFfnSMLfYGfOm1X7AECQQDWAtGny7GGeBH+\nC/nMLags2q0nc0ZZ/QcdwMGtN2q0ZfiYhQw968FuEiWSeiiGhGUPTrOkERU/l93S\nNYrovJkNAkEAzOnnTdYWwmfs+LBQIYGQOmuTYbmzn0lpmeDUsCtSi3G+pRVCvpoc\n4sFMwrFTea1257fryUfxXUkE5mGYYqQiBQJAW6VvZNzc1AndIp68RUyUBUlL92Xt\nDaJGht5B0ky1/DTixWXMfUPVXK6WumhnrFtL78czNKJAKDB/xII7TzlcjQJBALhD\n2fj3fM3i0IitW9FVhhHSrNyjNjAVvv1d3URyIK8+YJZosPVe9ny+ID2vYgY4A4XJ\nsSD2LciaIerddj+1otUCQQDNLXTkZ2riEEhNoZfiDUumlJgAJw0M07SFyKyU60yn\nr3nPX1rJpUYnyRYsRf+F6dwvAqECKgQao/QRKriAubDk\n-----END RSA PRIVATE KEY-----`\n\tpath = filepath.Join(tempDir, \"key.pem\")\n\tif err = ioutil.WriteFile(path, []byte(data), 0644); err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\trmFunc := func() { os.RemoveAll(tempDir) }\n\treturn tempDir, rmFunc, nil\n}", "func Generate(priv crypto.Signer, req *CertificateRequest) (csr []byte, err error) {\n\tsigAlgo := helpers.SignerAlgo(priv)\n\tif sigAlgo == 
x509.UnknownSignatureAlgorithm {\n\t\treturn nil, cferr.New(cferr.PrivateKeyError, cferr.Unavailable)\n\t}\n\n\tsubj, err := req.Name()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar tpl = x509.CertificateRequest{\n\t\tSubject: subj,\n\t\tSignatureAlgorithm: sigAlgo,\n\t}\n\n\tfor i := range req.Hosts {\n\t\tif ip := net.ParseIP(req.Hosts[i]); ip != nil {\n\t\t\ttpl.IPAddresses = append(tpl.IPAddresses, ip)\n\t\t} else if email, err := mail.ParseAddress(req.Hosts[i]); err == nil && email != nil {\n\t\t\ttpl.EmailAddresses = append(tpl.EmailAddresses, email.Address)\n\t\t} else if uri, err := url.ParseRequestURI(req.Hosts[i]); err == nil && uri != nil {\n\t\t\ttpl.URIs = append(tpl.URIs, uri)\n\t\t} else {\n\t\t\ttpl.DNSNames = append(tpl.DNSNames, req.Hosts[i])\n\t\t}\n\t}\n\n\ttpl.ExtraExtensions = []pkix.Extension{}\n\n\tif req.CA != nil {\n\t\terr = appendCAInfoToCSR(req.CA, &tpl)\n\t\tif err != nil {\n\t\t\terr = cferr.Wrap(cferr.CSRError, cferr.GenerationFailed, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif req.DelegationEnabled {\n\t\ttpl.ExtraExtensions = append(tpl.Extensions, helpers.DelegationExtension)\n\t}\n\n\tif req.Extensions != nil {\n\t\terr = appendExtensionsToCSR(req.Extensions, &tpl)\n\t\tif err != nil {\n\t\t\terr = cferr.Wrap(cferr.CSRError, cferr.GenerationFailed, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tcsr, err = x509.CreateCertificateRequest(rand.Reader, &tpl, priv)\n\tif err != nil {\n\t\tlog.Errorf(\"failed to generate a CSR: %v\", err)\n\t\terr = cferr.Wrap(cferr.CSRError, cferr.BadRequest, err)\n\t\treturn\n\t}\n\tblock := pem.Block{\n\t\tType: \"CERTIFICATE REQUEST\",\n\t\tBytes: csr,\n\t}\n\n\tlog.Info(\"encoded CSR\")\n\tcsr = pem.EncodeToMemory(&block)\n\treturn\n}", "func GetFakeSSLCert(o []string, cn string, dns []string) (cert, key []byte) {\n\n\tvar priv interface{}\n\tvar err error\n\n\tpriv, err = rsa.GenerateKey(rand.Reader, 2048)\n\n\tif err != nil {\n\t\tklog.Exitf(\"failed to generate fake private key: %s\", err)\n\t}\n\n\tnotBefore := time.Now()\n\t// This certificate is valid for 365 days\n\tnotAfter := notBefore.Add(365 * 24 * time.Hour)\n\n\tserialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)\n\tserialNumber, err := rand.Int(rand.Reader, serialNumberLimit)\n\n\tif err != nil {\n\t\tklog.Exitf(\"failed to generate fake serial number: %s\", err)\n\t}\n\n\ttemplate := x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: o,\n\t\t\tCommonName: cn,\n\t\t},\n\t\tNotBefore: notBefore,\n\t\tNotAfter: notAfter,\n\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},\n\t\tBasicConstraintsValid: true,\n\t\tDNSNames: dns,\n\t}\n\tderBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.(*rsa.PrivateKey).PublicKey, priv)\n\tif err != nil {\n\t\tklog.Exitf(\"Failed to create fake certificate: %s\", err)\n\t}\n\n\tcert = pem.EncodeToMemory(&pem.Block{Type: \"CERTIFICATE\", Bytes: derBytes})\n\tkey = pem.EncodeToMemory(&pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(priv.(*rsa.PrivateKey))})\n\treturn cert, key\n}", "func createCertificate(data *dataBundle) (*certutil.ParsedCertBundle, error) {\n\tvar err error\n\tresult := &certutil.ParsedCertBundle{}\n\n\tserialNumber, err := certutil.GenerateSerialNumber()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := certutil.GeneratePrivateKey(data.params.KeyType,\n\t\tdata.params.KeyBits,\n\t\tresult); err != nil {\n\t\treturn 
nil, err\n\t}\n\n\tsubjKeyID, err := certutil.GetSubjKeyID(result.PrivateKey)\n\tif err != nil {\n\t\treturn nil, errutil.InternalError{Err: fmt.Sprintf(\"error getting subject key ID: %s\", err)}\n\t}\n\n\tcertTemplate := &x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tNotBefore: time.Now().Add(-30 * time.Second),\n\t\tNotAfter: data.params.NotAfter,\n\t\tIsCA: false,\n\t\tSubjectKeyId: subjKeyID,\n\t\tSubject: data.params.Subject,\n\t\tDNSNames: data.params.DNSNames,\n\t\tEmailAddresses: data.params.EmailAddresses,\n\t\tIPAddresses: data.params.IPAddresses,\n\t\tURIs: data.params.URIs,\n\t}\n\tif data.params.NotBeforeDuration > 0 {\n\t\tcertTemplate.NotBefore = time.Now().Add(-1 * data.params.NotBeforeDuration)\n\t}\n\n\tif err := handleOtherSANs(certTemplate, data.params.OtherSANs); err != nil {\n\t\treturn nil, errutil.InternalError{Err: errwrap.Wrapf(\"error marshaling other SANs: {{err}}\", err).Error()}\n\t}\n\n\t// Add this before calling addKeyUsages\n\tif data.signingBundle == nil {\n\t\tcertTemplate.IsCA = true\n\t} else if data.params.BasicConstraintsValidForNonCA {\n\t\tcertTemplate.BasicConstraintsValid = true\n\t\tcertTemplate.IsCA = false\n\t}\n\n\t// This will only be filled in from the generation paths\n\tif len(data.params.PermittedDNSDomains) > 0 {\n\t\tcertTemplate.PermittedDNSDomains = data.params.PermittedDNSDomains\n\t\tcertTemplate.PermittedDNSDomainsCritical = true\n\t}\n\n\taddPolicyIdentifiers(data, certTemplate)\n\n\taddKeyUsages(data, certTemplate)\n\n\taddExtKeyUsageOids(data, certTemplate)\n\n\tcertTemplate.IssuingCertificateURL = data.params.URLs.IssuingCertificates\n\tcertTemplate.CRLDistributionPoints = data.params.URLs.CRLDistributionPoints\n\tcertTemplate.OCSPServer = data.params.URLs.OCSPServers\n\n\tvar certBytes []byte\n\tif data.signingBundle != nil {\n\t\tswitch data.signingBundle.PrivateKeyType {\n\t\tcase certutil.RSAPrivateKey:\n\t\t\tcertTemplate.SignatureAlgorithm = x509.SHA256WithRSA\n\t\tcase certutil.ECPrivateKey:\n\t\t\tcertTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256\n\t\t}\n\n\t\tcaCert := data.signingBundle.Certificate\n\t\tcertTemplate.AuthorityKeyId = caCert.SubjectKeyId\n\n\t\tcertBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, caCert, result.PrivateKey.Public(), data.signingBundle.PrivateKey)\n\t} else {\n\t\t// Creating a self-signed root\n\t\tif data.params.MaxPathLength == 0 {\n\t\t\tcertTemplate.MaxPathLen = 0\n\t\t\tcertTemplate.MaxPathLenZero = true\n\t\t} else {\n\t\t\tcertTemplate.MaxPathLen = data.params.MaxPathLength\n\t\t}\n\n\t\tswitch data.params.KeyType {\n\t\tcase \"rsa\":\n\t\t\tcertTemplate.SignatureAlgorithm = x509.SHA256WithRSA\n\t\tcase \"ec\":\n\t\t\tcertTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256\n\t\t}\n\n\t\tcertTemplate.AuthorityKeyId = subjKeyID\n\t\tcertTemplate.BasicConstraintsValid = true\n\t\tcertBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, certTemplate, result.PrivateKey.Public(), result.PrivateKey)\n\t}\n\n\tif err != nil {\n\t\treturn nil, errutil.InternalError{Err: fmt.Sprintf(\"unable to create certificate: %s\", err)}\n\t}\n\n\tresult.CertificateBytes = certBytes\n\tresult.Certificate, err = x509.ParseCertificate(certBytes)\n\tif err != nil {\n\t\treturn nil, errutil.InternalError{Err: fmt.Sprintf(\"unable to parse created certificate: %s\", err)}\n\t}\n\n\tif data.signingBundle != nil {\n\t\tif len(data.signingBundle.Certificate.AuthorityKeyId) > 0 &&\n\t\t\t!bytes.Equal(data.signingBundle.Certificate.AuthorityKeyId, 
data.signingBundle.Certificate.SubjectKeyId) {\n\n\t\t\tresult.CAChain = []*certutil.CertBlock{\n\t\t\t\t&certutil.CertBlock{\n\t\t\t\t\tCertificate: data.signingBundle.Certificate,\n\t\t\t\t\tBytes: data.signingBundle.CertificateBytes,\n\t\t\t\t},\n\t\t\t}\n\t\t\tresult.CAChain = append(result.CAChain, data.signingBundle.CAChain...)\n\t\t}\n\t}\n\n\treturn result, nil\n}", "func GetAllCertif(w http.ResponseWriter, r *http.Request) {\r\n\tparams := mux.Vars(r)\r\n\tallcertif=allcertif[:0]\r\n\t//Verification of the access authorization and collect of the user's certificates\r\n\t\r\n\tif params[\"userid\"]==clientnum{\r\n\t\tfor _, item := range certif {\r\n\t\t\t//We add the user's certifications in 'allcertif'\r\n\t\t\tif item.Ownerid==clientnum {\r\n\t\t\t\tallcertif=append(allcertif, item)\r\n\t\t\t}\r\n\t\t}\r\n\t\t\r\n\t\t//We display the user's information for more clarity\r\n\t\tfor _, itemm := range owner {\r\n\t\t\tif itemm.Userid==clientnum {\r\n\t\t\tfmt.Fprintf(w, \"Client information: \")\r\n\t\t\tjson.NewEncoder(w).Encode(itemm)\r\n\t\t\tfmt.Fprintf(w, \"\\n\")\r\n\t\t\t}\r\n\t\t}\r\n\t\t\r\n\t\t//We display the user's certificates\r\n\t\tfmt.Fprintf(w, \"Certificates of the client: \")\r\n\t\tjson.NewEncoder(w).Encode(allcertif)\r\n\t\t\r\n\t}\telse {\r\n\tfmt.Fprintf(w, \"You can't access this information.\")\r\n\t}\r\n}", "func GenerateCertificate(b *Iblock, lc []BlockHeader, index int) string {\n\n\t// make a slice of hashes up that follow up to the genesis\n\thashSlice := []string{b.HashFromBlock()}\n\tfor index > 0 {\n\t\tprevLevelIndex := FindPrevLevelBlockIndex(lc, index)\n\t\tif lc[index].LevelPrevHash == lc[prevLevelIndex].Hash {\n\t\t\tindex = prevLevelIndex\n\t\t\thashSlice = append(hashSlice, lc[prevLevelIndex].Hash)\n\t\t}\n\t}\n\n\t// generate hash for hash slice\n\ttoHash := \"\"\n\tfor _, v := range hashSlice {\n\t\ttoHash += v\n\t}\n\t//fmt.Println(toHash)\n\treturn CreateHashFromString(toHash)\n}", "func createESCerts(script string) error {\n\tnamespace, err := k8sutil.GetWatchNamespace()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get watch namespace: %v\", err)\n\t}\n\t// #nosec G204: Subprocess launching should be audited\n\tcmd := exec.Command(\"bash\", script)\n\tcmd.Env = append(os.Environ(),\n\t\t\"NAMESPACE=\"+namespace,\n\t)\n\tif out, err := cmd.CombinedOutput(); err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"script\": script,\n\t\t\t\"out\": string(out)}).\n\t\t\tError(\"Failed to create certificates\")\n\t\treturn fmt.Errorf(\"error running script %s: %v\", script, err)\n\t}\n\treturn nil\n}", "func GenerateSelfSignedCert(host, certPath, keyPath string, alternateIPs []net.IP, alternateDNS []string) error {\n\tpriv, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttemplate := x509.Certificate{\n\t\tSerialNumber: big.NewInt(1),\n\t\tSubject: pkix.Name{\n\t\t\tCommonName: fmt.Sprintf(\"%s@%d\", host, time.Now().Unix()),\n\t\t},\n\t\tNotBefore: time.Now(),\n\t\tNotAfter: time.Now().Add(time.Hour * 24 * 365),\n\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},\n\t\tBasicConstraintsValid: true,\n\t\tIsCA: true,\n\t}\n\n\tif ip := net.ParseIP(host); ip != nil {\n\t\ttemplate.IPAddresses = append(template.IPAddresses, ip)\n\t} else {\n\t\ttemplate.DNSNames = append(template.DNSNames, host)\n\t}\n\n\ttemplate.IPAddresses = append(template.IPAddresses, 
alternateIPs...)\n\ttemplate.DNSNames = append(template.DNSNames, alternateDNS...)\n\n\tderBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Generate cert\n\tcertBuffer := bytes.Buffer{}\n\tif err := pem.Encode(&certBuffer, &pem.Block{Type: \"CERTIFICATE\", Bytes: derBytes}); err != nil {\n\t\treturn err\n\t}\n\n\t// Generate key\n\tkeyBuffer := bytes.Buffer{}\n\tif err := pem.Encode(&keyBuffer, &pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil {\n\t\treturn err\n\t}\n\n\t// Write cert\n\tif err := os.MkdirAll(filepath.Dir(certPath), os.FileMode(0755)); err != nil {\n\t\treturn err\n\t}\n\tif err := ioutil.WriteFile(certPath, certBuffer.Bytes(), os.FileMode(0644)); err != nil {\n\t\treturn err\n\t}\n\n\t// Write key\n\tif err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil {\n\t\treturn err\n\t}\n\tif err := ioutil.WriteFile(keyPath, keyBuffer.Bytes(), os.FileMode(0600)); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func createCertsSecret(\n\townerReference metav1.OwnerReference,\n\tsecretName string,\n\tserviceName string,\n\tnamespace string,\n) (*v1.Secret, error) {\n\n\t// Create a signing certificate\n\tcaKeyPair, err := triple.NewCA(fmt.Sprintf(\"%s-ca\", serviceName))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create root-ca: %v\", err)\n\t}\n\n\t// Create app certs signed through the certificate created above\n\tapiServerKeyPair, err := triple.NewServerKeyPair(\n\t\tcaKeyPair,\n\t\tstrings.Join([]string{serviceName, namespace, \"svc\"}, \".\"),\n\t\tserviceName,\n\t\tnamespace,\n\t\t[]string{},\n\t\t[]string{},\n\t)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create server key pair: %v\", err)\n\t}\n\n\t// create an opaque secret resource with certificate(s) created above\n\tsecret := &v1.Secret{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Secret\",\n\t\t\tAPIVersion: \"v1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: secretName,\n\t\t\tNamespace: namespace,\n\t\t\tOwnerReferences: []metav1.OwnerReference{ownerReference},\n\t\t},\n\t\tType: v1.SecretTypeOpaque,\n\t\tData: map[string][]byte{\n\t\t\tappCrt: cert.EncodeCertPEM(apiServerKeyPair.Cert),\n\t\t\tappKey: cert.EncodePrivateKeyPEM(apiServerKeyPair.Key),\n\t\t\trootCrt: cert.EncodeCertPEM(caKeyPair.Cert),\n\t\t},\n\t}\n\n\tresult := shared.Create(context.TODO(), secret)\n\n\treturn secret, result\n}", "func GenerateCert(config *CertConfig) (*rsa.PrivateKey, *x509.Certificate, *x509.Certificate, error) {\n\tif err := verifyConfig(config); err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\t// If no custom CAKey and CACert are provided we have to generate them\n\tcaKey, err := newPrivateKey()\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tcaCert, err := newSelfSignedCACertificate(caKey)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tkey, err := newPrivateKey()\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tcert, err := newSignedCertificate(config, key, caCert, caKey)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\treturn key, caCert, cert, nil\n\n}", "func (g InMemoryGenerator) generateCertificate(request credsgen.CertificateGenerationRequest) (credsgen.Certificate, error) {\n\tif !request.CA.IsCA {\n\t\treturn credsgen.Certificate{}, errors.Errorf(\"The passed CA is not a CA\")\n\t}\n\n\tcert := credsgen.Certificate{\n\t\tIsCA: false,\n\t}\n\n\t// Generate 
certificate\n\tsigningReq, privateKey, err := g.GenerateCertificateSigningRequest(request)\n\tif err != nil {\n\t\treturn credsgen.Certificate{}, err\n\t}\n\t// Sign certificate\n\tsigningProfile := &config.SigningProfile{\n\t\tUsage: []string{\"server auth\", \"client auth\"},\n\t\tExpiry: time.Duration(g.Expiry*24) * time.Hour,\n\t\tExpiryString: fmt.Sprintf(\"%dh\", g.Expiry*24),\n\t}\n\tcert.Certificate, err = g.signCertificate(signingReq, signingProfile, request)\n\tif err != nil {\n\t\treturn credsgen.Certificate{}, err\n\t}\n\tcert.PrivateKey = privateKey\n\n\treturn cert, nil\n}", "func (c *Cluster) generateAuthFile() error {\n\trootPath := path.Join(authPath, c.UUID)\n\tif err := os.MkdirAll(rootPath, 0775); err != nil {\n\t\tlogrus.Errorf(\"make dir[%s] failed, err: %s\", rootPath, err.Error())\n\t\treturn err\n\t}\n\n\tcaPath := path.Join(rootPath, caFile)\n\tcertPath := path.Join(rootPath, certFile)\n\tkeyPath := path.Join(rootPath, keyFile)\n\tif err := ioutil.WriteFile(caPath, []byte(c.CAData), 0755); err != nil {\n\t\tlogrus.Errorf(\"write %s failed, err: %s\", caPath, err.Error())\n\t\treturn err\n\t}\n\n\tif err := ioutil.WriteFile(certPath, []byte(c.CertData), 0755); err != nil {\n\t\tlogrus.Errorf(\"write %s failed, err: %s\", certPath, err.Error())\n\t\treturn err\n\t}\n\n\tif err := ioutil.WriteFile(keyPath, []byte(c.KeyData), 0755); err != nil {\n\t\tlogrus.Errorf(\"write %s failed, err: %s\", keyPath, err.Error())\n\t\treturn err\n\t}\n\tc.CAFile = caPath\n\tc.CertFile = certPath\n\tc.KeyFile = keyPath\n\treturn nil\n}", "func (c *CertGenerator) GetCA() *CA { return c.CA }", "func (state *RuntimeState) GenerateCert(domain string) ([]byte, error) {\n\n\t//generate new cert logic comes here and put in cache store\n\t//sleep for 10 secs to emulate external service call\n\ttime.Sleep(time.Second * 10)\n\tnewcert := \"foo-$\" + domain\n\n\tctx := context.Background()\n\n\tstate.CertMutex.Lock()\n\n\tdefer state.CertMutex.Unlock()\n\n\tdomainDir := filepath.Join(state.Config.CertStoreName, domain)\n\n\tcertstoreDir := autocert.DirCache(domainDir)\n\t//for every new domain name we create a new directory in \"certs\"(Cachestore) and store the cert.\n\n\terr := certstoreDir.Put(ctx, domain, []byte(newcert))\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil, err\n\t}\n\trenewinfo := RenewalInfo{time.Now().Unix(), time.Now().Add(time.Minute * state.Config.CertRenewAfterMin).Unix()}\n\tout, err := yaml.Marshal(renewinfo)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\terr = certstoreDir.Put(ctx, RenewalfileName, out)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tstate.RenewalInfoMutex.Lock()\n\tstate.Renewalinfo[domain] = renewinfo\n\tstate.RenewalInfoMutex.Unlock()\n\treturn []byte(newcert), nil\n}", "func GenerateMTLSCerts(cn string, dnsNames []string, ips []string, ttl time.Duration, length int) (*MTLSCerts, error) {\n\tnotBefore := time.Now()\n\tnotAfter := notBefore.Add(ttl)\n\n\tentity := pkix.Name{\n\t\tCountry: []string{\"US\"},\n\t\tCommonName: cn,\n\t}\n\n\tc := &MTLSCerts{\n\t\tcaCert: x509.Certificate{ // caCert is a fluentd CA certificate\n\t\t\tSubject: entity,\n\t\t\tNotBefore: notBefore,\n\t\t\tNotAfter: notAfter,\n\t\t\tIsCA: true,\n\t\t\tMaxPathLenZero: true,\n\t\t\tKeyUsage: x509.KeyUsageCRLSign | x509.KeyUsageCertSign,\n\t\t\tBasicConstraintsValid: true,\n\t\t\tIssuer: entity,\n\t\t},\n\t\tclientCert: x509.Certificate{ // clientCert is a fluentd client certificate\n\t\t\tSubject: entity,\n\t\t\tNotBefore: notBefore,\n\t\t\tNotAfter: 
notAfter,\n\t\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},\n\t\t\tKeyUsage: x509.KeyUsageDigitalSignature,\n\t\t\tIssuer: entity,\n\t\t},\n\t\tserverCert: x509.Certificate{ // Server CSR\n\t\t\tSubject: entity,\n\t\t\tNotBefore: notBefore,\n\t\t\tNotAfter: notAfter,\n\t\t\tKeyUsage: x509.KeyUsageDigitalSignature,\n\t\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},\n\t\t\tIssuer: entity,\n\t\t},\n\t}\n\n\t// Generate and assign serial numbers\n\tsn, err := rand.Int(rand.Reader, maxBigInt)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\tc.caCert.SerialNumber = sn\n\n\tsn, err = rand.Int(rand.Reader, maxBigInt)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\tc.clientCert.SerialNumber = sn\n\n\tsn, err = rand.Int(rand.Reader, maxBigInt)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\tc.serverCert.SerialNumber = sn\n\n\t// Append SANs and IPs\n\tif err := c.appendSANs(&c.serverCert, dnsNames, ips); err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\t// Run the generator\n\terr = c.generate(length)\n\tif err != nil {\n\t\treturn c, err\n\t}\n\n\treturn c, nil\n}", "func cacerts(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\taps := chi.URLParam(r, apsParamName)\n\n\tcerts, err := caFromContext(ctx).CACerts(ctx, aps, r)\n\tif writeOnError(ctx, w, logMsgCACertsFailed, err) {\n\t\treturn\n\t}\n\n\t// Update CA certificates cache with each explicit call to /cacerts.\n\tcertCacheFromContext(ctx).Add(aps, certs)\n\n\twriteResponse(w, mimeTypePKCS7, true, certs)\n}", "func (g InMemoryGenerator) generateCACertificate(request credsgen.CertificateGenerationRequest) (credsgen.Certificate, error) {\n\treq := &csr.CertificateRequest{\n\t\tCA: &csr.CAConfig{Expiry: fmt.Sprintf(\"%dh\", g.Expiry*24)},\n\t\tCN: request.CommonName,\n\t\tKeyRequest: &csr.KeyRequest{A: g.Algorithm, S: g.Bits},\n\t}\n\tca, csr, privateKey, err := initca.New(req)\n\tif err != nil {\n\t\treturn credsgen.Certificate{}, err\n\t}\n\n\tcert := credsgen.Certificate{\n\t\tIsCA: true,\n\t\tCertificate: ca,\n\t\tPrivateKey: privateKey,\n\t}\n\tif request.CA.IsCA {\n\t\tsigningProfile := &config.SigningProfile{\n\t\t\tUsage: []string{\"cert sign\", \"crl sign\"},\n\t\t\tExpiryString: \"43800h\",\n\t\t\tExpiry: 5 * helpers.OneYear,\n\t\t\tCAConstraint: config.CAConstraint{\n\t\t\t\tIsCA: true,\n\t\t\t},\n\t\t}\n\t\tcert.Certificate, err = g.signCertificate(csr, signingProfile, request)\n\t\tif err != nil {\n\t\t\treturn credsgen.Certificate{}, err\n\t\t}\n\t}\n\n\treturn cert, nil\n}", "func storeCAChain(config *lib.ClientConfig, si *lib.GetCAInfoResponse) error {\n\tmspDir := config.MSPDir\n\t// Get a unique name to use for filenames\n\tserverURL, err := url.Parse(config.URL)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfname := serverURL.Host\n\tif config.CAName != \"\" {\n\t\tfname = fmt.Sprintf(\"%s-%s\", fname, config.CAName)\n\t}\n\tfname = strings.Replace(fname, \":\", \"-\", -1)\n\tfname = strings.Replace(fname, \".\", \"-\", -1) + \".pem\"\n\ttlsfname := fmt.Sprintf(\"tls-%s\", fname)\n\n\trootCACertsDir := path.Join(mspDir, \"cacerts\")\n\tintCACertsDir := path.Join(mspDir, \"intermediatecerts\")\n\ttlsRootCACertsDir := path.Join(mspDir, \"tlscacerts\")\n\ttlsIntCACertsDir := path.Join(mspDir, \"tlsintermediatecerts\")\n\n\tvar rootBlks [][]byte\n\tvar intBlks [][]byte\n\tchain := si.CAChain\n\tfor len(chain) > 0 {\n\t\tvar block *pem.Block\n\t\tblock, chain = pem.Decode(chain)\n\t\tif block == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tcert, err 
:= x509.ParseCertificate(block.Bytes)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"Failed to parse certificate in the CA chain\")\n\t\t}\n\n\t\tif !cert.IsCA {\n\t\t\treturn errors.New(\"A certificate in the CA chain is not a CA certificate\")\n\t\t}\n\n\t\t// If authority key id is not present or if it is present and equal to subject key id,\n\t\t// then it is a root certificate\n\t\tif len(cert.AuthorityKeyId) == 0 || bytes.Equal(cert.AuthorityKeyId, cert.SubjectKeyId) {\n\t\t\trootBlks = append(rootBlks, pem.EncodeToMemory(block))\n\t\t} else {\n\t\t\tintBlks = append(intBlks, pem.EncodeToMemory(block))\n\t\t}\n\t}\n\n\t// Store the root certificates in the \"cacerts\" msp folder\n\tcertBytes := bytes.Join(rootBlks, []byte(\"\"))\n\tif len(certBytes) > 0 {\n\t\tif config.Enrollment.Profile == \"tls\" {\n\t\t\terr := storeToFile(\"TLS root CA certificate\", tlsRootCACertsDir, tlsfname, certBytes)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\terr = storeToFile(\"root CA certificate\", rootCACertsDir, fname, certBytes)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// Store the intermediate certificates in the \"intermediatecerts\" msp folder\n\tcertBytes = bytes.Join(intBlks, []byte(\"\"))\n\tif len(certBytes) > 0 {\n\t\tif config.Enrollment.Profile == \"tls\" {\n\t\t\terr = storeToFile(\"TLS intermediate certificates\", tlsIntCACertsDir, tlsfname, certBytes)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\terr = storeToFile(\"intermediate CA certificates\", intCACertsDir, fname, certBytes)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func GenerateCSR(subject string, keylength int) (*KymaCerts, error) {\n\n\tkeyBytes, _ := rsa.GenerateKey(rand.Reader, keylength)\n\n\tsubjectTrimed := strings.TrimSuffix(subject, \",\")\n\tentries := strings.Split(subjectTrimed, \",\")\n\tsubjectMapped := make(map[string]string)\n\n\tfor _, e := range entries {\n\t\tparts := strings.Split(e, \"=\")\n\t\tsubjectMapped[parts[0]] = parts[1]\n\t}\n\n\tsubj := pkix.Name{\n\t\tCommonName: subjectMapped[\"CN\"],\n\t\tCountry: []string{subjectMapped[\"C\"]},\n\t\tProvince: []string{subjectMapped[\"ST\"]},\n\t\tLocality: []string{subjectMapped[\"L\"]},\n\t\tOrganization: []string{subjectMapped[\"O\"]},\n\t\tOrganizationalUnit: []string{subjectMapped[\"OU\"]},\n\t}\n\n\ttype basicConstraints struct {\n\t\tIsCA bool `asn1:\"optional\"`\n\t\tMaxPathLen int `asn1:\"optional,default:-1\"`\n\t}\n\n\tval, _ := asn1.Marshal(basicConstraints{true, 0})\n\n\tvar csrTemplate = x509.CertificateRequest{\n\t\tSubject: subj,\n\t\tSignatureAlgorithm: x509.SHA256WithRSA,\n\t\tExtraExtensions: []pkix.Extension{\n\t\t\t{\n\t\t\t\tId: asn1.ObjectIdentifier{2, 5, 29, 19},\n\t\t\t\tValue: val,\n\t\t\t\tCritical: true,\n\t\t\t},\n\t\t},\n\t}\n\n\tcsrBytes, _ := x509.CreateCertificateRequest(rand.Reader, &csrTemplate, keyBytes)\n\n\tcsr := pem.EncodeToMemory(&pem.Block{\n\t\tType: \"CERTIFICATE REQUEST\", Bytes: csrBytes,\n\t})\n\n\t// step: generate a serial number\n\tserial, _ := rand.Int(rand.Reader, (&big.Int{}).Exp(big.NewInt(2), big.NewInt(159), nil))\n\n\tnow := time.Now()\n\t// step: create the request template\n\ttemplate := x509.Certificate{\n\t\tSerialNumber: serial,\n\t\tSubject: subj,\n\t\tNotBefore: now.Add(-10 * time.Minute).UTC(),\n\t\tNotAfter: now.Add(time.Duration(1200)).UTC(),\n\t\tBasicConstraintsValid: true,\n\t\tIsCA: true,\n\t\tKeyUsage: x509.KeyUsageDigitalSignature | 
x509.KeyUsageCertSign,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},\n\t}\n\n\t// step: sign the certificate authority\n\tcertificate, _ := x509.CreateCertificate(rand.Reader, &template, &template, &keyBytes.PublicKey, keyBytes)\n\n\tclientCrt := pem.EncodeToMemory(&pem.Block{Type: \"CERTIFICATE\", Bytes: certificate})\n\n\tprivateKey := pem.EncodeToMemory(&pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(keyBytes)})\n\n\treturn &KymaCerts{\n\t\tPrivateKey: privateKey,\n\t\tCRT: clientCrt,\n\t\tCSR: csr,\n\t}, nil\n}", "func GenerateCA() (*util_tls.KeyPair, error) {\n\tsubject := pkix.Name{\n\t\tOrganization: []string{\"Kuma\"},\n\t\tOrganizationalUnit: []string{\"Mesh\"},\n\t\tCommonName: \"Envoy Admin CA\",\n\t}\n\treturn util_tls.GenerateCA(util_tls.DefaultKeyType, subject)\n}" ]
[ "0.68635654", "0.65385413", "0.6529925", "0.65109557", "0.6504905", "0.64903206", "0.6441566", "0.6328878", "0.61755025", "0.60668045", "0.6039149", "0.5986134", "0.58960557", "0.5793944", "0.57730484", "0.57411814", "0.5688312", "0.5682993", "0.567077", "0.5652689", "0.5600668", "0.5572516", "0.55702263", "0.5567031", "0.5553817", "0.5510342", "0.5497124", "0.54918736", "0.54864854", "0.5485401", "0.54581743", "0.5453874", "0.5448241", "0.54309267", "0.54293853", "0.5358824", "0.53528196", "0.53502303", "0.53454167", "0.53423005", "0.53378034", "0.53343254", "0.5325997", "0.5320892", "0.5285365", "0.52739006", "0.5269128", "0.52611816", "0.5250245", "0.5249857", "0.52391136", "0.52362114", "0.5235855", "0.5227922", "0.51941603", "0.5183216", "0.51737446", "0.5173162", "0.51680714", "0.515415", "0.5128936", "0.51274896", "0.51273566", "0.5126503", "0.51263344", "0.51251966", "0.5094993", "0.5077721", "0.5067341", "0.50623095", "0.5051695", "0.50447047", "0.5021376", "0.5019774", "0.5010735", "0.50050074", "0.49967033", "0.49958524", "0.4994868", "0.49872196", "0.49830833", "0.49766356", "0.4975405", "0.49617374", "0.49541712", "0.49504825", "0.49436218", "0.4942957", "0.4939062", "0.49286857", "0.4918253", "0.4917357", "0.49144462", "0.4906933", "0.49065018", "0.49048373", "0.49020094", "0.4889972", "0.48866975", "0.4880872" ]
0.7780582
0
NamesInCert finds domain names and IPs in a server certificate
func NamesInCert(cert_file string) (names []string) { cert, err := ParseCertPemFile(cert_file) if err != nil { log.Printf("ParseCertPemFile %s: %v", cert_file, err) return } for _, netip := range cert.IPAddresses { ip := netip.String() names = append(names, ip) } for _, domain := range cert.DNSNames { names = append(names, domain) } return }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func getNameservers(resolvConf []byte) []string {\n\tnameservers := []string{}\n\tfor _, line := range getLines(resolvConf) {\n\t\tns := nsRegexp.FindSubmatch(line)\n\t\tif len(ns) > 0 {\n\t\t\tnameservers = append(nameservers, string(ns[1]))\n\t\t}\n\t}\n\treturn nameservers\n}", "func GetNameservers(resolvConf []byte, kind int) []string {\n\tvar nameservers []string\n\tfor _, line := range getLines(resolvConf, []byte(\"#\")) {\n\t\tvar ns [][]byte\n\t\tif kind == IP {\n\t\t\tns = nsRegexp.FindSubmatch(line)\n\t\t} else if kind == IPv4 {\n\t\t\tns = nsIPv4Regexpmatch.FindSubmatch(line)\n\t\t} else if kind == IPv6 {\n\t\t\tns = nsIPv6Regexpmatch.FindSubmatch(line)\n\t\t}\n\t\tif len(ns) > 0 {\n\t\t\tnameservers = append(nameservers, string(ns[1]))\n\t\t}\n\t}\n\treturn nameservers\n}", "func ExtractDomains(spec *api.CertificateSpec) ([]string, error) {\n\tvar err error\n\tcn := spec.CommonName\n\tif cn == nil || *cn == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing common name\")\n\t}\n\tdnsNames := spec.DNSNames\n\tif spec.CommonName != nil {\n\t\tif spec.CSR != nil {\n\t\t\treturn nil, fmt.Errorf(\"cannot specify both commonName and csr\")\n\t\t}\n\t\tif len(spec.DNSNames) >= 100 {\n\t\t\treturn nil, fmt.Errorf(\"invalid number of DNS names: %d (max 99)\", len(spec.DNSNames))\n\t\t}\n\t\tcount := utf8.RuneCount([]byte(*spec.CommonName))\n\t\tif count > 64 {\n\t\t\treturn nil, fmt.Errorf(\"the Common Name is limited to 64 characters (X.509 ASN.1 specification), but first given domain %s has %d characters\", *spec.CommonName, count)\n\t\t}\n\t} else {\n\t\tif spec.CSR == nil {\n\t\t\treturn nil, fmt.Errorf(\"either domains or csr must be specified\")\n\t\t}\n\t\tcn, dnsNames, err = ExtractCommonNameAnDNSNames(spec.CSR)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn append([]string{*cn}, dnsNames...), nil\n}", "func NamesFromCertificateRequest(req *x509.CertificateRequest) []string {\n\tvar names []string\n\n\tif req.Subject.CommonName != \"\" {\n\t\tnames = append(names, req.Subject.CommonName)\n\t}\n\n\tfor _, n := range req.DNSNames {\n\t\tif req.Subject.CommonName == n {\n\t\t\tcontinue\n\t\t}\n\t\tnames = append(names, n)\n\t}\n\n\treturn names\n}", "func subjectAltNames(namespace, svcName string) ([]string, []net.IP) {\n\treturn []string{\n\t\t\"localhost\",\n\t\tsvcName,\n\t\tfmt.Sprintf(\"%v.%v.svc\", svcName, namespace),\n\t\tfmt.Sprintf(\"%v.%v.svc.cluster.local\", svcName, namespace),\n\t}, []net.IP{net.ParseIP(\"127.0.0.1\")}\n}", "func GetNameserversAsCIDR(resolvConf []byte) []string {\n\tvar nameservers []string\n\tfor _, nameserver := range GetNameservers(resolvConf, IP) {\n\t\tvar address string\n\t\t// If IPv6, strip zone if present\n\t\tif strings.Contains(nameserver, \":\") {\n\t\t\taddress = strings.Split(nameserver, \"%\")[0] + \"/128\"\n\t\t} else {\n\t\t\taddress = nameserver + \"/32\"\n\t\t}\n\t\tnameservers = append(nameservers, address)\n\t}\n\treturn nameservers\n}", "func getDNSNameservers(resolvConfPath string) ([]string, error) {\n\tif resolvConfPath == \"\" {\n\t\tresolvConfPath = defaultResolvConfPath\n\t}\n\n\tfile, err := os.Open(resolvConfPath)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"Could not open '%s'.\", resolvConfPath)\n\t}\n\tdefer mustClose(file)\n\n\tscanner := bufio.NewScanner(file)\n\n\tvar servers []string\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tmatch := resolvConfNameserverPattern.FindStringSubmatch(line)\n\t\tif len(match) == 2 {\n\t\t\tservers = append(servers, 
match[1])\n\t\t}\n\t}\n\n\tif err = scanner.Err(); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"Could not read '%s'.\", resolvConfPath)\n\t}\n\n\tif len(servers) == 0 {\n\t\treturn nil, errors.Errorf(\"No nameservers found in '%s'.\", resolvConfPath)\n\t}\n\n\treturn servers, nil\n}", "func parseNameServer() ([]net.IP, error) {\n\tfile, err := os.Open(\"/etc/resolv.conf\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error opening /etc/resolv.conf: %v\", err)\n\t}\n\tdefer file.Close()\n\n\tscan := bufio.NewScanner(file)\n\tscan.Split(bufio.ScanLines)\n\n\tip := make([]net.IP, 0)\n\n\tfor scan.Scan() {\n\t\tserverString := scan.Text()\n\t\tif strings.Contains(serverString, \"nameserver\") {\n\t\t\ttmpString := strings.Replace(serverString, \"nameserver\", \"\", 1)\n\t\t\tnameserver := strings.TrimSpace(tmpString)\n\t\t\tsip := net.ParseIP(nameserver)\n\t\t\tif sip != nil && !sip.Equal(config.Config.ListenIP) {\n\t\t\t\tip = append(ip, sip)\n\t\t\t}\n\t\t}\n\t}\n\tif len(ip) == 0 {\n\t\treturn nil, fmt.Errorf(\"there is no nameserver in /etc/resolv.conf\")\n\t}\n\treturn ip, nil\n}", "func (c *Client) DNSNameservers(ctx context.Context) ([]string, error) {\n\tconst uriFmt = \"/api/v2/domain/%v/dns/nameservers\"\n\n\treq, err := c.buildRequest(ctx, http.MethodGet, fmt.Sprintf(uriFmt, c.domain), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar resp DomainDNSNameservers\n\tif err = c.performRequest(req, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp.DNS, nil\n}", "func GetCertificateForConnect(serverName string) ([]string, error) {\n\tdataPath := GetTLSCertificateDataPath()\n\tcertPath, err := filepath.Abs(filepath.Join(dataPath, ServerNameWithoutPort(serverName)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !strings.HasPrefix(certPath, dataPath) {\n\t\treturn nil, fmt.Errorf(\"could not get certificate for host %s\", serverName)\n\t}\n\tcertificates, err := ParseTLSCertificatesFromPath(certPath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, nil\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif len(certificates) == 0 {\n\t\treturn nil, fmt.Errorf(\"no certificates found in existing file\")\n\t}\n\n\treturn certificates, nil\n}", "func getDNSConf() []string {\n\tservers := []string{}\n\t_, err := os.Stat(\"/etc/resolv.conf\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tj, _ := dns.ClientConfigFromFile(\"/etc/resolv.conf\")\n\n\tservers = append(servers, fmt.Sprintf(\"%s:53\", j.Servers[0]))\n\tif len(servers) < 2 {\n\t\tservers = append(servers, fmt.Sprintf(\"%s:53\", j.Servers[0]))\n\t} else {\n\t\tservers = append(servers, fmt.Sprintf(\"%s:53\", j.Servers[1]))\n\t}\n\n\treturn servers\n\n}", "func (m *MacOSEnterpriseWiFiConfiguration) GetTrustedServerCertificateNames()([]string) {\n val, err := m.GetBackingStore().Get(\"trustedServerCertificateNames\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]string)\n }\n return nil\n}", "func ExtractCommonNameAnDNSNames(csr []byte) (cn *string, san []string, err error) {\n\tcertificateRequest, err := extractCertificateRequest(csr)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"parsing CSR failed: %w\", err)\n\t\treturn\n\t}\n\tcnvalue := certificateRequest.Subject.CommonName\n\tcn = &cnvalue\n\tsan = certificateRequest.DNSNames[:]\n\tfor _, ip := range certificateRequest.IPAddresses {\n\t\tsan = append(san, ip.String())\n\t}\n\treturn\n}", "func getSearchDomains(resolvConf []byte) []string {\n\tdomains := []string{}\n\tfor _, line := range getLines(resolvConf) 
{\n\t\tmatch := searchRegexp.FindSubmatch(line)\n\t\tif match == nil {\n\t\t\tcontinue\n\t\t}\n\t\tdomains = strings.Fields(string(match[1]))\n\t}\n\treturn domains\n}", "func (g *Domain) GetNameServers(domain string) (nameservers []string, err error) {\n\t_, err = g.client.Get(\"domains/\"+domain+\"/nameservers\", nil, &nameservers)\n\treturn\n}", "func AssertCertificateHasDNSNames(t *testing.T, cert *x509.Certificate, DNSNames ...string) {\n\tfor _, DNSName := range DNSNames {\n\t\tfound := false\n\t\tfor _, val := range cert.DNSNames {\n\t\t\tif val == DNSName {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\tt.Errorf(\"cert does not contain DNSName %s\", DNSName)\n\t\t}\n\t}\n}", "func cmdGetHostnames() ([]string, map[string]string, error) {\n\t// Sanity check conflictable values\n\tif *useHostname && *requiredSuffix != \"\" {\n\t\treturn []string{}, nil, errors.New(\"--use-hostname overrides --required-suffix, meaning it will have no effect.\")\n\t}\n\n\tvar suffix string\n\tif *useHostname {\n\t\tsuffix = *hostname\n\t} else if *requiredSuffix != \"\" {\n\t\tsuffix = *requiredSuffix\n\t}\n\n\tpairs, err := resolveIPsToHostnames()\n\tif err != nil {\n\t\treturn []string{}, nil, errors.New(fmt.Sprintln(\"Error while resolving local IPs to hostnames:\", err))\n\t}\n\n\tresultMap := make(map[string]string)\n\tmatchedIPs := []string{}\n\tfor _, pair := range pairs {\n\t\tfor _, hostname := range pair.Hostnames {\n\t\t\tif strings.HasSuffix(hostname, suffix) {\n\t\t\t\texistingValue, ok := resultMap[pair.Ip.String()]\n\t\t\t\tif ok {\n\t\t\t\t\tresultMap[pair.Ip.String()] = strings.Join([]string{existingValue, hostname}, *entryJoiner)\n\t\t\t\t} else {\n\t\t\t\t\tresultMap[pair.Ip.String()] = hostname\n\t\t\t\t\tmatchedIPs = append(matchedIPs, pair.Ip.String())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tlog.Debugln(\"Exiting successfully.\")\n\treturn matchedIPs, resultMap, nil\n}", "func ExpectCertificateDNSNamesToMatch(csr *certificatesv1.CertificateSigningRequest, _ crypto.Signer) error {\n\tcert, err := pki.DecodeX509CertificateBytes(csr.Status.Certificate)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq, err := pki.DecodeX509CertificateRequestBytes(csr.Spec.Request)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !util.EqualUnsorted(cert.DNSNames, req.DNSNames) &&\n\t\t!util.EqualUnsorted(cert.DNSNames, append(req.DNSNames, req.Subject.CommonName)) {\n\t\treturn fmt.Errorf(\"Expected certificate valid for DNSNames %v, but got a certificate valid for DNSNames %v\", req.DNSNames, cert.DNSNames)\n\t}\n\n\treturn nil\n}", "func verifyCertSkipHostname(roots *x509.CertPool) func([][]byte, [][]*x509.Certificate) error {\n\treturn func(certs [][]byte, _ [][]*x509.Certificate) error {\n\t\topts := x509.VerifyOptions{\n\t\t\tRoots: roots,\n\t\t\tCurrentTime: time.Now(),\n\t\t\tIntermediates: x509.NewCertPool(),\n\t\t}\n\n\t\tleaf, err := x509.ParseCertificate(certs[0])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, asn1Data := range certs[1:] {\n\t\t\tcert, err := x509.ParseCertificate(asn1Data)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\topts.Intermediates.AddCert(cert)\n\t\t}\n\n\t\t_, err = leaf.Verify(opts)\n\t\treturn err\n\t}\n}", "func verifyCertSkipHostname(roots *x509.CertPool) func([][]byte, [][]*x509.Certificate) error {\n\treturn func(certs [][]byte, _ [][]*x509.Certificate) error {\n\t\topts := x509.VerifyOptions{\n\t\t\tRoots: roots,\n\t\t\tCurrentTime: time.Now(),\n\t\t\tIntermediates: x509.NewCertPool(),\n\t\t}\n\n\t\tleaf, 
err := x509.ParseCertificate(certs[0])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, asn1Data := range certs[1:] {\n\t\t\tcert, err := x509.ParseCertificate(asn1Data)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\topts.Intermediates.AddCert(cert)\n\t\t}\n\n\t\t_, err = leaf.Verify(opts)\n\t\treturn err\n\t}\n}", "func (c *Certificate) CollectAllNames() []string {\n\tvar names []string\n\n\tif isValidName(c.Subject.CommonName) {\n\t\tnames = append(names, c.Subject.CommonName)\n\t}\n\n\tfor _, name := range c.DNSNames {\n\t\tif isValidName(name) {\n\t\t\tnames = append(names, name)\n\t\t} else if !strings.Contains(name, \".\") { //just a TLD\n\t\t\tnames = append(names, name)\n\t\t}\n\n\t}\n\n\tfor _, name := range c.URIs {\n\t\tif util.IsURL(name) {\n\t\t\tnames = append(names, name)\n\t\t}\n\t}\n\n\tfor _, name := range c.IPAddresses {\n\t\tstr := name.String()\n\t\tif util.IsURL(str) {\n\t\t\tnames = append(names, str)\n\t\t}\n\t}\n\n\treturn purgeNameDuplicates(names)\n}", "func ToNameservers(nss []string) ([]*Nameserver, error) {\n\tnservers := []*Nameserver{}\n\tfor _, ns := range nss {\n\t\tif strings.HasSuffix(ns, \".\") {\n\t\t\treturn nil, fmt.Errorf(\"provider code leaves trailing dot on nameserver\")\n\t\t\t// If you see this error, maybe the provider should call\n\t\t\t// ToNameserversStripTD instead.\n\t\t}\n\t\tnservers = append(nservers, &Nameserver{Name: ns})\n\t}\n\treturn nservers, nil\n}", "func (dns *EdgeDNS) parseNameServer() ([]net.IP, error) {\n\tfile, err := os.Open(\"/etc/resolv.conf\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error opening /etc/resolv.conf: %v\", err)\n\t}\n\tdefer file.Close()\n\n\tscan := bufio.NewScanner(file)\n\tscan.Split(bufio.ScanLines)\n\n\tip := make([]net.IP, 0)\n\n\tfor scan.Scan() {\n\t\tserverString := scan.Text()\n\t\tif strings.Contains(serverString, \"nameserver\") {\n\t\t\ttmpString := strings.Replace(serverString, \"nameserver\", \"\", 1)\n\t\t\tnameserver := strings.TrimSpace(tmpString)\n\t\t\tsip := net.ParseIP(nameserver)\n\t\t\tif sip != nil && !sip.Equal(dns.ListenIP) {\n\t\t\t\tip = append(ip, sip)\n\t\t\t}\n\t\t}\n\t}\n\tif len(ip) == 0 {\n\t\treturn nil, fmt.Errorf(\"there is no nameserver in /etc/resolv.conf\")\n\t}\n\treturn ip, nil\n}", "func getCertExtensions(cert *x509.Certificate) Extensions {\n\t// initialize []string to store them as `[]` instead of null\n\tsan := make([]string, 0)\n\tsan = append(san, cert.DNSNames...)\n\tcrld := make([]string, 0)\n\tcrld = append(crld, cert.CRLDistributionPoints...)\n\tconstraints, _ := certconstraints.Get(cert)\n\tipNetSliceToStringSlice := func(in []*net.IPNet) []string {\n\t\tout := make([]string, 0)\n\t\tfor _, ipnet := range in {\n\t\t\tout = append(out, ipnet.String())\n\t\t}\n\t\treturn out\n\t}\n\tpermittedIPAddresses := ipNetSliceToStringSlice(constraints.PermittedIPRanges)\n\texcludedIPAddresses := ipNetSliceToStringSlice(constraints.ExcludedIPRanges)\n\text := Extensions{\n\t\tAuthorityKeyId: base64.StdEncoding.EncodeToString(cert.AuthorityKeyId),\n\t\tSubjectKeyId: base64.StdEncoding.EncodeToString(cert.SubjectKeyId),\n\t\tKeyUsage: getKeyUsages(cert),\n\t\tExtendedKeyUsage: getExtKeyUsages(cert),\n\t\tExtendedKeyUsageOID: getExtKeyUsageOIDs(cert),\n\t\tPolicyIdentifiers: getPolicyIdentifiers(cert),\n\t\tSubjectAlternativeName: san,\n\t\tCRLDistributionPoints: crld,\n\t\tPermittedDNSDomains: constraints.PermittedDNSDomains,\n\t\tExcludedDNSDomains: constraints.ExcludedDNSDomains,\n\t\tPermittedIPAddresses: 
permittedIPAddresses,\n\t\tExcludedIPAddresses: excludedIPAddresses,\n\t\tIsTechnicallyConstrained: certconstraints.IsTechnicallyConstrained(cert),\n\t}\n\treturn ext\n}", "func mynames() ([]string, error) {\n\th, err := os.Hostname()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tret := []string{h, \"127.0.0.1/8\", \"::1/128\"}\n\treturn ret, nil\n}", "func isSubjectAlternateName(cert *x509.Certificate, name string) bool {\n\tfor _, dnsName := range cert.DNSNames {\n\t\tif dnsName == name {\n\t\t\treturn true\n\t\t}\n\t}\n\tfor _, emailAddress := range cert.EmailAddresses {\n\t\tif emailAddress == name {\n\t\t\treturn true\n\t\t}\n\t}\n\tfor _, ip := range cert.IPAddresses {\n\t\tif ip.String() == name {\n\t\t\treturn true\n\t\t}\n\t}\n\tfor _, url := range cert.URIs {\n\t\tif url.String() == name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func CommonNamesForCertPEM(certPEM []byte) ([]string, error) {\n\tcerts, err := ParseCertPEM(certPEM)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\toutput := make([]string, len(certs))\n\tfor index, cert := range certs {\n\t\toutput[index] = cert.Subject.CommonName\n\t}\n\treturn output, nil\n}", "func (a *Server) validate(req *http.Request) error {\n\t// if `requestheader-allowed-names` was empty, allow any CN\n\tif len(a.allowedNames) > 0 {\n\t\tfor _, cn := range a.allowedNames {\n\t\t\tfor _, clientCert := range req.TLS.PeerCertificates {\n\t\t\t\t// Check Common Name and Subject Alternate Name(s)\n\t\t\t\tif cn == clientCert.Subject.CommonName || isSubjectAlternateName(clientCert, cn) {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// Build the set of certificate names for the error message\n\t\tclientNames := []string{}\n\t\tfor _, clientCert := range req.TLS.PeerCertificates {\n\t\t\tclientNames = append(clientNames, clientCert.Subject.CommonName)\n\t\t}\n\t\treturn fmt.Errorf(\"no valid CN found. allowed names: %s, client names: %s\", a.allowedNames, clientNames)\n\t}\n\treturn nil\n}", "func loadCertFiles(ctx context.Context, certsDir string) ([]hostConfig, error) {\n\tfs, err := os.ReadDir(certsDir)\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn nil, err\n\t}\n\thosts := make([]hostConfig, 1)\n\tfor _, f := range fs {\n\t\tif f.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasSuffix(f.Name(), \".crt\") {\n\t\t\thosts[0].caCerts = append(hosts[0].caCerts, filepath.Join(certsDir, f.Name()))\n\t\t}\n\t\tif strings.HasSuffix(f.Name(), \".cert\") {\n\t\t\tvar pair [2]string\n\t\t\tcertFile := f.Name()\n\t\t\tpair[0] = filepath.Join(certsDir, certFile)\n\t\t\t// Check if key also exists\n\t\t\tkeyFile := filepath.Join(certsDir, certFile[:len(certFile)-5]+\".key\")\n\t\t\tif _, err := os.Stat(keyFile); err == nil {\n\t\t\t\tpair[1] = keyFile\n\t\t\t} else if !os.IsNotExist(err) {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\thosts[0].clientPairs = append(hosts[0].clientPairs, pair)\n\t\t}\n\t}\n\treturn hosts, nil\n}", "func hostnameInSNI(name string) string {\n\thost := name\n\tif len(host) > 0 && host[0] == '[' && host[len(host)-1] == ']' {\n\t\thost = host[1 : len(host)-1]\n\t}\n\tif i := strings.LastIndex(host, \"%\"); i > 0 {\n\t\thost = host[:i]\n\t}\n\tif net.ParseIP(host) != nil {\n\t\treturn \"\"\n\t}\n\tfor len(name) > 0 && name[len(name)-1] == '.' 
{\n\t\tname = name[:len(name)-1]\n\t}\n\treturn name\n}", "func (o SslCertificateManagedSslCertificateOutput) Domains() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v SslCertificateManagedSslCertificate) []string { return v.Domains }).(pulumi.StringArrayOutput)\n}", "func cmdGetIPs() ([]string, map[string]string, error) {\n\t// Sanity check conflictable values\n\tif *useHostname && *requiredSuffix != \"\" {\n\t\treturn []string{}, nil, errors.New(\"--use-hostname overrides --required-suffix, meaning it will have no effect.\")\n\t}\n\n\tvar suffix string\n\tif *useHostname {\n\t\tsuffix = *hostname\n\t} else if *requiredSuffix != \"\" {\n\t\tsuffix = *requiredSuffix\n\t}\n\n\tpairs, err := resolveIPsToHostnames()\n\tif err != nil {\n\t\treturn []string{}, nil, errors.New(fmt.Sprintln(\"Error while resolving local IPs to hostnames:\", err))\n\t}\n\n\tresultMap := make(map[string]string)\n\tmatchedHostnames := []string{}\n\tfor _, pair := range pairs {\n\t\tfor _, hostname := range pair.Hostnames {\n\t\t\tif strings.HasSuffix(hostname, suffix) {\n\t\t\t\tresultMap[hostname] = pair.Ip.String()\n\t\t\t\tmatchedHostnames = append(matchedHostnames, hostname)\n\t\t\t\tbreak // Found a match for this IP - break loop\n\t\t\t}\n\t\t}\n\t}\n\n\treturn matchedHostnames, resultMap, nil\n}", "func ClientCertSubstrings() string {\n\treturn clientCertSubstrings\n}", "func (e *endpoints) getCerts(ctx context.Context) ([]tls.Certificate, *x509.CertPool, error) {\n\tds := e.c.Catalog.DataStores()[0]\n\n\tresp, err := ds.FetchBundle(ctx, &datastore_pb.FetchBundleRequest{\n\t\tTrustDomainId: e.c.TrustDomain.String(),\n\t})\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"get bundle from datastore: %v\", err)\n\t}\n\tif resp.Bundle == nil {\n\t\treturn nil, nil, errors.New(\"bundle not found\")\n\t}\n\n\tvar caCerts []*x509.Certificate\n\tfor _, rootCA := range resp.Bundle.RootCas {\n\t\trootCACerts, err := x509.ParseCertificates(rootCA.DerBytes)\n\t\tif err != nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"parse bundle: %v\", err)\n\t\t}\n\t\tcaCerts = append(caCerts, rootCACerts...)\n\t}\n\n\tcaPool := x509.NewCertPool()\n\tfor _, c := range caCerts {\n\t\tcaPool.AddCert(c)\n\t}\n\n\te.mtx.RLock()\n\tdefer e.mtx.RUnlock()\n\n\tcertChain := [][]byte{}\n\tfor i, cert := range e.svid {\n\t\tcertChain = append(certChain, cert.Raw)\n\t\t// add the intermediates into the root CA pool since we need to\n\t\t// validate old agents that don't present intermediates with the\n\t\t// certificate request.\n\t\t// TODO: remove this hack in 0.8\n\t\tif i > 0 {\n\t\t\tcaPool.AddCert(cert)\n\t\t}\n\t}\n\n\ttlsCert := tls.Certificate{\n\t\tCertificate: certChain,\n\t\tPrivateKey: e.svidKey,\n\t}\n\n\treturn []tls.Certificate{tlsCert}, caPool, nil\n}", "func (c *Controller) ListCertificates (w http.ResponseWriter, r *http.Request) {\n\n vars := mux.Vars(r)\n hostname := vars[\"hostname\"]\n\n results := c.monitor.listCerts(hostname)\n\n fmt.Fprintf(w, \"Certificates for %s:\\n\", hostname)\n\n for _, entry := range results {\n fmt.Fprintf(w, \"%v\\n\", entry) // prettify with json?\n }\n\n}", "func (r Dns_Domain_Registration) GetDomainNameservers() (resp []datatypes.Container_Dns_Domain_Registration_Nameserver, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Dns_Domain_Registration\", \"getDomainNameservers\", nil, &r.Options, &resp)\n\treturn\n}", "func validateNames(data *dataBundle, names []string) string {\n\tfor _, name := range names {\n\t\tsanitizedName := name\n\t\temailDomain := name\n\t\tisEmail := 
false\n\t\tisWildcard := false\n\n\t\t// If it has an @, assume it is an email address and separate out the\n\t\t// user from the hostname portion so that we can act on the hostname.\n\t\t// Note that this matches behavior from the alt_names parameter. If it\n\t\t// ends up being problematic for users, I guess that could be separated\n\t\t// into dns_names and email_names in the future to be explicit, but I\n\t\t// don't think this is likely.\n\t\tif strings.Contains(name, \"@\") {\n\t\t\tsplitEmail := strings.Split(name, \"@\")\n\t\t\tif len(splitEmail) != 2 {\n\t\t\t\treturn name\n\t\t\t}\n\t\t\tsanitizedName = splitEmail[1]\n\t\t\temailDomain = splitEmail[1]\n\t\t\tisEmail = true\n\t\t}\n\n\t\t// If we have an asterisk as the first part of the domain name, mark it\n\t\t// as wildcard and set the sanitized name to the remainder of the\n\t\t// domain\n\t\tif strings.HasPrefix(sanitizedName, \"*.\") {\n\t\t\tsanitizedName = sanitizedName[2:]\n\t\t\tisWildcard = true\n\t\t}\n\n\t\t// Email addresses using wildcard domain names do not make sense\n\t\tif isEmail && isWildcard {\n\t\t\treturn name\n\t\t}\n\n\t\t// AllowAnyName is checked after this because EnforceHostnames still\n\t\t// applies when allowing any name. Also, we check the sanitized name to\n\t\t// ensure that we are not either checking a full email address or a\n\t\t// wildcard prefix.\n\t\tif data.role.EnforceHostnames {\n\t\t\tp := idna.New(\n\t\t\t\tidna.StrictDomainName(true),\n\t\t\t\tidna.VerifyDNSLength(true),\n\t\t\t)\n\t\t\tconverted, err := p.ToASCII(sanitizedName)\n\t\t\tif err != nil {\n\t\t\t\treturn name\n\t\t\t}\n\t\t\tif !hostnameRegex.MatchString(converted) {\n\t\t\t\treturn name\n\t\t\t}\n\t\t}\n\n\t\t// Self-explanatory\n\t\tif data.role.AllowAnyName {\n\t\t\tcontinue\n\t\t}\n\n\t\t// The following blocks all work the same basic way:\n\t\t// 1) If a role allows a certain class of base (localhost, token\n\t\t// display name, role-configured domains), perform further tests\n\t\t//\n\t\t// 2) If there is a perfect match on either the name itself or it's an\n\t\t// email address with a perfect match on the hostname portion, allow it\n\t\t//\n\t\t// 3) If subdomains are allowed, we check based on the sanitized name;\n\t\t// note that if not a wildcard, will be equivalent to the email domain\n\t\t// for email checks, and we already checked above for both a wildcard\n\t\t// and email address being present in the same name\n\t\t// 3a) First we check for a non-wildcard subdomain, as in <name>.<base>\n\t\t// 3b) Then we check if it's a wildcard and the base domain is a match\n\t\t//\n\t\t// Variances are noted in-line\n\n\t\tif data.role.AllowLocalhost {\n\t\t\tif name == \"localhost\" ||\n\t\t\t\tname == \"localdomain\" ||\n\t\t\t\t(isEmail && emailDomain == \"localhost\") ||\n\t\t\t\t(isEmail && emailDomain == \"localdomain\") {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif data.role.AllowSubdomains {\n\t\t\t\t// It is possible, if unlikely, to have a subdomain of \"localhost\"\n\t\t\t\tif strings.HasSuffix(sanitizedName, \".localhost\") ||\n\t\t\t\t\t(isWildcard && sanitizedName == \"localhost\") {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// A subdomain of \"localdomain\" is also not entirely uncommon\n\t\t\t\tif strings.HasSuffix(sanitizedName, \".localdomain\") ||\n\t\t\t\t\t(isWildcard && sanitizedName == \"localdomain\") {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif data.role.AllowTokenDisplayName {\n\t\t\tif name == data.req.DisplayName {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif data.role.AllowSubdomains 
{\n\t\t\t\tif isEmail {\n\t\t\t\t\t// If it's an email address, we need to parse the token\n\t\t\t\t\t// display name in order to do a proper comparison of the\n\t\t\t\t\t// subdomain\n\t\t\t\t\tif strings.Contains(data.req.DisplayName, \"@\") {\n\t\t\t\t\t\tsplitDisplay := strings.Split(data.req.DisplayName, \"@\")\n\t\t\t\t\t\tif len(splitDisplay) == 2 {\n\t\t\t\t\t\t\t// Compare the sanitized name against the hostname\n\t\t\t\t\t\t\t// portion of the email address in the broken\n\t\t\t\t\t\t\t// display name\n\t\t\t\t\t\t\tif strings.HasSuffix(sanitizedName, \".\"+splitDisplay[1]) {\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif strings.HasSuffix(sanitizedName, \".\"+data.req.DisplayName) ||\n\t\t\t\t\t(isWildcard && sanitizedName == data.req.DisplayName) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif len(data.role.AllowedDomains) > 0 {\n\t\t\tvalid := false\n\t\t\tfor _, currDomain := range data.role.AllowedDomains {\n\t\t\t\t// If there is, say, a trailing comma, ignore it\n\t\t\t\tif currDomain == \"\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// First, allow an exact match of the base domain if that role flag\n\t\t\t\t// is enabled\n\t\t\t\tif data.role.AllowBareDomains &&\n\t\t\t\t\t(name == currDomain ||\n\t\t\t\t\t\t(isEmail && emailDomain == currDomain)) {\n\t\t\t\t\tvalid = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tif data.role.AllowSubdomains {\n\t\t\t\t\tif strings.HasSuffix(sanitizedName, \".\"+currDomain) ||\n\t\t\t\t\t\t(isWildcard && sanitizedName == currDomain) {\n\t\t\t\t\t\tvalid = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif data.role.AllowGlobDomains &&\n\t\t\t\t\tstrings.Contains(currDomain, \"*\") &&\n\t\t\t\t\tglob.Glob(currDomain, name) {\n\t\t\t\t\tvalid = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif valid {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\treturn name\n\t}\n\n\treturn \"\"\n}", "func resolveHostnames() ([]string, error) {\n\tpairs, err := resolveIPsToHostnames()\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\thostnames := []string{}\n\tfor _, pair := range pairs {\n\t\thostnames = append(hostnames, pair.Hostnames...)\n\t}\n\treturn hostnames, nil\n}", "func (o SslCertificateManagedSslCertificateResponseOutput) Domains() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v SslCertificateManagedSslCertificateResponse) []string { return v.Domains }).(pulumi.StringArrayOutput)\n}", "func (api *packetframeProvider) GetNameservers(domain string) ([]*models.Nameserver, error) {\n\treturn models.ToNameservers(defaultNameServerNames)\n}", "func GetNameServersFromResolveConfig(filePath string) ([]string, error) {\n\tcontent, err := ioutil.ReadFile(filePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnameservers := ParseNameServers(content)\n\treturn nameservers, nil\n}", "func getSSLInfo(url string) (data []string) {\n\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\tclient := &http.Client{Transport: tr}\n\n\tseedUrl := url\n\tresp, err := client.Get(seedUrl)\n\tdefer resp.Body.Close()\n\n\tif err != nil {\n\t\tfmt.Errorf(seedUrl, \" 请求失败\")\n\t\tpanic(err)\n\t}\n\n\t//fmt.Println(resp.TLS.PeerCertificates[0])\n\tcertInfo := resp.TLS.PeerCertificates[0]\n\tNotBefore := certInfo.NotBefore.Format(\"2006-01-02 15:04:05\")\n\tNotAfter := certInfo.NotAfter.Format(\"2006-01-02 15:04:05\")\n\tbb := certInfo.NotAfter\n\tnow := time.Now()\n\tnn := math.Ceil(bb.Sub(now).Hours() / 24)\n\tmm := strconv.FormatFloat(nn, 'f', 2, 
64)\n\t//DNSNames := strings.Join(certInfo.DNSNames,\"\")\n\t//Subject := certInfo.Subject.String()\n\t//Issuer := certInfo.Issuer.String()\n\n\t//data := [][]string{\n\t//\t[]string{\"https\", seedUrl, NotBefore, NotAfter, mm},\n\t//}\n\tdata = []string{\"https\", seedUrl, NotBefore, NotAfter, mm}\n return\n}", "func GetURINamesFromCertificate(cert *x509.Certificate) (uris []string, err error) {\n\tfor _, ext := range GetExtensionsFromAsn1ObjectIdentifier(cert, OidExtensionSubjectAltName) {\n\t\turis, err = getURINamesFromSANExtension(ext.Value)\n\t\tif err != nil {\n\t\t\treturn uris, err\n\t\t}\n\t}\n\n\treturn uris, nil\n}", "func (p *Proxy) ServerNames() ([]string, error) {\n ns := []string{\"Error-Getting-Server-Names\"}\n rcon, err := p.GetRcon()\n if err != nil { return ns, err }\n\n command := fmt.Sprintf(\"bconf getServers().getKeys()\")\n reply, err := rcon.Send(command)\n if err != nil { return ns, err }\n\n reply = strings.Trim(reply, \"[] \\n\")\n names := strings.Split(reply, \",\")\n for i, n := range names {\n names[i] = strings.Trim(n, \" \")\n }\n return names, nil\n}", "func (o *LdapProvider) GetHostnames() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\n\treturn o.Hostnames\n}", "func domainConfig(cfg *admin.CertificateAuthorityConfig, domain string) *admin.DomainConfig {\n\tfor _, domainCfg := range cfg.KnownDomains {\n\t\tfor _, domainInCfg := range domainCfg.Domain {\n\t\t\tif domainInCfg == domain || strings.HasSuffix(domain, \".\"+domainInCfg) {\n\t\t\t\treturn domainCfg\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (w *worker) resolveFromGluelessNameSrvs(nameSrvs []*zonecut.NameSrvInfo) (*nameresolver.Entry, *errors.ErrorStack) {\n\tvar errList []string\nOuterloop:\n\tfor _, ns := range nameSrvs {\n\t\tvar addrs []net.IP\n\t\t// requestedName is the nameserver name, by default. It may evolve, as aliases/CNAME are met along the resolution\n\t\trequestedName := ns.Name()\n\t\t// We limit to MAX_CNAME_CHAIN the number of CNAME that we are willing to follow\n\t\tInnerloop:\n\t\tfor i := 0; i < MAX_CNAME_CHAIN && len(addrs) == 0; i++ {\n\t\t\t// Start up the resolution of the name of the nameserver into IP addresses so that we can query these IP\n\t\t\t// addresses for the request topic of this worker.\n\t\t\treq := nameresolver.NewRequestWithContext(requestedName, w.req.Exceptions(), w.req)\n\t\t\tw.nrHandler(req)\n\n\t\t\tne, err := req.Result()\n\t\t\tif err != nil || ne == nil {\n\t\t\t\t// if an error occurred, we just try with the next nameserver until we get an answer or all servers have\n\t\t\t\t// been tried.\n\t\t\t\tcontinue Outerloop\n\t\t\t}\n\n\t\t\tif ne.CNAMETarget() == \"\" {\n\t\t\t\t// We got some IP addresses ; we store them away and go to the next step\n\t\t\t\taddrs = ne.Addrs()\n\t\t\t\tbreak Innerloop\n\t\t\t}\n\t\t\t// If the answer is an alias, we retry with the new target name\n\t\t\trequestedName = ne.CNAMETarget()\n\t\t}\n\n\t\tif len(addrs) == 0 {\n\t\t\t// We hit a very long CNAME Chain or the name cannot be resolved for some reason\n\t\t\tcontinue\n\t\t}\n\n\t\t// Try to query every IP that we found, until we get a valid answer\n\t\tfor _, addr := range addrs {\n\t\t\tentry, err := w.resolveFrom(addr)\n\t\t\tif err == nil {\n\t\t\t\treturn entry, nil\n\t\t\t}\n\t\t\terrList = append(errList, fmt.Sprintf(\"resolveFromGluelessNameSrvs: error from %s(%s): %s\", ns.Name(), addr.String(), err.Error()))\n\t\t}\n\t}\n\t// We tried every IP address of every name server to no avail. 
Return an error\n\treturn nil, errors.NewErrorStack(fmt.Errorf(\"resolveFromGluelessNameSrvs: no valid glueless delegation for %s: [%s]\", w.req.Name(), strings.Join(errList, \", \")))\n}", "func ConnVerifyHostname(c *tls.Conn, host string) error", "func getIDsFromCertificate(peer *x509.Certificate) (string, string, error) {\n\tswitch {\n\tcase len(peer.URIs) == 0:\n\t\treturn \"\", \"\", errors.New(\"peer certificate contains no URI SAN\")\n\tcase len(peer.URIs) > 1:\n\t\treturn \"\", \"\", errors.New(\"peer certificate contains more than one URI SAN\")\n\t}\n\n\tid := peer.URIs[0]\n\n\tif err := ValidateURI(id, AllowAny()); err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\treturn id.String(), TrustDomainID(id.Host), nil\n}", "func TestCnameLookup(t *testing.T) {\n\tetc := newEtcdPlugin()\n\n\tfor _, serv := range servicesCname {\n\t\tset(t, etc, serv.Key, 0, serv)\n\t\tdefer delete(t, etc, serv.Key)\n\t}\n\tfor i, tc := range dnsTestCasesCname {\n\t\tm := tc.Msg()\n\n\t\trec := dnstest.NewRecorder(&test.ResponseWriter{})\n\t\t_, err := etc.ServeDNS(ctxt, rec, m)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Expected no error, got %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tresp := rec.Msg\n\t\tif err := test.Header(tc, resp); err != nil {\n\t\t\tt.Errorf(\"Test %d: %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif err := test.Section(tc, test.Answer, resp.Answer); err != nil {\n\t\t\tt.Errorf(\"Test %d: %v\", i, err)\n\t\t}\n\t\tif err := test.Section(tc, test.Ns, resp.Ns); err != nil {\n\t\t\tt.Errorf(\"Test %d: %v\", i, err)\n\t\t}\n\t\tif err := test.Section(tc, test.Extra, resp.Extra); err != nil {\n\t\t\tt.Errorf(\"Test %d: %v\", i, err)\n\t\t}\n\t}\n}", "func List(in io.Reader, cfg *Config) ([]*x509.Certificate, error) {\n\tobjects, err := parseObjects(in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn findTrustedCerts(cfg, objects)\n}", "func resolveConfig(recordType string, name string, hostname string,\n\trequiredSuffix string, recurse bool, lookupOptions LookupOptions) ([]string, bool) {\n\tlog := log.With(\"name\", name).With(\"hostname\", hostname)\n\n\t// Split the hostname up into fragments\n\thostParts := strings.Split(hostname, \".\")\n\n\tresults := []string{}\n\n\tfor idx, _ := range hostParts {\n\t\t// Calculate the fragment\n\t\tdomain := strings.Join(hostParts[idx:], \".\")\n\t\tif !strings.HasSuffix(domain, requiredSuffix) {\n\t\t\tlog.Debugln(\"Stopping iteration before\", domain, \" as it does not have required suffix\", requiredSuffix)\n\t\t\t// Break now since we can't possibly continue\n\t\t\tbreak\n\t\t}\n\t\t// Determine the full DNS name with the config prefix\n\t\tdnsName := name + \".\" + domain\n\n\t\tresult := []string{}\n\t\tvar err error\n\t\tswitch recordType {\n\t\t// TXT records are our conventional approach\n\t\tcase DnsTypeTXT:\n\t\t\tresult, err = net.LookupTXT(dnsName)\n\t\t// SRV records are unconventional - we infer their structure and treat\n\t\t// the result IPs as a list of joinable values.\n\t\tcase DnsTypeSRV:\n\t\t\tvar srvCname string\n\t\t\tvar srvResults []*net.SRV\n\t\t\tsrvCname, srvResults, err = net.LookupSRV(\"\", \"\", dnsName)\n\t\t\tlog.Debugln(\"SRV lookup got CNAME\", srvCname)\n\t\t\t// Construct a result array of <host>:<port> fragments.\n\t\t\tfor _, srvResult := range srvResults {\n\t\t\t\tif lookupOptions.SrvOptions.SuppressPort {\n\t\t\t\t\tresult = append(result, fmt.Sprintf(\"%s\", srvResult.Target))\n\t\t\t\t} else {\n\t\t\t\t\tresult = append(result, fmt.Sprintf(\"%s:%d\", srvResult.Target, 
srvResult.Port))\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\tlog.Panicln(\"Unrecognized record type requested.\")\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlog.Debugln(\"Failed querying\", dnsName, err)\n\t\t} else {\n\t\t\tlog.Debugln(\"Lookup\", dnsName, \"found value\", result)\n\t\t\tresults = append(results, result...)\n\t\t\tif !recurse {\n\t\t\t\t// If not recursing, terminate iteration\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(results) == 0 {\n\t\tlog.Debugln(\"Found no keys\")\n\t\treturn []string{}, false\n\t}\n\treturn results, true\n}", "func (api *powerdnsProvider) GetNameservers(string) ([]*models.Nameserver, error) {\n\tvar r []string\n\tfor _, j := range api.nameservers {\n\t\tr = append(r, j.Name)\n\t}\n\treturn models.ToNameservers(r)\n}", "func GetSSLKeysByHostName(w http.ResponseWriter, r *http.Request) {\n\tinf, userErr, sysErr, errCode := api.NewInfo(r, []string{\"hostname\"}, nil)\n\tif userErr != nil || sysErr != nil {\n\t\tapi.HandleErr(w, r, inf.Tx.Tx, errCode, userErr, sysErr)\n\t\treturn\n\t}\n\tdefer inf.Close()\n\n\tif inf.Config.RiakEnabled == false {\n\t\tapi.HandleErr(w, r, inf.Tx.Tx, http.StatusServiceUnavailable, errors.New(\"the Riak service is unavailable\"), errors.New(\"getting SSL keys from Riak by host name: Riak is not configured\"))\n\t\treturn\n\t}\n\n\thostName := inf.Params[\"hostname\"]\n\tdomainName := \"\"\n\thostRegex := \"\"\n\tstrArr := strings.Split(hostName, \".\")\n\tln := len(strArr)\n\tif ln > 1 {\n\t\tfor i := 2; i < ln-1; i++ {\n\t\t\tdomainName += strArr[i] + \".\"\n\t\t}\n\t\tdomainName += strArr[ln-1]\n\t\thostRegex = `.*\\.` + strArr[1] + `\\..*`\n\t}\n\n\t// lookup the cdnID\n\tcdnID, ok, err := getCDNIDByDomainname(domainName, inf.Tx.Tx)\n\tif err != nil {\n\t\tapi.HandleErr(w, r, inf.Tx.Tx, http.StatusInternalServerError, nil, errors.New(\"getting cdn id by domain name: \"+err.Error()))\n\t\treturn\n\t}\n\tif !ok {\n\t\tapi.WriteRespAlert(w, r, tc.InfoLevel, \" - a cdn does not exist for the domain: \"+domainName+\" parsed from hostname: \"+hostName)\n\t\treturn\n\t}\n\t// now lookup the deliveryservice xmlID\n\txmlID, ok, err := getXMLID(cdnID, hostRegex, inf.Tx.Tx)\n\tif err != nil {\n\t\tapi.HandleErr(w, r, inf.Tx.Tx, http.StatusInternalServerError, nil, errors.New(\"getting xml id: \"+err.Error()))\n\t\treturn\n\t}\n\tif !ok {\n\t\tapi.WriteRespAlert(w, r, tc.InfoLevel, \" - a delivery service does not exist for a host with hostname of \"+hostName)\n\t\treturn\n\t}\n\n\tgetSSLKeysByXMLIDHelper(xmlID, inf, w, r)\n}", "func NameserversToStrings(nss []*Nameserver) (s []string) {\n\tfor _, ns := range nss {\n\t\ts = append(s, ns.Name)\n\t}\n\treturn s\n}", "func (o GroupDnsConfigOutput) Nameservers() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GroupDnsConfig) []string { return v.Nameservers }).(pulumi.StringArrayOutput)\n}", "func findTrustedCerts(cfg *Config, objects []*Object) ([]*x509.Certificate, error) {\n\tvar out []*x509.Certificate\n\n\tcerts := filterObjectsByClass(objects, \"CKO_CERTIFICATE\")\n\ttrusts := filterObjectsByClass(objects, \"CKO_NSS_TRUST\")\n\n\tfor _, cert := range certs {\n\t\tderBytes := cert.attrs[\"CKA_VALUE\"].value\n\t\thash := sha1.New()\n\t\thash.Write(derBytes)\n\t\tdigest := hash.Sum(nil)\n\n\t\tx509, err := x509.ParseCertificate(derBytes)\n\t\tif err != nil {\n\t\t\t// This is known to occur because of a broken certificate in NSS.\n\t\t\t// https://bugzilla.mozilla.org/show_bug.cgi?id=707995\n\t\t\tcontinue\n\t\t}\n\n\t\t// TODO(agl): wtc tells me that Mozilla might get rid of 
the\n\t\t// SHA1 records in the future and use issuer and serial number\n\t\t// to match trust records to certificates (which is what NSS\n\t\t// currently uses). This needs some changes to the crypto/x509\n\t\t// package to keep the raw names around.\n\n\t\tvar trust *Object\n\t\tfor _, possibleTrust := range trusts {\n\t\t\tif bytes.Equal(digest, possibleTrust.attrs[\"CKA_CERT_SHA1_HASH\"].value) {\n\t\t\t\ttrust = possibleTrust\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\ttrustType := trust.attrs[\"CKA_TRUST_SERVER_AUTH\"].value\n\n\t\tvar trusted bool\n\t\tswitch string(trustType) {\n\t\tcase \"CKT_NSS_NOT_TRUSTED\":\n\t\t\t// An explicitly distrusted cert\n\t\t\ttrusted = false\n\t\tcase \"CKT_NSS_TRUSTED_DELEGATOR\":\n\t\t\t// A cert trusted for issuing SSL server certs.\n\t\t\ttrusted = true\n\t\tcase \"CKT_NSS_TRUST_UNKNOWN\", \"CKT_NSS_MUST_VERIFY_TRUST\":\n\t\t\t// A cert not trusted for issuing SSL server certs, but is trusted for other purposes.\n\t\t\ttrusted = false\n\t\t}\n\n\t\tif !trusted && !cfg.IncludedUntrustedFlag {\n\t\t\tcontinue\n\t\t}\n\n\t\tout = append(out, x509)\n\t}\n\n\treturn out, nil\n}", "func (o SslCertificateManagedSslCertificateResponsePtrOutput) Domains() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *SslCertificateManagedSslCertificateResponse) []string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Domains\n\t}).(pulumi.StringArrayOutput)\n}", "func resolveIPsToHostnames() ([]IPHostnamesPair, error) {\n\tipAddrs, err := getLocalIPAddresses()\n\tif err != nil {\n\t\treturn []IPHostnamesPair{}, err\n\t}\n\n\t// Reverse resolve all IPs, only keep those which match to a hostname\n\thostnamePairs := []IPHostnamesPair{}\n\tfor _, ip := range ipAddrs {\n\t\tnames, err := net.LookupAddr(ip.String())\n\t\tif err == nil {\n\t\t\tpair := IPHostnamesPair{\n\t\t\t\tIp: ip,\n\t\t\t\tHostnames: names,\n\t\t\t}\n\t\t\thostnamePairs = append(hostnamePairs, pair)\n\t\t} else {\n\t\t\tlog.With(\"ip\", ip.String()).Debugln(\"No DNS results for IP:\", err)\n\t\t}\n\t}\n\treturn hostnamePairs, nil\n}", "func (o *LdapProvider) GetHostnamesOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Hostnames, true\n}", "func certFromHost(host, port string) (*x509.Certificate, error) {\n\tvar hostCert *x509.Certificate\n\td := &net.Dialer{\n\t\tTimeout: time.Duration(TimeoutSeconds) * time.Second,\n\t}\n\n\t// Connect insecurely to the host, range through all certificates found, find the cert that matches the host name for the check, and return it\n\tconn, err := tls.DialWithDialer(d, \"tcp\", host+\":\"+port, &tls.Config{\n\t\tInsecureSkipVerify: true,\n\t\tMinVersion: tls.VersionTLS12,\n\t})\n\n\tif err != nil {\n\t\tlog.Error(\"Error retrieving host certificate: \", []*x509.Certificate{&x509.Certificate{}}, \"\", err)\n\t\treturn hostCert, err\n\t}\n\n\tdefer conn.Close()\n\tcert := conn.ConnectionState().PeerCertificates\n\n\tfor _, clientCert := range cert {\n\t\tfor _, certDNS := range clientCert.DNSNames {\n\t\t\tif certDNS == host {\n\t\t\t\thostCert = clientCert\n\t\t\t}\n\t\t}\n\t}\n\n\tif hostCert == nil {\n\t\terr = errors.New(\"Empty certificate returned\")\n\t}\n\treturn hostCert, err\n}", "func GetHostsSlice()([]string,string){\n\n\tsource:=\"\"\n\tdomains:=make([]string,0)\n\tcurPath:=currentPath()\n\tresultPath:=curPath+\"/pars_result\"\n\n\tfor _,i:=range ReadDir(resultPath){\n\n\t\tfmt.Println(i)\n\n\t}\n\n\tvar path string\n\tfmt.Println(\"Enter directory:\")\n\t_, _ = fmt.Fscan(os.Stdin, &path)\n\tif 
path==\"links_pars_result\"{\n\t\tsource=\"domain-status.com\"\n\t}\n\n\tvar dir string\n\tfor _,i:=range ReadDir(resultPath+\"/\"+path){\n\t\tfmt.Println(i)\n\t}\n\n\tfmt.Println(\"Enter directory from which will be created url slice:\")\n\t_, _ = fmt.Fscan(os.Stdin, &dir)\n\n\tvar links Links\n\tfor _, i := range ReadDir(curPath + \"/pars_result/\" + linksParsResultDir + \"/\" + dir) {\n\n\t\tfor _, j := range OpenAndReadLinks(curPath+\"/pars_result/\"+linksParsResultDir+\"/\"+dir+\"/\"+i, links) {\n\n\t\t\tdomains = append(domains, j)\n\n\t\t}\n\n\t}\n\n\treturn domains,source\n}", "func GetCertif(w http.ResponseWriter, r *http.Request) {\r\n\tparams := mux.Vars(r)\r\n\tverifi := \"not ok\"\r\n\t\r\n\t//We verify whether the required certificate exists or not\r\n\tfor _, itemm := range certif {\r\n\t\tif itemm.Id==params[\"id\"] {\r\n\t\t\tverifi=\"ok\"\r\n\t\t}\r\n\t}\r\n\t\r\n\tif verifi==\"ok\" {\r\n\t\tfor _, item := range certif {\r\n\t\t\tif item.Id == params[\"id\"] && item.Ownerid==clientnum { //Display information only if the owner ID of the certificate is the same as the connected user ID\r\n\t\t\t\tjson.NewEncoder(w).Encode(item)\r\n\t\t\t\treturn\r\n\t\t\t}\telse if item.Id == params[\"id\"] && item.Ownerid!=clientnum {\r\n\t\t\t\tfmt.Fprintf(w, \"You can't access this certificate.\") //If the certificate does not belong to the connected user: return this message\r\n\t\t\t}\r\n\t\t}\r\n\t}\telse {\r\n\t\tfmt.Fprintf(w, \"This certificate does not exist.\")\r\n\t}\r\n}", "func (o SslCertificateManagedSslCertificatePtrOutput) Domains() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *SslCertificateManagedSslCertificate) []string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Domains\n\t}).(pulumi.StringArrayOutput)\n}", "func getServiceIdentitiesFromCert(sdscert secrets.SDSCert, serviceIdentity identity.ServiceIdentity, meshCatalog catalog.MeshCataloger) ([]identity.ServiceIdentity, error) {\n\t// Program SAN matching based on SMI TrafficTarget policies\n\tswitch sdscert.CertType {\n\tcase secrets.RootCertTypeForMTLSOutbound:\n\t\t// For the outbound certificate validation context, the SANs needs to match the list of service identities\n\t\t// corresponding to the upstream service. This means, if the sdscert.Name points to service 'X',\n\t\t// the SANs for this certificate should correspond to the service identities of 'X'.\n\t\tmeshSvc, err := sdscert.GetMeshService()\n\t\tif err != nil {\n\t\t\tlog.Error().Err(err).Msgf(\"Error unmarshalling upstream service for outbound cert %s\", sdscert)\n\t\t\treturn nil, err\n\t\t}\n\t\tsvcIdentities, err := meshCatalog.ListServiceIdentitiesForService(*meshSvc)\n\t\tif err != nil {\n\t\t\tlog.Error().Err(err).Msgf(\"Error listing service accounts for service %s\", meshSvc)\n\t\t\treturn nil, err\n\t\t}\n\t\treturn svcIdentities, nil\n\n\tcase secrets.RootCertTypeForMTLSInbound:\n\t\t// Verify that the SDS cert request corresponding to the mTLS root validation cert matches the identity\n\t\t// of this proxy. 
If it doesn't, then something is wrong in the system.\n\t\tsvcAccountInRequest, err := sdscert.GetK8sServiceAccount()\n\t\tif err != nil {\n\t\t\tlog.Error().Err(err).Msgf(\"Error unmarshalling service account for inbound mTLS validation cert %s\", sdscert)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif svcAccountInRequest.ToServiceIdentity() != serviceIdentity {\n\t\t\tlog.Error().Err(errCertMismatch).Msgf(\"Request for SDS cert %s does not belong to proxy with identity %s\", sdscert.Name, serviceIdentity)\n\t\t\treturn nil, errCertMismatch\n\t\t}\n\n\t\t// For the inbound certificate validation context, the SAN needs to match the list of all downstream\n\t\t// service identities that are allowed to connect to this upstream identity. This means, if the upstream proxy\n\t\t// identity is 'X', the SANs for this certificate should correspond to all the downstream identities\n\t\t// allowed to access 'X'.\n\t\tsvcIdentities, err := meshCatalog.ListAllowedInboundServiceIdentities(serviceIdentity)\n\t\tif err != nil {\n\t\t\tlog.Error().Err(err).Msgf(\"Error listing inbound service accounts for proxy with ServiceAccount %s\", serviceIdentity)\n\t\t\treturn nil, err\n\t\t}\n\t\treturn svcIdentities, nil\n\n\tdefault:\n\t\tlog.Debug().Msgf(\"SAN matching not needed for cert %s\", sdscert)\n\t}\n\n\treturn nil, nil\n}", "func List(c *deis.Client, results int) ([]api.Cert, int, error) {\n\tbody, count, reqErr := c.LimitedRequest(\"/v2/certs/\", results)\n\n\tif reqErr != nil && !deis.IsErrAPIMismatch(reqErr) {\n\t\treturn []api.Cert{}, -1, reqErr\n\t}\n\n\tvar res []api.Cert\n\tif err := json.Unmarshal([]byte(body), &res); err != nil {\n\t\treturn []api.Cert{}, -1, err\n\t}\n\n\treturn res, count, reqErr\n}", "func MatchNameFromCert(cert *CertID, name string) bool {\n\tif cert.CommonName != \"\" && cert.CommonName == name {\n\t\treturn true\n\t}\n\tfor _, alt := range cert.alts() {\n\t\tif alt != \"\" && alt == name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func ParseCert(binCert []byte) (common.Address, common.Address, string, error) {\n\tvar contrAddr, parentAddr common.Address\n\tvar retDesc string\n\tca, err := x509.ParseCertificate(binCert)\n\tif err!=nil {\n\t\treturn common.Address{}, common.Address{}, \"\", err\n\t}\n\n\tfor i:=0; i<len(ca.Subject.Names); i++ {\n\t\tretDesc += fmt.Sprint(ca.Subject.Names[i].Value) + \" \";\n\t}\n\t// iterate in the extension to get the information\n\tfor _, element := range ca.Extensions {\n\t\tif element.Id.String() == \"1.2.752.115.33.2\" { // CA Address\n\t\t\tfmt.Printf(\"\\tCaContractIdentifier: %+#+x\\n\", element.Value)\n\t\t\tval:=element.Value[2:]\n\t\t\tif( len(val) != len(common.Address{}.Bytes()) ) {\n\t\t\t\treturn common.Address{}, common.Address{}, \"\",\n\t\t\t\t\tGeneralError{\"ParseCert: wrong length of CA addr\"}\n\t\t\t}\n\t\t\tcontrAddr = common.BytesToAddress(val)\n\t\t}\n\t\tif element.Id.String() == \"1.2.752.115.33.1\" { //Parent Address\n\t\t\tfmt.Printf(\"\\tIssuerCaContractIdentifier: %+#+x\\n\", element.Value)\n\t\t\tval:=element.Value[2:]\n\t\t\tif( len(val) != len(common.Address{}.Bytes()) ) {\n\t\t\t\treturn common.Address{}, common.Address{}, \"\",\n\t\t\t\t\tGeneralError{\"ParseCert: wrong length of CA addr\"}\n\t\t\t}\n\t\t\tparentAddr = common.BytesToAddress(val)\n\t\t}\n\t}\n\treturn contrAddr, parentAddr, retDesc, nil\n}", "func (c *PAPIClient) Hostnames(contractID, groupID string) ([]HostnameSummary, error) {\n\thostnames := &Hostnames{}\n\terr := resourceRequest(c, \"GET\", 
papiHostnamesEndpoint(c.GetCredentials(), contractID, groupID), nil, hostnames)\n\tif err != nil {\n\t\treturn []HostnameSummary{}, err\n\t}\n\treturn hostnames.Hostnames.Items, err\n}", "func runDNS(workers int, hostnames []string) ([]string, map[string]string) {\n\tipm, errors := resolveAllDNS(workers, hostnames)\n\tvar ipAddrs []string\n\tipAddrs = nil\n\n\tvar reverseIP map[string]string\n\treverseIP = make(map[string]string)\n\n\tfor _, val := range ipm {\n\t\tfor _, ip := range val.addresses {\n\t\t\tif stringInSlice(ip, ipAddrs) { // skip duplicate IP addresses\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tipAddrs = append(ipAddrs, ip)\n\t\t\treverseIP[ip] = val.hostname\n\t\t}\n\t}\n\tif len(errors) > 0 {\n\t\tvar errBuilder strings.Builder\n\t\tfor _, err := range errors {\n\t\t\terrBuilder.WriteString(fmt.Sprintf(\"%s\\n\", err.Error()))\n\t\t}\n\t\tfmt.Printf(\"\\n%s\\n\\n\", errBuilder.String())\n\t}\n\treturn ipAddrs, reverseIP\n}", "func boundIPs(c *caddy.Controller) (ips []net.IP) {\n\tconf := dnsserver.GetConfig(c)\n\thosts := conf.ListenHosts\n\tif hosts == nil || hosts[0] == \"\" {\n\t\thosts = nil\n\t\taddrs, err := net.InterfaceAddrs()\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tfor _, addr := range addrs {\n\t\t\thosts = append(hosts, addr.String())\n\t\t}\n\t}\n\tfor _, host := range hosts {\n\t\tip, _, _ := net.ParseCIDR(host)\n\t\tip4 := ip.To4()\n\t\tif ip4 != nil && !ip4.IsLoopback() {\n\t\t\tips = append(ips, ip4)\n\t\t\tcontinue\n\t\t}\n\t\tip6 := ip.To16()\n\t\tif ip6 != nil && !ip6.IsLoopback() {\n\t\t\tips = append(ips, ip6)\n\t\t}\n\t}\n\treturn ips\n}", "func getPeerCertificates(addr, serverName, roots string, insecure bool) ([]*x509.Certificate, error) {\n\tvar (\n\t\terr error\n\t\trootCAs *x509.CertPool\n\t)\n\tif roots != \"\" {\n\t\trootCAs, err = x509util.ReadCertPool(roots)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failure to load root certificate pool from input path '%s'\", roots)\n\t\t}\n\t}\n\tif _, _, err := net.SplitHostPort(addr); err != nil {\n\t\taddr = net.JoinHostPort(addr, \"443\")\n\t}\n\ttlsConfig := &tls.Config{\n\t\tMinVersion: tls.VersionTLS12,\n\t\tRootCAs: rootCAs,\n\t}\n\tif insecure {\n\t\ttlsConfig.InsecureSkipVerify = true\n\t}\n\tif serverName != \"\" {\n\t\ttlsConfig.ServerName = serverName\n\t}\n\tconn, err := tls.Dial(\"tcp\", addr, tlsConfig)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to connect\")\n\t}\n\tconn.Close()\n\treturn conn.ConnectionState().PeerCertificates, nil\n}", "func prepareAllowedDomain(requestURL string) ([]string, error) {\n\trequestURL = \"https://\" + trimProtocol(requestURL)\n\n\tu, err := url.ParseRequestURI(requestURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse request URI: %w\", err)\n\t}\n\n\tdomain := strings.TrimPrefix(u.Hostname(), \"www.\")\n\n\treturn []string{\n\t\tdomain,\n\t\t\"www.\" + domain,\n\t\t\"http://\" + domain,\n\t\t\"https://\" + domain,\n\t\t\"http://www.\" + domain,\n\t\t\"https://www.\" + domain,\n\t}, nil\n}", "func getKubeAPIServerSecretName(client client.Client, dnsName string) (string, error) {\n\tapiserver := &ocinfrav1.APIServer{}\n\tif err := client.Get(\n\t\tcontext.TODO(),\n\t\ttypes.NamespacedName{Name: apiserverConfigName},\n\t\tapiserver,\n\t); err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\tlog.Info(\"APIServer cluster not found\")\n\t\t\treturn \"\", nil\n\t\t}\n\t\treturn \"\", err\n\t}\n\t// iterate through all namedcertificates\n\tfor _, namedCert := range 
apiserver.Spec.ServingCerts.NamedCertificates {\n\t\tfor _, name := range namedCert.Names {\n\t\t\tif strings.EqualFold(name, dnsName) {\n\t\t\t\treturn namedCert.ServingCertificate.Name, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\", nil\n}", "func sortedNsLookup(host string, ips *[]string) error {\n\tnetIPs, err := net.LookupIP(host)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, netIP := range netIPs {\n\t\t*ips = append(*ips, netIP.String())\n\t}\n\tsort.Strings(*ips)\n\treturn nil\n}", "func GetSearchDomains(resolvConf []byte) []string {\n\tvar domains []string\n\tfor _, line := range getLines(resolvConf, []byte(\"#\")) {\n\t\tmatch := searchRegexp.FindSubmatch(line)\n\t\tif match == nil {\n\t\t\tcontinue\n\t\t}\n\t\tdomains = strings.Fields(string(match[1]))\n\t}\n\treturn domains\n}", "func processx509Certs(keys []string) ([][]byte, error) {\n\tvar x509s [][]byte\n\tfor _, key := range keys {\n\t\tfileName := strings.Split(key, \":\")[0]\n\t\tif _, err := os.Stat(fileName); os.IsNotExist(err) {\n\t\t\tcontinue\n\t\t}\n\t\ttmp, err := os.ReadFile(fileName)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Unable to read file: %w\", err)\n\t\t}\n\t\tif !encutils.IsCertificate(tmp) {\n\t\t\tcontinue\n\t\t}\n\t\tx509s = append(x509s, tmp)\n\n\t}\n\treturn x509s, nil\n}", "func Nameservers(ns []string) DomainPurchaseOpt {\n\treturn func(rec *DomainPurchase) error {\n\t\trec.NameServers = ns\n\t\treturn nil\n\t}\n}", "func (input *Input) SubDomains() string {\n\tparts := strings.Split(input.Host(), \".\")\n\tif len(parts) >= 3 {\n\t\treturn strings.Join(parts[:len(parts)-2], \".\")\n\t}\n\treturn \"\"\n}", "func (dir *Dir) Hostnames() ([]string, error) {\n\tif dir.Config.Changed(\"host-wrapper\") {\n\t\tvariables := map[string]string{\n\t\t\t\"HOST\": dir.Config.GetAllowEnvVar(\"host\"),\n\t\t\t\"ENVIRONMENT\": dir.Config.Get(\"environment\"),\n\t\t\t\"DIRNAME\": dir.BaseName(),\n\t\t\t\"DIRPATH\": dir.Path,\n\t\t\t\"SCHEMA\": dir.Config.GetAllowEnvVar(\"schema\"),\n\t\t}\n\t\tshellOut, err := shellout.New(dir.Config.Get(\"host-wrapper\")).WithVariables(variables)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn shellOut.RunCaptureSplit()\n\t}\n\treturn dir.Config.GetSliceAllowEnvVar(\"host\", ',', true), nil\n}", "func (dir *Dir) Hostnames() ([]string, error) {\n\tif dir.Config.Changed(\"host-wrapper\") {\n\t\tvariables := map[string]string{\n\t\t\t\"HOST\": dir.Config.Get(\"host\"),\n\t\t\t\"ENVIRONMENT\": dir.Config.Get(\"environment\"),\n\t\t\t\"DIRNAME\": dir.BaseName(),\n\t\t\t\"DIRPATH\": dir.Path,\n\t\t\t\"SCHEMA\": dir.Config.Get(\"schema\"),\n\t\t}\n\t\tshellOut, err := util.NewInterpolatedShellOut(dir.Config.Get(\"host-wrapper\"), variables)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn shellOut.RunCaptureSplit()\n\t}\n\treturn dir.Config.GetSlice(\"host\", ',', true), nil\n}", "func (k *Kubernetes) nsAddrs(external, headless bool, zone string) []dns.RR {\n\tvar (\n\t\tsvcNames []string\n\t\tsvcIPs []net.IP\n\t\tfoundEndpoint bool\n\t)\n\n\t// Find the CoreDNS Endpoints\n\tfor _, localIP := range k.localIPs {\n\t\tendpoints := k.APIConn.EpIndexReverse(localIP.String())\n\n\t\t// Collect IPs for all Services of the Endpoints\n\t\tfor _, endpoint := range endpoints {\n\t\t\tfoundEndpoint = true\n\t\t\tsvcs := k.APIConn.SvcIndex(endpoint.Index)\n\t\t\tfor _, svc := range svcs {\n\t\t\t\tif external {\n\t\t\t\t\tsvcName := strings.Join([]string{svc.Name, svc.Namespace, zone}, \".\")\n\n\t\t\t\t\tif headless && svc.Headless() {\n\t\t\t\t\t\tfor _, s := 
range endpoint.Subsets {\n\t\t\t\t\t\t\tfor _, a := range s.Addresses {\n\t\t\t\t\t\t\t\tsvcNames = append(svcNames, endpointHostname(a, k.endpointNameMode)+\".\"+svcName)\n\t\t\t\t\t\t\t\tsvcIPs = append(svcIPs, net.ParseIP(a.IP))\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfor _, exIP := range svc.ExternalIPs {\n\t\t\t\t\t\t\tsvcNames = append(svcNames, svcName)\n\t\t\t\t\t\t\tsvcIPs = append(svcIPs, net.ParseIP(exIP))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tsvcName := strings.Join([]string{svc.Name, svc.Namespace, Svc, zone}, \".\")\n\t\t\t\tif svc.Headless() {\n\t\t\t\t\t// For a headless service, use the endpoints IPs\n\t\t\t\t\tfor _, s := range endpoint.Subsets {\n\t\t\t\t\t\tfor _, a := range s.Addresses {\n\t\t\t\t\t\t\tsvcNames = append(svcNames, endpointHostname(a, k.endpointNameMode)+\".\"+svcName)\n\t\t\t\t\t\t\tsvcIPs = append(svcIPs, net.ParseIP(a.IP))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tfor _, clusterIP := range svc.ClusterIPs {\n\t\t\t\t\t\tsvcNames = append(svcNames, svcName)\n\t\t\t\t\t\tsvcIPs = append(svcIPs, net.ParseIP(clusterIP))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// If no CoreDNS endpoints were found, use the localIPs directly\n\tif !foundEndpoint {\n\t\tsvcIPs = make([]net.IP, len(k.localIPs))\n\t\tsvcNames = make([]string, len(k.localIPs))\n\t\tfor i, localIP := range k.localIPs {\n\t\t\tsvcNames[i] = defaultNSName + zone\n\t\t\tsvcIPs[i] = localIP\n\t\t}\n\t}\n\n\t// Create an RR slice of collected IPs\n\trrs := make([]dns.RR, len(svcIPs))\n\tfor i, ip := range svcIPs {\n\t\tif ip.To4() == nil {\n\t\t\trr := new(dns.AAAA)\n\t\t\trr.Hdr.Class = dns.ClassINET\n\t\t\trr.Hdr.Rrtype = dns.TypeAAAA\n\t\t\trr.Hdr.Name = svcNames[i]\n\t\t\trr.AAAA = ip\n\t\t\trrs[i] = rr\n\t\t\tcontinue\n\t\t}\n\t\trr := new(dns.A)\n\t\trr.Hdr.Class = dns.ClassINET\n\t\trr.Hdr.Rrtype = dns.TypeA\n\t\trr.Hdr.Name = svcNames[i]\n\t\trr.A = ip\n\t\trrs[i] = rr\n\t}\n\n\treturn rrs\n}", "func parseCertificates(der [][]byte) ([]*x509.Certificate, error) {\n\tvar err error\n\tcerts := make([]*x509.Certificate, len(der))\n\tfor i, c := range der {\n\t\tcerts[i], err = x509.ParseCertificate(c)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn certs, nil\n}", "func (s TLSSpec) GetParsedAltNames() (dnsNames, ipAddresses, emailAddresses []string, err error) {\n\tfor _, name := range s.GetAltNames() {\n\t\tif net.ParseIP(name) != nil {\n\t\t\tipAddresses = append(ipAddresses, name)\n\t\t} else if validation.IsValidDNSName(name) {\n\t\t\tdnsNames = append(dnsNames, name)\n\t\t} else if validation.IsValidEmailAddress(name) {\n\t\t\temailAddresses = append(emailAddresses, name)\n\t\t} else {\n\t\t\treturn nil, nil, nil, errors.WithStack(errors.Newf(\"'%s' is not a valid alternate name\", name))\n\t\t}\n\t}\n\treturn dnsNames, ipAddresses, emailAddresses, nil\n}", "func NS(domain, serverAddr string) *Tsk {\n\tt := newTsk(\"ns\")\n\tservers, err := LookupNS(domain, serverAddr)\n\tif err != nil {\n\t\tt.SetErr(err)\n\t\treturn t\n\t}\n\tfor _, s := range servers {\n\t\tips, err := LookupName(s, serverAddr)\n\t\tif err != nil || len(ips) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, ip := range ips {\n\t\t\tt.AddResult(ip, strings.TrimRight(s, \".\"))\n\t\t}\n\t}\n\treturn t\n}", "func getAddresses(endpoint *v1.Endpoints) []string {\n\tserverAddresses := []string{}\n\tif endpoint == nil {\n\t\treturn serverAddresses\n\t}\n\tfor _, subset := range endpoint.Subsets {\n\t\tvar port string\n\t\tif 
len(subset.Ports) > 0 {\n\t\t\tport = strconv.Itoa(int(subset.Ports[0].Port))\n\t\t}\n\t\tif port == \"\" {\n\t\t\tport = \"443\"\n\t\t}\n\t\tfor _, address := range subset.Addresses {\n\t\t\tserverAddresses = append(serverAddresses, net.JoinHostPort(address.IP, port))\n\t\t}\n\t}\n\treturn serverAddresses\n}", "func (o GroupDnsConfigPtrOutput) Nameservers() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *GroupDnsConfig) []string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Nameservers\n\t}).(pulumi.StringArrayOutput)\n}", "func (state *RuntimeState) DomainCertHandler(w http.ResponseWriter, r *http.Request) {\n\n\tfixedUrl := certPattern\n\n\tfixedUrlLength := len(fixedUrl)\n\n\turlPath := r.URL.Path\n\turlPathLength := len(urlPath)\n\n\t//extract domain name from url\n\tdomainName := urlPath[fixedUrlLength:urlPathLength]\n\n\tdomainCert, err := state.GetCertificatefromCache(domainName)\n\tif err != nil {\n\t\tif err == autocert.ErrCacheMiss {\n\t\t\tdomainCert, err := state.GenerateCert(domainName)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"error while getting cert\", err)\n\t\t\t\thttp.Error(w, fmt.Sprint(\"Error while Getting Certificate from cache store\", err), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfmt.Fprintf(w, string(domainCert))\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Println(\"error while getting cert\", err)\n\t\t\thttp.Error(w, fmt.Sprint(\"Error while Getting Certificate from cache store\", err), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\tfmt.Fprintf(w, string(domainCert))\n}", "func GetAllCertif(w http.ResponseWriter, r *http.Request) {\r\n\tparams := mux.Vars(r)\r\n\tallcertif=allcertif[:0]\r\n\t//Verification of the access authorization and collect of the user's certificates\r\n\t\r\n\tif params[\"userid\"]==clientnum{\r\n\t\tfor _, item := range certif {\r\n\t\t\t//We add the user's certifications in 'allcertif'\r\n\t\t\tif item.Ownerid==clientnum {\r\n\t\t\t\tallcertif=append(allcertif, item)\r\n\t\t\t}\r\n\t\t}\r\n\t\t\r\n\t\t//We display the user's information for more clarity\r\n\t\tfor _, itemm := range owner {\r\n\t\t\tif itemm.Userid==clientnum {\r\n\t\t\tfmt.Fprintf(w, \"Client information: \")\r\n\t\t\tjson.NewEncoder(w).Encode(itemm)\r\n\t\t\tfmt.Fprintf(w, \"\\n\")\r\n\t\t\t}\r\n\t\t}\r\n\t\t\r\n\t\t//We display the user's certificates\r\n\t\tfmt.Fprintf(w, \"Certificates of the client: \")\r\n\t\tjson.NewEncoder(w).Encode(allcertif)\r\n\t\t\r\n\t}\telse {\r\n\tfmt.Fprintf(w, \"You can't access this information.\")\r\n\t}\r\n}", "func Lookup(domainName string) (bool, string) {\n\tfor index := 0; index < len(entries); index++ {\n\t\tif domainName == entries[index].domainName {\n\t\t\treturn true, entries[index].ip\n\t\t}\n\t}\n\n\treturn false, \"\"\n}", "func (c *Consul) IPsDC(ctx context.Context, dc, tag string) ([]string, error) {\n\turl := fmt.Sprintf(\"http://%s/v1/catalog/service/statusd-rpc?tag=%s\", c.hostport, tag)\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"request: %s\", err)\n\t}\n\treq = req.WithContext(ctx)\n\n\tresp, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"http call: %s\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tips, err := ParseConsulResponse(resp.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"get nodes list: %s\", err)\n\t}\n\treturn ips, err\n}", "func (o *NetworkDns) GetNameServers() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn o.NameServers\n}", 
"func getMemberClusterApiServerUrls(kubeconfig *clientcmdapi.Config, clusterNames []string) ([]string, error) {\n\tvar urls []string\n\tfor _, name := range clusterNames {\n\t\tif cluster := kubeconfig.Clusters[name]; cluster != nil {\n\t\t\turls = append(urls, cluster.Server)\n\t\t} else {\n\t\t\treturn nil, xerrors.Errorf(\"cluster '%s' not found in kubeconfig\", name)\n\t\t}\n\t}\n\treturn urls, nil\n}", "func (d *dnsClient) GetDomainNames(ctx context.Context) (map[string]string, error) {\n\tdomains, err := d.getDomainsWithCache(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdomainNames := make(map[string]string)\n\tfor _, domain := range domains {\n\t\tdomainNames[domain.DomainName] = CompositeDomainName(domain.DomainName, domain.DomainId)\n\t}\n\treturn domainNames, nil\n}", "func ParseNameServers(fileContent []byte) []string {\n\tfileLines := bytes.Split(fileContent, []byte(\"\\n\"))\n\tvar nameservers []string\n\tfor _, currentLine := range fileLines {\n\t\tvar contentToParse = currentLine\n\t\tvar commentIndicatorIndex = bytes.Index(currentLine, []byte(\"#\"))\n\t\tif commentIndicatorIndex != -1 {\n\t\t\t// Only check the content before the comment section\n\t\t\tcontentToParse = currentLine[:commentIndicatorIndex]\n\t\t}\n\n\t\tserver := nameServerRegex.FindSubmatch(contentToParse)\n\t\tif len(server) == 2 {\n\t\t\taddress := string(server[1])\n\t\t\tif net.ParseIP(address) != nil {\n\t\t\t\tnameservers = append(nameservers, address)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nameservers\n}", "func ComponentCertSecretNames(stackName string) []string {\n\treturn []string{\n\t\tfmt.Sprintf(\"%s-gateway-client-http\", stackName),\n\t\tfmt.Sprintf(\"%s-compactor-http\", stackName),\n\t\tfmt.Sprintf(\"%s-compactor-grpc\", stackName),\n\t\tfmt.Sprintf(\"%s-distributor-http\", stackName),\n\t\tfmt.Sprintf(\"%s-distributor-grpc\", stackName),\n\t\tfmt.Sprintf(\"%s-index-gateway-http\", stackName),\n\t\tfmt.Sprintf(\"%s-index-gateway-grpc\", stackName),\n\t\tfmt.Sprintf(\"%s-ingester-http\", stackName),\n\t\tfmt.Sprintf(\"%s-ingester-grpc\", stackName),\n\t\tfmt.Sprintf(\"%s-querier-http\", stackName),\n\t\tfmt.Sprintf(\"%s-querier-grpc\", stackName),\n\t\tfmt.Sprintf(\"%s-query-frontend-http\", stackName),\n\t\tfmt.Sprintf(\"%s-query-frontend-grpc\", stackName),\n\t\tfmt.Sprintf(\"%s-ruler-http\", stackName),\n\t\tfmt.Sprintf(\"%s-ruler-grpc\", stackName),\n\t}\n}", "func GetURINamesFromPEM(encodedCertificate string) (uris []string, err error) {\n\treturn uriNamesFromPEM([]byte(encodedCertificate))\n}", "func getNameServerAddressListFromCmd(nameSrvAdders *string) *singlylinkedlist.List {\n\tif nameSrvAdders != nil {\n\t\tif *nameSrvAdders == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\tnameSrvAdderArr := strings.Split(*nameSrvAdders, \";\")\n\t\tif len(nameSrvAdderArr) == 0 {\n\t\t\treturn nil\n\t\t}\n\t\tnameServerAddressList := singlylinkedlist.New()\n\t\tfor _, nameServerAddress := range nameSrvAdderArr {\n\t\t\tnameServerAddressList.Add(nameServerAddress)\n\t\t}\n\t\treturn nameServerAddressList\n\t}\n\treturn nil\n}", "func filterResolvDNS(resolvConf []byte, ipv6Enabled bool, netnsEnabled bool) []byte {\n\t// If we're using the host netns, we have nothing to do besides hash the file.\n\tif !netnsEnabled {\n\t\treturn resolvConf\n\t}\n\tcleanedResolvConf := localhostNSRegexp.ReplaceAll(resolvConf, []byte{})\n\t// if IPv6 is not enabled, also clean out any IPv6 address nameserver\n\tif !ipv6Enabled {\n\t\tcleanedResolvConf = nsIPv6Regexp.ReplaceAll(cleanedResolvConf, []byte{})\n\t}\n\t// if the 
resulting resolvConf has no more nameservers defined, add appropriate\n\t// default DNS servers for IPv4 and (optionally) IPv6\n\tif len(getNameservers(cleanedResolvConf)) == 0 {\n\t\tlogrus.Infof(\"No non-localhost DNS nameservers are left in resolv.conf. Using default external servers: %v\", defaultIPv4Dns)\n\t\tdns := defaultIPv4Dns\n\t\tif ipv6Enabled {\n\t\t\tlogrus.Infof(\"IPv6 enabled; Adding default IPv6 external servers: %v\", defaultIPv6Dns)\n\t\t\tdns = append(dns, defaultIPv6Dns...)\n\t\t}\n\t\tcleanedResolvConf = append(cleanedResolvConf, []byte(\"\\n\"+strings.Join(dns, \"\\n\"))...)\n\t}\n\treturn cleanedResolvConf\n}", "func getSubjectAltNamesFromSvcIdentities(serviceIdentities []identity.ServiceIdentity) []*xds_matcher.StringMatcher {\n\tvar matchSANs []*xds_matcher.StringMatcher\n\n\tfor _, si := range serviceIdentities {\n\t\tmatch := xds_matcher.StringMatcher{\n\t\t\tMatchPattern: &xds_matcher.StringMatcher_Exact{\n\t\t\t\tExact: si.String(),\n\t\t\t},\n\t\t}\n\t\tmatchSANs = append(matchSANs, &match)\n\t}\n\n\treturn matchSANs\n}" ]
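Several of the negative snippets above independently re-implement the same pattern: scan a resolv.conf-style file line by line and collect the addresses on "nameserver" lines. A minimal self-contained sketch of that shared pattern follows; the function name and the /etc/resolv.conf path are illustrative, not taken from any single record:

package main

import (
	"bufio"
	"fmt"
	"net"
	"os"
	"strings"
)

// nameservers scans a resolv.conf-style file and returns the addresses
// found on "nameserver" lines, skipping "#" comment lines and entries
// that do not parse as IP addresses.
func nameservers(path string) ([]net.IP, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var ips []net.IP
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		if strings.HasPrefix(line, "#") || !strings.HasPrefix(line, "nameserver") {
			continue
		}
		addr := strings.TrimSpace(strings.TrimPrefix(line, "nameserver"))
		if ip := net.ParseIP(addr); ip != nil {
			ips = append(ips, ip)
		}
	}
	if err := sc.Err(); err != nil {
		return nil, err
	}
	return ips, nil
}

func main() {
	ips, err := nameservers("/etc/resolv.conf")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(ips)
}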
[ "0.66689825", "0.63112724", "0.6159213", "0.6148023", "0.6127139", "0.6075504", "0.6012741", "0.59553957", "0.5781968", "0.5766072", "0.5753809", "0.5711643", "0.57008517", "0.5689711", "0.56568915", "0.5643009", "0.5611596", "0.5599547", "0.5524786", "0.5524786", "0.55241066", "0.55184895", "0.5469429", "0.5438967", "0.54285944", "0.5426247", "0.5400186", "0.5351963", "0.53489536", "0.53450745", "0.53434855", "0.53336996", "0.5319782", "0.5315697", "0.53095776", "0.529208", "0.5290696", "0.5264753", "0.5242906", "0.5224742", "0.5219537", "0.5216305", "0.52098066", "0.51909375", "0.518886", "0.518319", "0.51695186", "0.5162935", "0.5159747", "0.5157588", "0.51568586", "0.51564676", "0.51350677", "0.5131044", "0.51277876", "0.51131636", "0.5101509", "0.5098475", "0.5098156", "0.5093317", "0.50839627", "0.50769544", "0.50713223", "0.5062137", "0.5039654", "0.50391006", "0.50371873", "0.5035802", "0.50206965", "0.50133204", "0.5002592", "0.500007", "0.49989486", "0.4994417", "0.49923304", "0.49882612", "0.49765468", "0.49761373", "0.49677637", "0.49653357", "0.49510044", "0.49487966", "0.49466142", "0.49408084", "0.49262062", "0.4924817", "0.49237806", "0.49178216", "0.4917665", "0.49026418", "0.48976025", "0.4896772", "0.48916948", "0.48915762", "0.48859018", "0.48841363", "0.48801312", "0.48624918", "0.48583987", "0.48522276" ]
0.75606513
0
ParseCertPemFile reads from a PEM file and returns the parsed cert
func ParseCertPemFile(cert_file string) (cert *x509.Certificate, err error) {
	cert_data, err := os.ReadFile(cert_file)
	if err != nil {
		err = fmt.Errorf("read cert file %s: %v", cert_file, err)
		return
	}
	return ParsePem(cert_data)
}
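For context, the ParsePem helper called above is not shown in this row; the following is a minimal sketch of it, assuming the conventional behavior (decode the first PEM block, then parse its DER bytes as an X.509 certificate) and matching the (*x509.Certificate, error) return inferred from the call site.

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

// ParsePem -- hypothetical sketch, not the dataset's actual helper.
// Assumed behavior: decode the first PEM block in cert_data and parse
// its DER payload as an X.509 certificate.
func ParsePem(cert_data []byte) (*x509.Certificate, error) {
	block, _ := pem.Decode(cert_data)
	if block == nil {
		return nil, fmt.Errorf("no PEM block found in input")
	}
	return x509.ParseCertificate(block.Bytes)
}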
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func parseCertificate(path string) (cert *x509.Certificate, err error) {\n var pemData []byte\n\n pemData, err = ioutil.ReadFile(path)\n if err != nil {\n return\n }\n block, rest := pem.Decode(pemData)\n if block == nil || len(rest) != 0 {\n err = errors.New(\"Failed to decode the PEM certificate\")\n return\n }\n cert, err = x509.ParseCertificate(block.Bytes)\n\n return\n}", "func parseCertFile(filename string) (*x509.Certificate, error) {\n\tct, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tblock, _ := pem.Decode(ct)\n\tcert, err := x509.ParseCertificate(block.Bytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cert, nil\n}", "func parsePEMCert(certData []byte) (*x509.Certificate, error) {\n\tpemBlock, trailingData := pem.Decode(certData)\n\tif pemBlock == nil {\n\t\treturn nil, fmt.Errorf(\"invalid PEM data\")\n\t}\n\tif len(trailingData) != 0 {\n\t\treturn nil, fmt.Errorf(\"trailing data after first PEM block\")\n\t}\n\treturn x509.ParseCertificate(pemBlock.Bytes)\n}", "func ParseCertPEM(certPem []byte) (output []*x509.Certificate, err error) {\n\tfor len(certPem) > 0 {\n\t\tvar block *pem.Block\n\t\tblock, certPem = pem.Decode(certPem)\n\t\tif block == nil {\n\t\t\tbreak\n\t\t}\n\t\tif block.Type != BlockTypeCertificate || len(block.Headers) != 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tcert, certErr := x509.ParseCertificate(block.Bytes)\n\t\tif certErr != nil {\n\t\t\terr = ex.New(certErr)\n\t\t\treturn\n\t\t}\n\t\toutput = append(output, cert)\n\t}\n\n\treturn\n}", "func getCertFromPem(pemBytes []byte) (*x509.Certificate, error) {\n\tpemCert, _ := pem.Decode(pemBytes)\n\tif pemCert == nil || pemCert.Type != \"CERTIFICATE\" {\n\t\treturn nil, fmt.Errorf(\"decoding pem bytes: %v\", pemBytes)\n\t}\n\n\tcert, err := x509.ParseCertificate(pemCert.Bytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"parsing x509 cert: %v\", err)\n\t}\n\n\treturn cert, nil\n}", "func loadCertFromPEM(path string) ([]*x509.Certificate, error) {\n\tpemCerts, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcerts := make([]*x509.Certificate, 0, 5)\n\tfor len(pemCerts) > 0 {\n\t\tvar block *pem.Block\n\t\tblock, pemCerts = pem.Decode(pemCerts)\n\t\tif block == nil {\n\t\t\tbreak\n\t\t}\n\t\tif block.Type != \"CERTIFICATE\" || len(block.Headers) != 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tcert, err := x509.ParseCertificate(block.Bytes)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tcerts = append(certs, cert)\n\t}\n\n\treturn certs, nil\n}", "func loadCertFromPEM(path string) ([]*x509.Certificate, error) {\n\tpemCerts, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcerts := make([]*x509.Certificate, 0, 5)\n\tfor len(pemCerts) > 0 {\n\t\tvar block *pem.Block\n\t\tblock, pemCerts = pem.Decode(pemCerts)\n\t\tif block == nil {\n\t\t\tbreak\n\t\t}\n\t\tif block.Type != \"CERTIFICATE\" || len(block.Headers) != 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tcert, err := x509.ParseCertificate(block.Bytes)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tcerts = append(certs, cert)\n\t}\n\n\treturn certs, nil\n}", "func ParseCert(pemcert []byte) (*x509.Certificate, error) {\n\tblock, _ := pem.Decode(pemcert)\n\tcert, err := x509.ParseCertificate(block.Bytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cert, nil\n}", "func parseCertificate(path string) *x509.Certificate {\n\tcertBytes, err := os.ReadFile(path)\n\tExpect(err).NotTo(HaveOccurred())\n\tpemBlock, _ := pem.Decode(certBytes)\n\tcert, err := 
x509.ParseCertificate(pemBlock.Bytes)\n\tExpect(err).NotTo(HaveOccurred())\n\treturn cert\n}", "func LoadPemEncodedCertificate(name string) (certificate *x509.Certificate, err error) {\n\tvar content []byte\n\n\tif content, err = Load(name); err != nil {\n\t\tlog.Fatalf(\"%s\\n\", err)\n\t} else {\n\t\tcertificate, err = tlsutil.ParsePEMEncodedCACert(content)\n\t\tif err != nil {\n\t\t\tlog.Println(\"LoadPemEncodedCertificate\", name)\n\t\t\tpanic(err)\n\t\t}\n\t\tif app.Debug {\n\t\t\tfmt.Println(Jsonify(certificate))\n\t\t}\n\t}\n\treturn certificate, err\n}", "func loadCaCertPem(in io.Reader) ([]byte, error) {\n\tcaCertPemBytes, err := ioutil.ReadAll(in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tblock, _ := pem.Decode(caCertPemBytes)\n\tif block == nil {\n\t\treturn nil, errors.New(\"could not decode pem\")\n\t}\n\tif block.Type != \"CERTIFICATE\" {\n\t\treturn nil, fmt.Errorf(\"ca bundle contains wrong pem type: %q\", block.Type)\n\t}\n\tif _, err := x509.ParseCertificate(block.Bytes); err != nil {\n\t\treturn nil, fmt.Errorf(\"ca bundle contains invalid x509 certificate: %v\", err)\n\t}\n\treturn caCertPemBytes, nil\n}", "func parseCert(crtPEM []byte) (*bcx509.Certificate, error) {\n\tcertBlock, _ := pem.Decode(crtPEM)\n\tif certBlock == nil {\n\t\treturn nil, fmt.Errorf(\"decode pem failed, invalid certificate\")\n\t}\n\n\tcert, err := bcx509.ParseCertificate(certBlock.Bytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"x509 parse cert failed, %s\", err)\n\t}\n\n\treturn cert, nil\n}", "func CertsFromPEM(pemCerts []byte) ([]*x509.Certificate, error) {\n\tok := false\n\tcerts := []*x509.Certificate{}\n\tfor len(pemCerts) > 0 {\n\t\tvar block *pem.Block\n\t\tblock, pemCerts = pem.Decode(pemCerts)\n\t\tif block == nil {\n\t\t\tbreak\n\t\t}\n\t\t// Only use PEM \"CERTIFICATE\" blocks without extra headers\n\t\tif block.Type != \"CERTIFICATE\" || len(block.Headers) != 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tcert, err := x509.ParseCertificate(block.Bytes)\n\t\tif err != nil {\n\t\t\treturn certs, err\n\t\t}\n\n\t\tcerts = append(certs, cert)\n\t\tok = true\n\t}\n\n\tif !ok {\n\t\treturn certs, errors.New(\"could not read any certificates\")\n\t}\n\treturn certs, nil\n}", "func readCert(t *testing.T) []byte {\n\tcert, err := ioutil.ReadFile(\"testdata/root.pem\")\n\tif err != nil {\n\t\tt.Fatalf(\"Error reading cert: %s\", err.Error())\n\t}\n\treturn cert\n}", "func readCert(t *testing.T) []byte {\n\tcert, err := ioutil.ReadFile(\"testdata/root.pem\")\n\tif err != nil {\n\t\tt.Fatalf(\"Error reading cert: %s\", err.Error())\n\t}\n\treturn cert\n}", "func parseCertificate(cert []byte) (*x509.Certificate, error) {\n\tblock, _ := pem.Decode(cert)\n\tif block == nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse PEM certificate\")\n\t}\n\n\tx509Cert, err := x509.ParseCertificate(block.Bytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse certificate: %w\", err)\n\t}\n\n\treturn x509Cert, nil\n}", "func ReadCert(filename string) (*x509.Certificate, error) {\n\tblock, err := ReadBlock(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := IsType(block, certPEMType); err != nil {\n\t\treturn nil, err\n\t}\n\n\tcert, err := x509.ParseCertificate(block.Bytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse certificate: %w\", err)\n\t}\n\n\treturn cert, nil\n}", "func readCertFile(filename string) []byte {\n\tdataBytes, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tzap.S().Fatalf(\"Failed to read certificate or key file `%s` : `%s`\", 
filename, err)\n\t}\n\n\treturn dataBytes\n}", "func pemsCert(encoded []byte) (PemEncoded, *x509.Certificate, error) {\n\tvar block *pem.Block\n\tvar cert *x509.Certificate\n\tvar encPems PemEncoded\n\tvar aPem []byte\n\tfor {\n\t\taPem, encoded = nextPem(encoded)\n\t\t// scan, find and parse PEM blocks\n\t\tblock, _ = pem.Decode(aPem)\n\t\tswitch {\n\t\tcase block == nil: // end of scan, no more PEMs found\n\t\t\treturn encPems, cert, nil\n\t\tcase strings.HasSuffix(block.Type, \"PRIVATE KEY\"):\n\t\t\t// PKCS#1 and PKCS#8 matching to find private key\n\t\t\tencPems.Key = string(pem.EncodeToMemory(block))\n\t\t\tcontinue\n\t\t}\n\t\t// CERTIFICATE PEM blocks (Cert and CA) are left\n\t\tmaybeCert, err := x509.ParseCertificate(block.Bytes)\n\t\tswitch {\n\t\tcase err != nil:\n\t\t\treturn PemEncoded{}, nil, err\n\t\tcase maybeCert.IsCA:\n\t\t\tencPems.CA = string(pem.EncodeToMemory(block))\n\t\tdefault: // the certificate\n\t\t\tcert = maybeCert\n\t\t\tencPems.Cert = string(pem.EncodeToMemory(block))\n\t\t}\n\t}\n}", "func LoadPrivateCertFromFilePEM(certPath, keyPath string) (*FullCert, error) {\n\tcertBytes, err := ioutil.ReadFile(certPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkeyBytes, err := ioutil.ReadFile(keyPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn LoadPrivateCertPEM(certBytes, keyBytes)\n}", "func decodeCertPEM(encoded []byte) (*x509.Certificate, error) {\n\tif len(encoded) == 0 {\n\t\treturn nil, fmt.Errorf(\"empty certificate\")\n\t}\n\tblock, _ := pem.Decode(encoded)\n\tif block == nil {\n\t\treturn nil, fmt.Errorf(\"unable to decode PEM encoded text\")\n\t}\n\n\treturn x509.ParseCertificate(block.Bytes)\n}", "func decodePEM(pemFilePath string) ([]byte, error) {\n\tkeyFile, err := os.Open(pemFilePath)\n\tdefer keyFile.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpemfileinfo, _ := keyFile.Stat()\n\tpembytes := make([]byte, pemfileinfo.Size())\n\n\tbuffer := bufio.NewReader(keyFile)\n\t_, err = buffer.Read(pembytes)\n\n\tdata, _ := pem.Decode([]byte(pembytes))\n\treturn data.Bytes, err\n}", "func parseCerts(certFilePath string, keyFilePath string, passphrase string) ([]tls.Certificate, error) {\n\tif certFilePath == \"\" && keyFilePath == \"\" {\n\t\treturn nil, fmt.Errorf(\"No file path specified for TLS key and certificate in environment variables\")\n\t}\n\n\terrMessage := \"Could not load X509 key pair. \"\n\n\tcert, err := ioutil.ReadFile(certFilePath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(errMessage, err)\n\t}\n\n\tprKeyBytes, err := ioutil.ReadFile(keyFilePath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(errMessage, err)\n\t}\n\n\tprKeyBytes, err = decodePrivateKey(prKeyBytes, passphrase)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(errMessage, err)\n\t}\n\n\ttlsCert, err := tls.X509KeyPair(cert, prKeyBytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(errMessage, err)\n\t}\n\n\treturn []tls.Certificate{tlsCert}, nil\n}", "func parseCerts(certFilePath string, keyFilePath string, passphrase string) ([]tls.Certificate, error) {\n\tif certFilePath == \"\" && keyFilePath == \"\" {\n\t\treturn nil, fmt.Errorf(\"No file path specified for TLS key and certificate in environment variables\")\n\t}\n\n\terrMessage := \"Could not load X509 key pair. 
\"\n\n\tcert, err := ioutil.ReadFile(certFilePath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(errMessage, err)\n\t}\n\n\tprKeyBytes, err := ioutil.ReadFile(keyFilePath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(errMessage, err)\n\t}\n\n\tprKeyBytes, err = decodePrivateKey(prKeyBytes, passphrase)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(errMessage, err)\n\t}\n\n\ttlsCert, err := tls.X509KeyPair(cert, prKeyBytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(errMessage, err)\n\t}\n\n\treturn []tls.Certificate{tlsCert}, nil\n}", "func LoadCertificatesFromPEM(pem io.Reader) ([]*x509.Certificate, error) {\n\tfileBytes, err := io.ReadAll(pem)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn UnmarshalCertificatesFromPEM(fileBytes)\n}", "func ReadPEM(cert []byte) (*x509.Certificate, error) {\n\tder, _ := pem.Decode(cert)\n\tif der == nil {\n\t\treturn nil, errors.New(\"certificate doesn't contain a PEM encoded key\")\n\t}\n\n\tc, err := x509.ParseCertificate(der.Bytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot parse certificate: %s\", err.Error())\n\t}\n\n\treturn c, nil\n}", "func readCertificate(path string) (*x509.Certificate, error) {\n\tcertBytes, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, trace.ConvertSystemError(err)\n\t}\n\tblock, _ := pem.Decode(certBytes)\n\tif block == nil {\n\t\treturn nil, trace.BadParameter(\"failed to decode certificate at %v\", path)\n\t}\n\tcert, err := x509.ParseCertificate(block.Bytes)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\treturn cert, nil\n}", "func UnmarshalCertificatesFromPEM(pemBytes []byte) ([]*x509.Certificate, error) {\n\tresult := []*x509.Certificate{}\n\tremaining := pemBytes\n\n\tfor len(remaining) > 0 {\n\t\tvar certDer *pem.Block\n\t\tcertDer, remaining = pem.Decode(remaining)\n\n\t\tif certDer == nil {\n\t\t\treturn nil, errors.New(\"error during PEM decoding\")\n\t\t}\n\n\t\tcert, err := x509.ParseCertificate(certDer.Bytes)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresult = append(result, cert)\n\t}\n\treturn result, nil\n}", "func PEMToCertificate(pemBytes []byte) (*x509.Certificate, []byte, error) {\n\tpemBlock, rest := pem.Decode(pemBytes)\n\tif pemBlock == nil {\n\t\treturn nil, rest, fmt.Errorf(\"encryptoutil: no PEM data found\")\n\t}\n\n\tcert, err := x509.ParseCertificate(pemBlock.Bytes)\n\tif err != nil {\n\t\treturn nil, rest, fmt.Errorf(\"encryptoutil: certificate failed to parse: %v\", err)\n\t}\n\n\treturn cert, rest, nil\n}", "func PEMCertificate(p string) (cert *x509.Certificate, err error) {\n\tder, err := PEMDecode(p, \"CERTIFICATE\")\n\tif err != nil {\n\t\treturn nil, PEMCertificateDecodeError{Err: err}\n\t}\n\tcert, err = x509.ParseCertificate(der)\n\tif err != nil {\n\t\treturn nil, PEMCertParseX509Error{Err: err}\n\t}\n\treturn\n}", "func getPemCert(token *jwt.Token) (string, error) {\n\tcert := \"\"\n\t// get the JSON Web Keys file from auth0\n\tresp, err := http.Get(\"https://\" + os.Getenv(\"AUTH0_DOMAIN\") + \"/.well-known/jwks.json\")\n\n\tif err != nil {\n\t\treturn cert, err\n\t}\n\tdefer resp.Body.Close()\n\n\t// unmarshall jwks into Jwks struct\n\tvar jwks = Jwks{}\n\terr = json.NewDecoder(resp.Body).Decode(&jwks)\n\n\tif err != nil {\n\t\treturn cert, err\n\t}\n\n\t// iterate through keys until a matching key is found\n\tfor k, _ := range jwks.Keys {\n\t\tif token.Header[\"kid\"] == jwks.Keys[k].Kid {\n\t\t\t// create a pem certificate from the matching key from the x5c JWK property\n\t\t\t// \"The x.509 certificate chain. 
The first entry in the array is the certificate\n\t\t\t// to use for token verification; the other certificates can be used to verify\n\t\t\t// this first certificate.\"\n\t\t\t// https://auth0.com/docs/tokens/references/jwks-properties\n\t\t\tcert = \"-----BEGIN CERTIFICATE-----\\n\" + jwks.Keys[k].X5c[0] + \"\\n-----END CERTIFICATE-----\"\n\t\t}\n\t}\n\n\tif cert == \"\" {\n\t\terr := errors.New(\"Unable to find appropriate key.\")\n\t\treturn cert, err\n\t}\n\n\treturn cert, nil\n}", "func parseCaCert(cc []byte) ([]byte, error) {\n\n\t// decode Pem from certificate into block\n\tblock, rest := pem.Decode([]byte(cc))\n\tif block == nil {\n\t\tif klog.V(3) {\n\t\t\ts := string(rest)\n\t\t\tklog.Infof(\"tried to decode pem: %v\", s)\n\t\t}\n\t\treturn nil, errors.New(\"error decoding the pem block\")\n\t}\n\n\t// parse the decoded pem block to x509 encoded block\n\tb, err := tryParseX509(block)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// encodes the x509 encoded block to a valid x509 certificate encoded pem.\n\tpem := pem.EncodeToMemory(&pem.Block{\n\t\tType: \"CERTIFICATE\",\n\t\tBytes: b,\n\t})\n\n\treturn pem, nil\n}", "func certificatesFromFile(file string) ([]*x509.Certificate, error) {\n\tif len(file) == 0 {\n\t\treturn nil, errors.New(\"error reading certificates from an empty filename\")\n\t}\n\tpemBlock, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcerts, err := CertsFromPEM(pemBlock)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error reading %s: %s\", file, err)\n\t}\n\treturn certs, nil\n}", "func ParsePemCSR(csrPem []byte) (*x509.CertificateRequest, error) {\n\tblock, _ := pem.Decode(csrPem)\n\tif block == nil {\n\t\treturn nil, fmt.Errorf(\"certificate signing request is not properly encoded\")\n\t}\n\tcsr, err := x509.ParseCertificateRequest(block.Bytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse X.509 certificate signing request: %s\", err)\n\t}\n\treturn csr, nil\n}", "func testOpenSSLParse(t *testing.T, certBytes []byte) {\n\ttmpCertFile, err := ioutil.TempFile(\"\", \"testCertificate\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.Remove(tmpCertFile.Name()) // clean up\n\n\tif _, err := tmpCertFile.Write(certBytes); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\topensslCMD := exec.Command(\"openssl\", \"pkcs7\", \"-inform\", \"der\", \"-in\", tmpCertFile.Name())\n\t_, err = opensslCMD.Output()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := tmpCertFile.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n}", "func getCert(data []byte) ([]byte, error) {\n\tvar certs []byte\n\tfor {\n\t\tpemBlock, rest := pem.Decode(data)\n\t\tif pemBlock == nil {\n\t\t\tbreak\n\t\t}\n\t\tif pemBlock.Type == certType {\n\t\t\tblock := pem.EncodeToMemory(pemBlock)\n\t\t\tcerts = append(certs, block...)\n\t\t}\n\t\tdata = rest\n\t}\n\treturn certs, nil\n}", "func DecodePEMCertificateToX509(pemData string) (*x509.Certificate, error) {\n\tdecodedData, _ := pem.Decode([]byte(pemData))\n\tif decodedData == nil {\n\t\treturn nil, errors.New(\"could not decode PEM data from input\")\n\t}\n\tx509Cert, err := x509.ParseCertificate(decodedData.Bytes)\n\tif err != nil {\n\t\treturn nil, errors.New(\"could not parse X509 data from input\")\n\t}\n\treturn x509Cert, nil\n}", "func ParsePEM(data, header string) ([]byte, error) {\n\tblock, rest := pem.Decode([]byte(data))\n\tif len(rest) != 0 || block == nil {\n\t\treturn nil, fmt.Errorf(\"not a valid %q PEM\", header)\n\t}\n\tif block.Type != header {\n\t\treturn nil, 
fmt.Errorf(\"expecting %q, got %q\", header, block.Type)\n\t}\n\treturn block.Bytes, nil\n}", "func LoadCertificateFromPEMBytes(pemBytes []byte) (*Certificate, error) {\n\tblock, _ := pem.Decode(pemBytes)\n\tif block == nil {\n\t\treturn nil, fmt.Errorf(\"Unable to decode PEM encoded certificate\")\n\t}\n\treturn bytesToCert(block.Bytes)\n}", "func getPemCert(token *jwt.Token) (string, error) {\n\tcert := \"\"\n\n\tresp, err := http.Get(\"https://dev-q7h0r088.us.auth0.com/.well-known/jwks.json\")\n\tif err != nil {\n\t\treturn cert, err\n\t}\n\tdefer resp.Body.Close()\n\n\t// JSONWebKeys are defined by Auth0, part of their handshake.\n\tvar jwks = Jwks{}\n\terr = json.NewDecoder(resp.Body).Decode(&jwks)\n\tif err != nil {\n\t\treturn cert, err\n\t}\n\n\tfor k := range jwks.Keys {\n\t\tif token.Header[\"kid\"] == jwks.Keys[k].Kid {\n\t\t\tcert = \"-----BEGIN CERTIFICATE-----\\n\" + jwks.Keys[k].X5c[0] + \"\\n-----END CERTIFICATE-----\"\n\t\t}\n\t}\n\n\tif cert == \"\" {\n\t\treturn cert, errors.New(\"Unable to find appropriate key.\")\n\t}\n\n\treturn cert, nil\n}", "func ReadCertificate(certPath string) (interface{}, error) {\n\tbytes, err := ioutil.ReadFile(certPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tblock, _ := pem.Decode(bytes)\n\tvar cert *x509.Certificate\n\tcert, err = x509.ParseCertificate(block.Bytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cert.PublicKey, nil\n}", "func LoadPrivateCertPEM(bytesCert []byte, keyBytes []byte) (*FullCert, error) {\n\tcertDERBlock, _ := pem.Decode(bytesCert)\n\tif certDERBlock == nil {\n\t\treturn nil, errors.New(\"No certificate data read from PEM\")\n\t}\n\tcert, err := x509.ParseCertificate(certDERBlock.Bytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tkeyBlock, _ := pem.Decode(keyBytes)\n\tif keyBlock == nil {\n\t\treturn nil, errors.New(\"No key data read from PEM\")\n\t}\n\tpriv, err := x509.ParsePKCS1PrivateKey(keyBlock.Bytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &FullCert{\n\t\tCert: cert,\n\t\tKey: priv,\n\t\tDerBytes: certDERBlock.Bytes,\n\t}, nil\n}", "func ReadPEM(b []byte) (*rsa.PrivateKey, error) {\n\n\tder, _ := pem.Decode(b)\n\tif der == nil {\n\t\treturn nil, errors.New(\"private key file doesn't contain a PEM encoded key\")\n\t}\n\n\tkey, err := x509.ParsePKCS1PrivateKey(der.Bytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot parse signing key file: %s\", err.Error())\n\t}\n\n\treturn key, nil\n}", "func ReadCertificate(publicKeyFile, privateKeyFile string) (*KeyPair, error) {\n\tcert := new(KeyPair)\n\n\tprivKey, errRead := ioutil.ReadFile(privateKeyFile)\n\tif errRead != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read private key: %w\", errRead)\n\t}\n\n\tprivPemBlock, _ := pem.Decode(privKey)\n\n\t// Note that we use PKCS1 to parse the private key here.\n\tparsedPrivKey, errParse := x509.ParsePKCS1PrivateKey(privPemBlock.Bytes)\n\tif errParse != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse private key: %w\", errParse)\n\t}\n\n\tcert.PrivateKey = parsedPrivKey\n\n\tpubKey, errRead := ioutil.ReadFile(publicKeyFile)\n\tif errRead != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read public key: %w\", errRead)\n\t}\n\n\tpublicPemBlock, _ := pem.Decode(pubKey)\n\n\tparsedPubKey, errParse := x509.ParseCertificate(publicPemBlock.Bytes)\n\tif errParse != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse public key: %w\", errParse)\n\t}\n\n\tcert.PublicKey = parsedPubKey\n\n\treturn cert, nil\n}", "func readCAFile(f string) ([]byte, error) {\n\tdata, err := 
ioutil.ReadFile(f)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to load specified CA cert %s: %s\", f, err)\n\t}\n\treturn data, nil\n}", "func ParsePEM(pemBytes []byte) (*JWK, error) {\n\tfor len(pemBytes) > 0 {\n\t\tpemBlock, rest := pem.Decode(pemBytes)\n\t\tif pemBlock == nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid PEM file\")\n\t\t}\n\t\tpemBytes = rest\n\n\t\tpr, ok := blockProcessors[pemBlock.Type]\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"unsupported PEM block type: %s\", pemBlock.Type)\n\t\t}\n\n\t\tjwk, err := pr(pemBlock.Bytes)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif jwk == nil {\n\t\t\tcontinue\n\t\t}\n\t\treturn jwk, nil\n\t}\n\n\treturn nil, fmt.Errorf(\"no supported PEM block found\")\n}", "func readCACert(caCertPath string) ([]byte, error) {\n\tcaCert, err := os.ReadFile(caCertPath)\n\tif err != nil {\n\t\tlog.Errorf(\"failed to read CA cert, cert. path: %v, error: %v\", caCertPath, err)\n\t\treturn nil, fmt.Errorf(\"failed to read CA cert, cert. path: %v, error: %v\", caCertPath, err)\n\t}\n\n\tb, _ := pem.Decode(caCert)\n\tif b == nil {\n\t\treturn nil, fmt.Errorf(\"could not decode pem\")\n\t}\n\tif b.Type != \"CERTIFICATE\" {\n\t\treturn nil, fmt.Errorf(\"ca certificate contains wrong type: %v\", b.Type)\n\t}\n\tif _, err := x509.ParseCertificate(b.Bytes); err != nil {\n\t\treturn nil, fmt.Errorf(\"ca certificate parsing returns an error: %v\", err)\n\t}\n\n\treturn caCert, nil\n}", "func ParseCert(binCert []byte) (common.Address, common.Address, string, error) {\n\tvar contrAddr, parentAddr common.Address\n\tvar retDesc string\n\tca, err := x509.ParseCertificate(binCert)\n\tif err!=nil {\n\t\treturn common.Address{}, common.Address{}, \"\", err\n\t}\n\n\tfor i:=0; i<len(ca.Subject.Names); i++ {\n\t\tretDesc += fmt.Sprint(ca.Subject.Names[i].Value) + \" \";\n\t}\n\t// iterate in the extension to get the information\n\tfor _, element := range ca.Extensions {\n\t\tif element.Id.String() == \"1.2.752.115.33.2\" { // CA Address\n\t\t\tfmt.Printf(\"\\tCaContractIdentifier: %+#+x\\n\", element.Value)\n\t\t\tval:=element.Value[2:]\n\t\t\tif( len(val) != len(common.Address{}.Bytes()) ) {\n\t\t\t\treturn common.Address{}, common.Address{}, \"\",\n\t\t\t\t\tGeneralError{\"ParseCert: wrong length of CA addr\"}\n\t\t\t}\n\t\t\tcontrAddr = common.BytesToAddress(val)\n\t\t}\n\t\tif element.Id.String() == \"1.2.752.115.33.1\" { //Parent Address\n\t\t\tfmt.Printf(\"\\tIssuerCaContractIdentifier: %+#+x\\n\", element.Value)\n\t\t\tval:=element.Value[2:]\n\t\t\tif( len(val) != len(common.Address{}.Bytes()) ) {\n\t\t\t\treturn common.Address{}, common.Address{}, \"\",\n\t\t\t\t\tGeneralError{\"ParseCert: wrong length of CA addr\"}\n\t\t\t}\n\t\t\tparentAddr = common.BytesToAddress(val)\n\t\t}\n\t}\n\treturn contrAddr, parentAddr, retDesc, nil\n}", "func parsePemPrivateKey(block *pem.Block) (interface{}, error) {\n\tif !strings.HasSuffix(block.Type, \"PRIVATE KEY\") {\n\t\treturn nil, fmt.Errorf(\"unknown pem type %s, expecting PRIVATE KEY suffix\", block.Type)\n\t}\n\tder := block.Bytes\n\tif key, err := x509.ParsePKCS1PrivateKey(der); err == nil {\n\t\treturn key, nil\n\t}\n\tif key, err := x509.ParsePKCS8PrivateKey(der); err == nil {\n\t\tswitch key := key.(type) {\n\t\tcase *rsa.PrivateKey, *ecdsa.PrivateKey:\n\t\t\treturn key, nil\n\t\tdefault:\n\t\t\treturn nil, errors.New(\"tls: found unknown private key type in PKCS#8 wrapping\")\n\t\t}\n\t}\n\tif key, err := x509.ParseECPrivateKey(der); err == nil {\n\t\treturn key, nil\n\t}\n\n\treturn nil, 
errors.New(\"tls: failed to parse private key\")\n}", "func PEMCertificates(pems ...string) (certs []*x509.Certificate, err error) {\n\tfor i, pem := range pems {\n\t\tcert, err := PEMCertificate(pem)\n\t\tif err != nil {\n\t\t\treturn nil, PEMCertificateError{\n\t\t\t\tIdx: i,\n\t\t\t\tErr: err,\n\t\t\t}\n\t\t}\n\t\tcerts = append(certs, cert)\n\t}\n\treturn\n}", "func GetX509CertificateFromPEM(cert []byte) (*x509.Certificate, error) {\n\tblock, _ := pem.Decode(cert)\n\tif block == nil {\n\t\treturn nil, errors.New(\"Failed to PEM decode certificate\")\n\t}\n\tx509Cert, err := x509.ParseCertificate(block.Bytes)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Error parsing certificate\")\n\t}\n\treturn x509Cert, nil\n}", "func ReadCerts(filename string) ([]*x509.Certificate, error) {\n\tblocks, err := ReadBlocks(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar certs []*x509.Certificate\n\n\tfor _, block := range blocks {\n\t\tif err := IsType(block, certPEMType); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tcert, err := x509.ParseCertificate(block.Bytes)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse certificate: %w\", err)\n\t\t}\n\n\t\tcerts = append(certs, cert)\n\t}\n\n\treturn certs, nil\n}", "func LoadCertificatesFrom(pemFile string) (*x509.CertPool, error) {\n\tcaCert, err := ioutil.ReadFile(pemFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcertificates := x509.NewCertPool()\n\tcertificates.AppendCertsFromPEM(caCert)\n\treturn certificates, nil\n}", "func DecodePEMCertificates(crtb []byte) ([]*x509.Certificate, error) {\n\tcerts := []*x509.Certificate{}\n\tfor len(crtb) > 0 {\n\t\tvar err error\n\t\tvar cert *x509.Certificate\n\n\t\tcert, crtb, err = decodeCertificatePEM(crtb)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif cert != nil {\n\t\t\t// it's a cert, add to pool\n\t\t\tcerts = append(certs, cert)\n\t\t}\n\t}\n\n\tif len(certs) == 0 {\n\t\treturn nil, errors.New(\"no certificates found\")\n\t}\n\n\treturn certs, nil\n}", "func NewCert(certfile, keyfile string, parseFunc func([]byte, []byte) (tls.Certificate, error)) (*tls.Certificate, error) {\n\tcert, err := ioutil.ReadFile(certfile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tkey, err := ioutil.ReadFile(keyfile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif parseFunc == nil {\n\t\tparseFunc = tls.X509KeyPair\n\t}\n\n\ttlsCert, err := parseFunc(cert, key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &tlsCert, nil\n}", "func ParseCertificate(asn1Data []byte) (*x509.Certificate, error) {\n\tvar cert certificate\n\trest, err := asn1.Unmarshal(asn1Data, &cert)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(rest) > 0 {\n\t\treturn nil, asn1.SyntaxError{Msg: \"trailing data\"}\n\t}\n\n\treturn parseCertificate(&cert)\n}", "func PEMToCertificate(certPEM []byte) (*x509.Certificate, error) {\n\treturn tls.PEMToCertificate(certPEM)\n}", "func parseKeyFile(filename string) (interface{}, error) {\n\tkt, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tblock, _ := pem.Decode(kt)\n\tkey, err := x509.ParsePKCS8PrivateKey(block.Bytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn key, nil\n}", "func (raw asn1RawCertificates) Parse() ([]*x509.Certificate, error) {\n\tif len(raw.RawCerts) == 0 {\n\t\treturn nil, errors.New(\"rawCertificates is empty\")\n\t}\n\n\tvar val asn1.RawValue\n\tif _, err := asn1.Unmarshal(raw.RawCerts, &val); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn 
x509.ParseCertificates(val.Bytes)\n}", "func ParseCertificate(in []byte) (*ssh.Certificate, error) {\n\tpub, err := ssh.ParsePublicKey(in)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error parsing certificate\")\n\t}\n\tcert, ok := pub.(*ssh.Certificate)\n\tif !ok {\n\t\treturn nil, errors.Errorf(\"error parsing certificate: %T is not a certificate\", pub)\n\t}\n\treturn cert, nil\n}", "func ReadRootCA(RootCACertFile string, RootCAKeyFile string) (rootCA []byte, rootKey []byte, err error) {\n\n // Check if files exist\n rootCAExists, err := FileExists(RootCACertFile)\n if err != nil {\n return nil, nil, err\n }\n\n rootKeyExists, err := FileExists(RootCAKeyFile)\n if err != nil {\n return nil, nil, err\n }\n\n // We need both key and cert to exist\n if (rootCAExists && rootKeyExists) {\n\n // If files exist, read rootCA first\n rootCA, err = ioutil.ReadFile(RootCACertFile)\n if err != nil {\n return nil, nil, errors.New(fmt.Sprintf(\"Error reading %s file\", RootCACertFile))\n }\n\n // Now check if rootCA is a valid DER certificate\n if _, err = x509.ParseCertificate(rootCA); err != nil {\n return nil, nil, err\n }\n\n // Read rootKey\n rootKey, err = ioutil.ReadFile(RootCAKeyFile)\n if err != nil {\n return nil, nil, errors.New(fmt.Sprintf(\"Error reading %s file\", RootCAKeyFile))\n }\n\n // Check if rootKey is a valid key - we already have tlsdump.ParsePrivateKey that does this\n if _, _, err = ParsePrivateKey(rootKey); err != nil {\n return nil, nil, err\n }\n\n return rootCA, rootKey, nil\n\n } else {\n // Custom error text\n var customError = \"\"\n\n if !rootCAExists {\n customError += fmt.Sprintf(\"%s does not exist\", RootCACertFile)\n }\n\n if !rootKeyExists {\n customError += fmt.Sprintf(\"\\n%s does not exist\", RootCAKeyFile)\n }\n\n return nil, nil, errors.New(customError)\n }\n\n // We should not get there (because both if and else have returns) but just in case\n return nil, nil, err\n\n}", "func NewCert(certRaw []byte) (*Cert, error) {\n\tc := &Cert{\n\t\tPem: string(certRaw),\n\t}\n\t// c := certPool.Get().(*Cert)\n\t// c.Pem = string(certRaw)\n\tb, _ := pem.Decode([]byte(certRaw))\n\tif b == nil {\n\t\treturn nil, errors.New(\"decode cert pem error\")\n\t}\n\tcert, err := x509.ParseCertificate(b.Bytes)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"parse cert error\")\n\t}\n\tc.CN = cert.Subject.CommonName\n\tc.OU = strings.Join(cert.Subject.OrganizationalUnit, \",\")\n\tc.Org = strings.Join(cert.Issuer.Organization, \",\")\n\treturn c, nil\n}", "func parseCert(\n\tstamp dnsstamps.ServerStamp,\n\tcurrentCert *Cert,\n\tproviderName string,\n\tcertStr string,\n) (cert *Cert, err error) {\n\tcertBytes, err := unpackTxtString(certStr)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unpacking txt record: %w\", err)\n\t}\n\n\tcert = &Cert{}\n\terr = cert.Deserialize(certBytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"deserializing cert for: %w\", err)\n\t}\n\n\tlog.Debug(\"[%s] fetched certificate %d\", providerName, cert.Serial)\n\n\tif !cert.VerifyDate() {\n\t\treturn nil, ErrInvalidDate\n\t}\n\n\tif !cert.VerifySignature(stamp.ServerPk) {\n\t\treturn nil, ErrInvalidCertSignature\n\t}\n\n\tif cert.Serial < currentCert.Serial {\n\t\tlog.Debug(\"[%v] cert %d superseded by a previous certificate\", providerName, cert.Serial)\n\n\t\treturn nil, nil\n\t}\n\n\tif cert.Serial > currentCert.Serial {\n\t\treturn cert, nil\n\t}\n\n\tif cert.EsVersion <= currentCert.EsVersion {\n\t\tlog.Debug(\"[%v] keeping the previous, preferred crypto construction\", 
providerName)\n\n\t\treturn nil, nil\n\t}\n\n\tlog.Debug(\n\t\t\"[%v] upgrading the construction from %v to %v\",\n\t\tproviderName,\n\t\tcurrentCert.EsVersion,\n\t\tcert.EsVersion,\n\t)\n\n\treturn cert, nil\n}", "func ParseFirstCertFromBlock(b []byte) (*x509.Certificate, error) {\n\tcerts, err := parseCertFromBlock(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(certs) <= 0 {\n\t\treturn nil, fmt.Errorf(\"no certs found\")\n\t}\n\n\treturn certs[0], nil\n}", "func LoadCertificateFromFile(filename string) (*Certificate, error) {\n\tcertificateData, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, fmt.Errorf(\"Unable to read certificate file from disk: %s\", err)\n\t}\n\treturn LoadCertificateFromPEMBytes(certificateData)\n}", "func loadCertificate() []webrtc.Certificate {\n\tcertFile, err := ioutil.ReadFile(\"cert.pem\")\n\tpanicIfErr(err)\n\n\tkeyFile, err := ioutil.ReadFile(\"key.pem\")\n\tpanicIfErr(err)\n\n\tcertPem, _ := pem.Decode(certFile)\n\tkeyPem, _ := pem.Decode(keyFile)\n\n\tcert, err := x509.ParseCertificate(certPem.Bytes)\n\tpanicIfErr(err)\n\n\tprivateKey, err := x509.ParsePKCS8PrivateKey(keyPem.Bytes)\n\tpanicIfErr(err)\n\n\treturn []webrtc.Certificate{webrtc.CertificateFromX509(privateKey, cert)}\n}", "func PKCS7ToPEM(data []byte) ([]byte, error) {\n\n var d []byte\n prefix := []byte{'-', '-', '-', '-', '-', 'B', 'E', 'G', 'I', 'N'}\n if bytes.HasPrefix(data, prefix) {\n result, _ := pem.Decode([]byte(data))\n d = result.Bytes\n } else {\n d = data\n }\n\n p7, err := pkcs7.Parse(d)\n\n if err != nil {\n return nil, err\n }\n\n var certsPem []byte\n for _, cert := range p7.Certificates {\n block := pem.Block{\n Type: \"CERTIFICATE\",\n Bytes: cert.Raw,\n }\n certsPem = append(certsPem, pem.EncodeToMemory(&block)...)\n }\n\n return certsPem, err\n}", "func decodeIdentityFile(idFile io.Reader) (*IdentityFile, error) {\n\tscanner := bufio.NewScanner(idFile)\n\tvar ident IdentityFile\n\t// Subslice of scanner's buffer pointing to current line\n\t// with leading and trailing whitespace trimmed.\n\tvar line []byte\n\t// Attempt to scan to the next line.\n\tscanln := func() bool {\n\t\tif !scanner.Scan() {\n\t\t\tline = nil\n\t\t\treturn false\n\t\t}\n\t\tline = bytes.TrimSpace(scanner.Bytes())\n\t\treturn true\n\t}\n\t// Check if the current line starts with prefix `p`.\n\thasPrefix := func(p string) bool {\n\t\treturn bytes.HasPrefix(line, []byte(p))\n\t}\n\t// Get an \"owned\" copy of the current line.\n\tcloneln := func() []byte {\n\t\tln := make([]byte, len(line))\n\t\tcopy(ln, line)\n\t\treturn ln\n\t}\n\t// Scan through all lines of identity file. Lines with a known prefix\n\t// are copied out of the scanner's buffer. All others are ignored.\n\tfor scanln() {\n\t\tswitch {\n\t\tcase isSSHCert(line):\n\t\t\tident.Certs.SSH = append(cloneln(), '\\n')\n\t\tcase hasPrefix(\"@cert-authority\"):\n\t\t\tident.CACerts.SSH = append(ident.CACerts.SSH, append(cloneln(), '\\n'))\n\t\tcase hasPrefix(\"-----BEGIN\"):\n\t\t\t// Current line marks the beginning of a PEM block. 
Consume all\n\t\t\t// lines until a corresponding END is found.\n\t\t\tvar pemBlock []byte\n\t\t\tfor {\n\t\t\t\tpemBlock = append(pemBlock, line...)\n\t\t\t\tpemBlock = append(pemBlock, '\\n')\n\t\t\t\tif hasPrefix(\"-----END\") {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif !scanln() {\n\t\t\t\t\t// If scanner has terminated in the middle of a PEM block, either\n\t\t\t\t\t// the reader encountered an error, or the PEM block is a fragment.\n\t\t\t\t\tif err := scanner.Err(); err != nil {\n\t\t\t\t\t\treturn nil, trace.Wrap(err)\n\t\t\t\t\t}\n\t\t\t\t\treturn nil, trace.BadParameter(\"invalid PEM block (fragment)\")\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Decide where to place the pem block based on\n\t\t\t// which pem blocks have already been found.\n\t\t\tswitch {\n\t\t\tcase ident.PrivateKey == nil:\n\t\t\t\tident.PrivateKey = pemBlock\n\t\t\tcase ident.Certs.TLS == nil:\n\t\t\t\tident.Certs.TLS = pemBlock\n\t\t\tdefault:\n\t\t\t\tident.CACerts.TLS = append(ident.CACerts.TLS, pemBlock)\n\t\t\t}\n\t\t}\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\treturn &ident, nil\n}", "func parsePrivateKey(path string) crypto.PrivateKey {\n\tpkBytes, err := os.ReadFile(path)\n\tExpect(err).NotTo(HaveOccurred())\n\tpemBlock, _ := pem.Decode(pkBytes)\n\tprivateKey, err := x509.ParsePKCS8PrivateKey(pemBlock.Bytes)\n\tExpect(err).NotTo(HaveOccurred())\n\treturn privateKey\n}", "func validateCertificatePEM(certPEM string, options *x509.VerifyOptions) ([]*x509.Certificate, error) {\n\tcerts, err := cert.ParseCertsPEM([]byte(certPEM))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(certs) < 1 {\n\t\treturn nil, fmt.Errorf(\"invalid/empty certificate data\")\n\t}\n\n\tif options != nil {\n\t\t// Ensure we don't report errors for expired certs or if\n\t\t// the validity is in the future.\n\t\t// Not that this can be for the actual certificate or any\n\t\t// intermediates in the CA chain. 
This allows the router to\n\t\t// still serve an expired/valid-in-the-future certificate\n\t\t// and lets the client to control if it can tolerate that\n\t\t// (just like for self-signed certs).\n\t\t_, err = certs[0].Verify(*options)\n\t\tif err != nil {\n\t\t\tif invalidErr, ok := err.(x509.CertificateInvalidError); !ok || invalidErr.Reason != x509.Expired {\n\t\t\t\treturn certs, fmt.Errorf(\"error verifying certificate: %s\", err.Error())\n\t\t\t}\n\t\t}\n\t}\n\n\treturn certs, nil\n}", "func FromFile(certFile, privKeyFile string) (*Entity, error) {\n\tcert, err := tls.LoadX509KeyPair(certFile, privKeyFile)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to load x509 key pair: %v\", err)\n\t}\n\tif cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0]); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse certificate: %v\", err)\n\t}\n\n\treturn &Entity{PrivateKey: cert.PrivateKey, Certificate: &cert}, nil\n}", "func certificate(authOpts *AuthOptions) (cert *tls.Certificate, err error) {\n\tvar c tls.Certificate\n\tswitch {\n\tcase authOpts.pemFile != \"\":\n\t\tc, err = apns.LoadPemFile(authOpts.pemFile)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tcert = &c\n\n\tcase authOpts.cerFile != \"\" && authOpts.keyFile != \"\":\n\t\tc, err = tls.LoadX509KeyPair(authOpts.cerFile, authOpts.keyFile)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tcert = &c\n\n\tdefault:\n\t\tcert, err = nil, nil\n\t}\n\treturn\n}", "func LoadKeyPairFrom(pemFile string, privateKeyPemFile string) (tls.Certificate, error) {\n\ttargetPrivateKeyPemFile := privateKeyPemFile\n\tif len(targetPrivateKeyPemFile) <= 0 {\n\t\ttargetPrivateKeyPemFile = pemFile\n\t}\n\treturn tls.LoadX509KeyPair(pemFile, targetPrivateKeyPemFile)\n}", "func nextPem(encoded []byte) (aPem []byte, theRest []byte) {\n\tstart := bytes.Index(encoded, []byte(\"-----BEGIN\"))\n\tif start >= 0 { // finds the PEM and pulls it to decode\n\t\tencoded = encoded[start:] // clip pre-pem junk\n\t\t// find the end\n\t\tend := bytes.Index(encoded, []byte(\"-----END\")) + 8\n\t\tend = end + bytes.Index(encoded[end:], []byte(\"-----\")) + 5\n\t\t// the PEM padded with newlines (what pem.Decode likes)\n\t\taPem = append([]byte(\"\\n\"), encoded[:end]...)\n\t\taPem = append(aPem, []byte(\"\\n\")...)\n\t\ttheRest = encoded[end:] // the rest\n\t}\n\treturn aPem, theRest\n}", "func LoadPrivateCertFromFile(certPath, keyPath string) (*FullCert, error) {\n\tcertBytes, err := ioutil.ReadFile(certPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkeyBytes, err := ioutil.ReadFile(keyPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn LoadPrivateCert(certBytes, keyBytes)\n}", "func (c WebCredential) GetPem(client *Client) ([]CredentialPayload, error) {\n\n\t// Get P12 and password.\n\tcreds, err := c.GetP12(client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Filename constructed by replacing .p12 suffix with .pem\n\tbundle := strings.TrimSuffix(c.Bundle, \".p12\") + \".pem\"\n\n\tf := bytes.Buffer{}\n\n\tblocks, err := pkcs12.ToPEM(creds.P12, string(creds.Password))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, block := range blocks {\n\n\t\t// Some things (OpenSSL) want to see EC PRIVATE KEY\n\t\tif block.Type == \"PRIVATE KEY\" {\n\t\t\tblock.Type = \"EC PRIVATE KEY\"\n\t\t}\n\n\t\t// Some things (OpenSSL) don't like headers\n\t\tblock.Headers = map[string]string{}\n\t\tf.Write(pem.EncodeToMemory(block))\n\n\t}\n\n\tpay := make([]byte, len(f.Bytes()))\n\tcopy(pay, f.Bytes())\n\n\treturn 
[]CredentialPayload{\n\t\t{\n\t\t\t\"pem\",\n\t\t\t\"pem\",\n\t\t\t\"PEM bundle\",\n\t\t\t\"store\",\n\t\t\tbundle,\n\t\t\tpay,\n\t\t},\n\t}, nil\n\n}", "func LoadCertificate(path string) (*x509.Certificate, error) {\n\trawData, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, errors.WithStackTrace(err)\n\t}\n\tcertificatePemBlock, _ := pem.Decode(rawData)\n\tcertificate, err := x509.ParseCertificate(certificatePemBlock.Bytes)\n\tif err != nil {\n\t\treturn nil, errors.WithStackTrace(err)\n\t}\n\treturn certificate, nil\n}", "func ParsePEM(block *pem.Block) (crypto.Signer, error) {\n\tswitch block.Type {\n\tcase \"PRIVATE KEY\":\n\t\tprivateKey, err := x509.ParsePKCS8PrivateKey(block.Bytes)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ts, ok := privateKey.(crypto.Signer)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"unable to cast to a signer\")\n\t\t}\n\t\treturn s, nil\n\tcase \"EC PRIVATE KEY\":\n\t\treturn x509.ParseECPrivateKey(block.Bytes)\n\tcase \"RSA PRIVATE KEY\":\n\t\treturn x509.ParsePKCS1PrivateKey(block.Bytes)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported type '%s'\", block.Type)\n\t}\n}", "func CertToPEMFile(cert *x509.Certificate, filename string) error {\r\n\treturn CertDERToPEMFile(cert.Raw, filename)\r\n}", "func parseCertificates(der [][]byte) ([]*x509.Certificate, error) {\n\tvar err error\n\tcerts := make([]*x509.Certificate, len(der))\n\tfor i, c := range der {\n\t\tcerts[i], err = x509.ParseCertificate(c)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn certs, nil\n}", "func ExtractPrivateKeyFromX509PEM(keyPEM []byte) (*rsa.PrivateKey, error) {\n\n\tblock, _ := pem.Decode(keyPEM)\n\tif block == nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse PEM block containing the private key:%v\", keyPEM)\n\t}\n\n\tswitch block.Type {\n\tcase \"RSA PRIVATE KEY\":\n\t\trsa, err := x509.ParsePKCS1PrivateKey(block.Bytes)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn rsa, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"private key: unsupported key type %q\", block.Type)\n\t}\n\n}", "func ParseFileSimple(certFile string) (parseCerSimple model.ParseCerSimple, err error) {\n\tbelogs.Info(\"parseCerSimple(): certFile:\", certFile)\n\tif strings.HasSuffix(certFile, \".cer\") {\n\t\treturn ParseCerSimpleModel(certFile)\n\t}\n\treturn parseCerSimple, errors.New(\"unknown file type\")\n}", "func ReadCertificate(data []byte) (certificate *Certificate, remainder []byte, err error) {\n\tcertificate, err = NewCertificate(data)\n\tif err != nil && err.Error() == \"certificate parsing warning: certificate data is longer than specified by length\" {\n\t\tremainder = certificate.ExcessBytes()\n\t\terr = nil\n\t}\n\treturn\n}", "func NewFromPEM(req *csr.CertificateRequest, keyFile string) (cert, csrPEM []byte, err error) {\n\tif req.CA != nil {\n\t\tif req.CA.Expiry != \"\" {\n\t\t\tCAPolicy.Default.ExpiryString = req.CA.Expiry\n\t\t\tCAPolicy.Default.Expiry, err = time.ParseDuration(req.CA.Expiry)\n\t\t}\n\n\t\tif req.CA.PathLength != 0 {\n\t\t\tsigner.MaxPathLen = req.CA.PathLength\n\t\t}\n\t}\n\n\tprivData, err := ioutil.ReadFile(keyFile)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tpriv, err := helpers.ParsePrivateKeyPEM(privData)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar sigAlgo x509.SignatureAlgorithm\n\tswitch priv := priv.(type) {\n\tcase *rsa.PrivateKey:\n\t\tbitLength := priv.PublicKey.N.BitLen()\n\t\tswitch {\n\t\tcase bitLength >= 4096:\n\t\t\tsigAlgo = x509.SHA512WithRSA\n\t\tcase bitLength 
>= 3072:\n\t\t\tsigAlgo = x509.SHA384WithRSA\n\t\tcase bitLength >= 2048:\n\t\t\tsigAlgo = x509.SHA256WithRSA\n\t\tdefault:\n\t\t\tsigAlgo = x509.SHA1WithRSA\n\t\t}\n\tcase *ecdsa.PrivateKey:\n\t\tswitch priv.Curve {\n\t\tcase elliptic.P521():\n\t\t\tsigAlgo = x509.ECDSAWithSHA512\n\t\tcase elliptic.P384():\n\t\t\tsigAlgo = x509.ECDSAWithSHA384\n\t\tcase elliptic.P256():\n\t\t\tsigAlgo = x509.ECDSAWithSHA256\n\t\tdefault:\n\t\t\tsigAlgo = x509.ECDSAWithSHA1\n\t\t}\n\tdefault:\n\t\tsigAlgo = x509.UnknownSignatureAlgorithm\n\t}\n\n\tvar tpl = x509.CertificateRequest{\n\t\tSubject: req.Name(),\n\t\tSignatureAlgorithm: sigAlgo,\n\t\tDNSNames: req.Hosts,\n\t}\n\n\tcsrPEM, err = x509.CreateCertificateRequest(rand.Reader, &tpl, priv)\n\tif err != nil {\n\t\tlog.Errorf(\"failed to generate a CSR: %v\", err)\n\t\t// The use of CertificateError was a matter of some\n\t\t// debate; it is the one edge case in which a new\n\t\t// error category specifically for CSRs might be\n\t\t// useful, but it was deemed that one edge case did\n\t\t// not a new category justify.\n\t\terr = cferr.Wrap(cferr.CertificateError, cferr.BadRequest, err)\n\t\treturn\n\t}\n\n\tp := &pem.Block{\n\t\tType: \"CERTIFICATE REQUEST\",\n\t\tBytes: csrPEM,\n\t}\n\tcsrPEM = pem.EncodeToMemory(p)\n\n\ts, err := local.NewSigner(priv, nil, signer.DefaultSigAlgo(priv), nil)\n\tif err != nil {\n\t\tlog.Errorf(\"failed to create signer: %v\", err)\n\t\treturn\n\t}\n\ts.SetPolicy(CAPolicy)\n\n\tsignReq := signer.SignRequest{Request: string(csrPEM)}\n\tcert, err = s.Sign(signReq)\n\treturn\n}", "func UnmarshalCert(bytes []byte) (InfoData, error) {\n\tif bytes == nil {\n\t\treturn nil, fmt.Errorf(\"no data given\")\n\t}\n\tdata := &CertificateJSONData{}\n\terr := json.Unmarshal(bytes, data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewCertificateInfoData(data.PrivateKey, data.Certificate), nil\n}", "func GetPrivateKeyFromFile(keyFile string) (*ecdsa.PrivateKey, error) {\n\tkeyPEMBlock, err := os.ReadFile(keyFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t//Following logic is derived from steps in\n\t//https://golang.org/src/crypto/tls/tls.go:X509KeyPair()\n\tvar keyDERBlock *pem.Block\n\tvar skippedBlockTypes []string\n\tfor {\n\t\tkeyDERBlock, keyPEMBlock = pem.Decode(keyPEMBlock)\n\t\tif keyDERBlock == nil {\n\t\t\tif len(skippedBlockTypes) == 0 {\n\t\t\t\treturn nil, errors.New(\"Failed to find any PEM data in key input\")\n\t\t\t}\n\t\t\tif len(skippedBlockTypes) == 1 && skippedBlockTypes[0] == \"CERTIFICATE\" {\n\t\t\t\treturn nil, errors.New(\"Got a certificate instead of key\")\n\t\t\t}\n\t\t\treturn nil, errors.New(\"No PEM block found with type PRIVATE KEY\")\n\t\t}\n\t\tif keyDERBlock.Type == \"PRIVATE KEY\" ||\n\t\t\tstrings.HasSuffix(keyDERBlock.Type, \"EC PRIVATE KEY\") {\n\t\t\tbreak\n\t\t}\n\t\tskippedBlockTypes = append(skippedBlockTypes, keyDERBlock.Type)\n\t}\n\tif key, err := x509.ParsePKCS8PrivateKey(keyDERBlock.Bytes); err == nil {\n\t\tvar pkey *ecdsa.PrivateKey\n\t\tvar ok bool\n\t\tif pkey, ok = key.(*ecdsa.PrivateKey); !ok {\n\t\t\treturn nil, errors.New(\"Private key is not ecdsa type\")\n\t\t}\n\t\treturn pkey, nil\n\t}\n\tif key, err := x509.ParseECPrivateKey(keyDERBlock.Bytes); err == nil {\n\t\treturn key, nil\n\t} else {\n\t\treturn nil, err\n\t}\n}", "func loadCertificate() {\n\tcertMutex.Lock()\n\tdefer certMutex.Unlock()\n\tif certificateData, err := ioutil.ReadFile(CertificateFile); err != nil {\n\t\tlog.Printf(\"Unable to read certificate file from disk: %s\", 
err)\n\t\tinitCertificate()\n\t} else {\n\t\tblock, _ := pem.Decode(certificateData)\n\t\tif block == nil {\n\t\t\tlog.Print(\"Unable to decode PEM encoded certificate\")\n\t\t\tinitCertificate()\n\t\t} else {\n\t\t\tcertificate, err = x509.ParseCertificate(block.Bytes)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(\"Unable to decode X509 certificate data\")\n\t\t\t\tinitCertificate()\n\t\t\t}\n\t\t\tlog.Printf(\"Read certificate\")\n\t\t}\n\t}\n\n\t// Add ourselves to the trust store\n\tTrustedParents.AddCert(certificate)\n}", "func convertPemToDer(asn1Data string) ([]byte, error) {\n\tvar block *pem.Block\n\tvar pemByte []byte\n\n\tblock, pemByte = pem.Decode([]byte(asn1Data))\n\n\tfmt.Println(block)\n\n\tfmt.Println(pemByte)\n\n\t// just a single certificate\n\tcert, err := x509.ParseCertificate(block.Bytes)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn []byte{}, err\n\t}\n\treturn cert.Raw, err\n\n}", "func (c *JSONCertificateWithRaw) ParseRaw() (*Certificate, error) {\n\treturn ParseCertificate(c.Raw)\n}", "func parsePvtKey(pemKey []byte) (*ecdsa.PrivateKey, error) {\n\tkey, _ := pem.Decode(pemKey)\n\tif key == nil {\n\t\treturn nil, fmt.Errorf(\"failed to decode PEM Key\")\n\t}\n\n\tecKey, err := x509.ParseECPrivateKey(key.Bytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse EC Private Key: %w\", err)\n\t}\n\n\treturn ecKey, nil\n}", "func ReadCertificateAuthority(publicKeyFile, privateKeyFile string) (*KeyPair, error) {\n\troot := new(KeyPair)\n\n\trootKey, errRead := ioutil.ReadFile(privateKeyFile)\n\tif errRead != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read private key: %w\", errRead)\n\t}\n\n\tprivPemBlock, _ := pem.Decode(rootKey)\n\n\t// Note that we use PKCS8 to parse the private key here.\n\trootPrivKey, errParse := x509.ParsePKCS8PrivateKey(privPemBlock.Bytes)\n\tif errParse != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse private key: %w\", errParse)\n\t}\n\n\troot.PrivateKey = rootPrivKey.(*rsa.PrivateKey)\n\n\trootCert, errRead := ioutil.ReadFile(publicKeyFile)\n\tif errRead != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read public key: %w\", errRead)\n\t}\n\n\tpublicPemBlock, _ := pem.Decode(rootCert)\n\n\trootPubCrt, errParse := x509.ParseCertificate(publicPemBlock.Bytes)\n\tif errParse != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse public key: %w\", errParse)\n\t}\n\n\troot.PublicKey = rootPubCrt\n\n\treturn root, nil\n}", "func TryDecodePemData(data []byte) []byte {\n\tb, src := pem.Decode(data)\n\tif b != nil {\n\t\treturn b.Bytes\n\t} else {\n\t\treturn src\n\t}\n}", "func GetCertPoolFromPEMData(pemData []string) *x509.CertPool {\n\tcertPool := x509.NewCertPool()\n\tfor _, pem := range pemData {\n\t\tcertPool.AppendCertsFromPEM([]byte(pem))\n\t}\n\treturn certPool\n}", "func GetFileCertificates(target string, debug bool) ([]*x509.Certificate, string, error) {\n\tif debug {\n\t\tlog.Printf(\"Resolving '%s'...\", target)\n\t}\n\n\tpath := strings.TrimLeft(target, fileSchemaStr)\n\n\tabsPath, err := filepath.Abs(path)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\n\tif debug {\n\t\tlog.Printf(\"Absolute path is %s\", absPath)\n\t}\n\n\tcerts, err := loadCertificates(absPath, debug)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\n\thostname := \"\"\n\tif len(certs) > 0 {\n\t\thostname = certs[0].Subject.CommonName\n\t}\n\n\treturn certs, hostname, nil\n}", "func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {\n\tvar err error\n\n\t// Parse PEM block\n\tvar block *pem.Block\n\tif block, _ = 
pem.Decode(key); block == nil {\n\t\treturn nil, ErrKeyMustBePEMEncoded\n\t}\n\n\t// Parse the key\n\tvar parsedKey interface{}\n\tif parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar pkey *ecdsa.PrivateKey\n\tvar ok bool\n\tif pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {\n\t\treturn nil, ErrNotECPrivateKey\n\t}\n\n\treturn pkey, nil\n}", "func processMainCert(m android.ModuleBase, certPropValue string, certificates []Certificate, ctx android.ModuleContext) []Certificate {\n\tif android.SrcIsModule(certPropValue) == \"\" {\n\t\tvar mainCert Certificate\n\t\tif certPropValue != \"\" {\n\t\t\tdefaultDir := ctx.Config().DefaultAppCertificateDir(ctx)\n\t\t\tmainCert = Certificate{\n\t\t\t\tPem: defaultDir.Join(ctx, certPropValue+\".x509.pem\"),\n\t\t\t\tKey: defaultDir.Join(ctx, certPropValue+\".pk8\"),\n\t\t\t}\n\t\t} else {\n\t\t\tpem, key := ctx.Config().DefaultAppCertificate(ctx)\n\t\t\tmainCert = Certificate{\n\t\t\t\tPem: pem,\n\t\t\t\tKey: key,\n\t\t\t}\n\t\t}\n\t\tcertificates = append([]Certificate{mainCert}, certificates...)\n\t}\n\n\tif !m.Platform() {\n\t\tcertPath := certificates[0].Pem.String()\n\t\tsystemCertPath := ctx.Config().DefaultAppCertificateDir(ctx).String()\n\t\tif strings.HasPrefix(certPath, systemCertPath) {\n\t\t\tenforceSystemCert := ctx.Config().EnforceSystemCertificate()\n\t\t\tallowed := ctx.Config().EnforceSystemCertificateAllowList()\n\n\t\t\tif enforceSystemCert && !inList(m.Name(), allowed) {\n\t\t\t\tctx.PropertyErrorf(\"certificate\", \"The module in product partition cannot be signed with certificate in system.\")\n\t\t\t}\n\t\t}\n\t}\n\n\treturn certificates\n}", "func parseCertChain(certChain []string) (*x509.Certificate, *x509.CertPool, *x509.CertPool, error) {\n\tidentity, err := parseCert([]byte(certChain[0]))\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tintermediates := x509.NewCertPool()\n\tfor _, cert := range certChain[1 : len(certChain)-1] {\n\t\ti, err := parseCert([]byte(cert))\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\tintermediates.AddCert(i)\n\t}\n\n\troots := x509.NewCertPool()\n\troot, err := parseCert([]byte(certChain[len(certChain)-1]))\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\troots.AddCert(root)\n\n\treturn identity, intermediates, roots, nil\n}", "func tryParseX509(block *pem.Block) ([]byte, error) {\n\t// if certificate is already x509 encoded, return the certificate, otherwise continue and parse.\n\t_, err := x509.ParseCertificate(block.Bytes)\n\tif err == nil {\n\t\treturn block.Bytes, nil\n\t}\n\n\tb, err := pkcs7.Parse(block.Bytes)\n\tif err == nil {\n\t\tif len(b.Certificates) == 0 {\n\t\t\treturn nil, fmt.Errorf(\"expected one or more certificates\")\n\t\t}\n\t\treturn b.Certificates[0].Raw, nil\n\t}\n\n\terr = fmt.Errorf(\"parsing PKCS7: %w\", err)\n\treturn nil, err\n}", "func X509KeyPair(certPEMBlock, keyPEMBlock []byte) (cert Certificate, err error) {\n\tvar certDERBlock *pem.Block\n\tfor {\n\t\tcertDERBlock, certPEMBlock = pem.Decode(certPEMBlock)\n\t\tif certDERBlock == nil {\n\t\t\tbreak\n\t\t}\n\t\tif certDERBlock.Type == \"CERTIFICATE\" {\n\t\t\tcert.Certificate = append(cert.Certificate, certDERBlock.Bytes)\n\t\t}\n\t}\n\n\tif len(cert.Certificate) == 0 {\n\t\terr = errors.New(\"crypto/tls: failed to parse certificate PEM data\")\n\t\treturn\n\t}\n\n\tvar keyDERBlock *pem.Block\n\tfor {\n\t\tkeyDERBlock, keyPEMBlock = pem.Decode(keyPEMBlock)\n\t\tif keyDERBlock == nil {\n\t\t\terr = errors.New(\"crypto/tls: 
failed to parse key PEM data\")\n\t\t\treturn\n\t\t}\n\t\tif keyDERBlock.Type == \"PRIVATE KEY\" || strings.HasSuffix(keyDERBlock.Type, \" PRIVATE KEY\") {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tcert.PrivateKey, err = parsePrivateKey(keyDERBlock.Bytes)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tx509Cert, err := x509.ParseCertificate(cert.Certificate[0])\n\tif err != nil {\n\t\treturn\n\t}\n\n\tswitch pub := x509Cert.PublicKey.(type) {\n\tcase *rsa.PublicKey:\n\t\tpriv, ok := cert.PrivateKey.(*rsa.PrivateKey)\n\t\tif !ok {\n\t\t\terr = errors.New(\"crypto/tls: private key type does not match public key type\")\n\t\t\treturn\n\t\t}\n\t\tif pub.N.Cmp(priv.N) != 0 {\n\t\t\terr = errors.New(\"crypto/tls: private key does not match public key\")\n\t\t\treturn\n\t\t}\n\tcase *ecdsa.PublicKey:\n\t\tpriv, ok := cert.PrivateKey.(*ecdsa.PrivateKey)\n\t\tif !ok {\n\t\t\terr = errors.New(\"crypto/tls: private key type does not match public key type\")\n\t\t\treturn\n\n\t\t}\n\t\tif pub.X.Cmp(priv.X) != 0 || pub.Y.Cmp(priv.Y) != 0 {\n\t\t\terr = errors.New(\"crypto/tls: private key does not match public key\")\n\t\t\treturn\n\t\t}\n\tdefault:\n\t\terr = errors.New(\"crypto/tls: unknown public key algorithm\")\n\t\treturn\n\t}\n\n\treturn\n}", "func CheckCertificate(crt string) {\n\t// Read and parse the PEM certificate file\n\tpemData, err := ioutil.ReadFile(crt)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tblock, rest := pem.Decode([]byte(pemData))\n\tif block == nil || len(rest) > 0 {\n\t\tlog.Fatal(\"Certificate decoding error\")\n\t}\n\tcert, err := x509.ParseCertificate(block.Bytes)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Print the certificate\n\tresult, err := certinfo.CertificateText(cert)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Print(result)\n}" ]
[ "0.77496916", "0.75687236", "0.75398403", "0.7404843", "0.7188307", "0.70729065", "0.70729065", "0.7034357", "0.695529", "0.6716552", "0.6715112", "0.66574687", "0.6649889", "0.65145785", "0.65145785", "0.6448182", "0.64413345", "0.6420031", "0.64128685", "0.6380914", "0.63287586", "0.6325988", "0.6304928", "0.6304928", "0.6277045", "0.6259233", "0.624631", "0.6212882", "0.62042", "0.61241335", "0.6111173", "0.6089684", "0.60872865", "0.60574955", "0.6055173", "0.6051127", "0.6040207", "0.600383", "0.60011613", "0.59316045", "0.58883727", "0.58685946", "0.580676", "0.5777994", "0.5750608", "0.57166374", "0.5715598", "0.5708804", "0.5692728", "0.56884074", "0.5680215", "0.567825", "0.5677869", "0.56624395", "0.5644777", "0.560937", "0.56065416", "0.5594256", "0.5581112", "0.55776215", "0.5570288", "0.5568972", "0.5562209", "0.5558961", "0.5551693", "0.55453587", "0.5503894", "0.5488332", "0.5485573", "0.5481234", "0.5473627", "0.54648715", "0.545304", "0.5450038", "0.5446921", "0.5444788", "0.5443095", "0.5428819", "0.5422212", "0.5416995", "0.54002357", "0.5398327", "0.53878933", "0.53814346", "0.53681606", "0.53614587", "0.53603786", "0.5338749", "0.53325886", "0.5328506", "0.53159744", "0.5271638", "0.5270206", "0.52695346", "0.52585775", "0.5252943", "0.52502066", "0.52228767", "0.51927114", "0.51896864" ]
0.85923237
0
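The certificate-handling negatives above all reduce to the same two-step pattern: pem.Decode to peel a PEM block off the input, then x509.ParseCertificate (or a key parser) on the block's DER bytes. Below is a minimal sketch of that pattern using only the Go standard library; the helper name parseCertPEM and the error texts are illustrative, not taken from any of the quoted repositories.

package main

import (
	"crypto/x509"
	"encoding/pem"
	"errors"
	"fmt"
	"os"
)

// parseCertPEM returns the first CERTIFICATE block found in pemData, parsed.
func parseCertPEM(pemData []byte) (*x509.Certificate, error) {
	for {
		var block *pem.Block
		block, pemData = pem.Decode(pemData)
		if block == nil {
			return nil, errors.New("no CERTIFICATE block found in PEM data")
		}
		if block.Type == "CERTIFICATE" {
			return x509.ParseCertificate(block.Bytes)
		}
		// Not a certificate block (key, CSR, ...): keep scanning the rest.
	}
}

func main() {
	data, err := os.ReadFile(os.Args[1]) // expects a path to a PEM file
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	cert, err := parseCertPEM(data)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("subject:", cert.Subject)
}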
GetFingerprint returns the SHA256 fingerprint of a cert
func GetFingerprint(cert_file string) string {
	cert, err := ParseCertPemFile(cert_file)
	if err != nil {
		log.Printf("GetFingerprint: ParseCert %s: %v", cert_file, err)
		return ""
	}
	return SHA256SumRaw(cert.Raw)
}
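GetFingerprint above leans on two helpers whose bodies are not part of this record, ParseCertPemFile and SHA256SumRaw. The sketch below is a standard-library equivalent written under the assumption that they mean "read and parse a PEM certificate file" and "hex-encode a SHA-256 digest of the raw DER bytes"; the real SHA256SumRaw may format the digest differently (for example colon-separated uppercase, as openssl prints it).

package main

import (
	"crypto/sha256"
	"crypto/x509"
	"encoding/hex"
	"encoding/pem"
	"fmt"
	"os"
)

// certFingerprintSHA256 reads a PEM certificate file and returns the
// lowercase hex SHA-256 digest of its DER bytes (cert.Raw above).
func certFingerprintSHA256(certFile string) (string, error) {
	pemData, err := os.ReadFile(certFile)
	if err != nil {
		return "", err
	}
	block, _ := pem.Decode(pemData)
	if block == nil || block.Type != "CERTIFICATE" {
		return "", fmt.Errorf("%s: no CERTIFICATE block", certFile)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return "", err
	}
	sum := sha256.Sum256(cert.Raw)
	return hex.EncodeToString(sum[:]), nil
}

func main() {
	// Usage: go run . server.crt
	fp, err := certFingerprintSHA256(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(fp)
}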
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Fingerprint(certificate []byte) FingerprintBytes {\n\treturn sha256.Sum256(certificate)\n}", "func (c *CertInfo) Fingerprint() string {\n\tfingerprint, err := CertFingerprintStr(string(c.PublicKey()))\n\t// Parsing should never fail, since we generated the cert ourselves,\n\t// but let's check the error for good measure.\n\tif err != nil {\n\t\tpanic(\"invalid public key material\")\n\t}\n\n\treturn fingerprint\n}", "func GetFingerPrint(signer ssh.Signer) (string, string) {\n\treturn ssh.FingerprintLegacyMD5(signer.PublicKey()),\n\t\tssh.FingerprintSHA256(signer.PublicKey())\n}", "func getFingerprint(fn string) (fingerprint string, err kv.Error) {\n\tdata, errGo := ioutil.ReadFile(fn)\n\tif errGo != nil {\n\t\treturn \"\", kv.Wrap(errGo).With(\"filename\", fn).With(\"stack\", stack.Trace().TrimRuntime())\n\t}\n\n\tkey, err := extractPubKey(data)\n\tif err != nil {\n\t\treturn \"\", err.With(\"filename\", fn)\n\t}\n\n\treturn ssh.FingerprintSHA256(key), nil\n}", "func HandleCertFingerprintRequest(w http.ResponseWriter, req *http.Request) {\n\t//logger.Info.Printf(\"Cert Hash\")\n\tfingerprint, err := GetCertificateFingerprint()\n\tif err != nil {\n\t\tlogger.Error.Printf(\"%s\\n\", err.Error())\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(fingerprint)\n}", "func spkiFingerprint(cert string) (fingerprint, error) {\n\tprivateKeyFile, err := os.Open(cert)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%s: %w\", cert, err)\n\t}\n\tdefer privateKeyFile.Close()\n\n\tpemFileInfo, err := privateKeyFile.Stat()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar size int64 = pemFileInfo.Size()\n\tpemBytes := make([]byte, size)\n\tbuffer := bufio.NewReader(privateKeyFile)\n\t_, err = buffer.Read(pemBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Get first block of PEM file\n\tdata, rest := pem.Decode([]byte(pemBytes))\n\tconst certificateBlock = \"CERTIFICATE\"\n\tif data.Type != certificateBlock {\n\t\tfor len(rest) > 0 {\n\t\t\tdata, rest = pem.Decode(rest)\n\t\t\tif data.Type == certificateBlock {\n\t\t\t\t// Sign the CERTIFICATE block with SHA1\n\t\t\t\th := sha1.New()\n\t\t\t\t_, err := h.Write(data.Bytes)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\treturn fingerprint(h.Sum(nil)), nil\n\t\t\t}\n\t\t}\n\t\treturn nil, errors.New(\"Cannot find CERTIFICATE in file\")\n\t}\n\th := sha1.New()\n\t_, err = h.Write(data.Bytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn fingerprint(h.Sum(nil)), nil\n}", "func (socket *Socket) CertFP() (string, error) {\n\tvar tlsConn, isTLS = socket.conn.(*tls.Conn)\n\tif !isTLS {\n\t\treturn \"\", errNotTLS\n\t}\n\n\t// ensure handehake is performed, and timeout after a few seconds\n\ttlsConn.SetDeadline(time.Now().Add(handshakeTimeout))\n\terr := tlsConn.Handshake()\n\ttlsConn.SetDeadline(time.Time{})\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tpeerCerts := tlsConn.ConnectionState().PeerCertificates\n\tif len(peerCerts) < 1 {\n\t\treturn \"\", errNoPeerCerts\n\t}\n\n\trawCert := sha256.Sum256(peerCerts[0].Raw)\n\tfingerprint := hex.EncodeToString(rawCert[:])\n\n\treturn fingerprint, nil\n}", "func TestRequestFingerprint(t *testing.T) {\n\n\treq, err := http.NewRequest(\"GET\", \"http://example.com\", nil)\n\texpect(t, err, nil)\n\n\tfp := getRequestFingerprint(req)\n\n\texpect(t, fp, \"92a65ed4ca2b7100037a4cba9afd15ea\")\n\n}", "func fingerprintKey(s string) (fingerprint string, err error) {\n\tdata, err := 
base64.StdEncoding.DecodeString(s)\n\tif err != nil {\n\t\treturn \"\", errors.New(\"Can't base64 decode original key\")\n\t}\n\tsha256 := sha256.New()\n\tsha256.Write(data)\n\tb64 := base64.StdEncoding.EncodeToString(sha256.Sum(nil))\n\treturn strings.TrimRight(b64, \"=\"), nil\n}", "func Fingerprint(input string) (result string, err error) {\n\treturn parser.FingerprintToHexStr(input)\n}", "func fingerprintSHA256(key ssh.PublicKey) string {\r\n\thash := sha256.Sum256(key.Marshal())\r\n\tb64hash := base64.StdEncoding.EncodeToString(hash[:])\r\n\treturn strings.TrimRight(b64hash, \"=\")\r\n}", "func FingerprintSHA256(b []byte) string {\n\tdigest := sha256.Sum256(b)\n\treturn hex.EncodeToString(digest[:])\n}", "func (a *AuthorizationRequest) GetFingerprint() string {\n\tif a == nil || a.Fingerprint == nil {\n\t\treturn \"\"\n\t}\n\treturn *a.Fingerprint\n}", "func (a *Authorization) GetFingerprint() string {\n\tif a == nil || a.Fingerprint == nil {\n\t\treturn \"\"\n\t}\n\treturn *a.Fingerprint\n}", "func fingerprint(str []byte) uint64 {\n\tvar hi = hash32(str, 0, len(str), 0)\n\tvar lo = hash32(str, 0, len(str), 102072)\n\tif (hi == 0) && (lo == 0 || lo == 1) {\n\t\t// Turn 0/1 into another fingerprint\n\t\thi ^= 0x130f9bef\n\t\tlo ^= 0x94a0a928\n\t}\n\treturn (uint64(hi) << 32) | uint64(lo&0xffffffff)\n}", "func (a *AuthorizationUpdateRequest) GetFingerprint() string {\n\tif a == nil || a.Fingerprint == nil {\n\t\treturn \"\"\n\t}\n\treturn *a.Fingerprint\n}", "func (*DoubleSchema) Fingerprint() (*Fingerprint, error) {\n\treturn &Fingerprint{\n\t\t115, 10, 154, 140, 97, 22, 129, 215, 238, 244, 66, 224, 60, 22, 199, 13,\n\t\t19, 188, 163, 235, 139, 151, 123, 180, 3, 234, 255, 82, 23, 106, 242, 84,\n\t}, nil\n}", "func (pr *PkgDecoder) Fingerprint() [8]byte {\n\tvar fp [8]byte\n\tcopy(fp[:], pr.elemData[len(pr.elemData)-8:])\n\treturn fp\n}", "func (keyRing *KeyRing) GetFingerprint() (string, error) {\n\tfor _, entity := range keyRing.entities {\n\t\tfp := entity.PrimaryKey.Fingerprint\n\t\treturn hex.EncodeToString(fp[:]), nil\n\t}\n\treturn \"\", errors.New(\"can't find public key\")\n}", "func (*BytesSchema) Fingerprint() (*Fingerprint, error) {\n\treturn &Fingerprint{\n\t\t154, 229, 7, 169, 221, 57, 238, 91, 124, 126, 40, 93, 162, 192, 132, 101,\n\t\t33, 200, 174, 141, 128, 254, 234, 229, 80, 78, 12, 152, 29, 83, 245, 250,\n\t}, nil\n}", "func HashCert(certificate *x509.Certificate) string {\n\tspkiHash := sha256.Sum256(certificate.RawSubjectPublicKeyInfo)\n\treturn \"sha256:\" + strings.ToLower(hex.EncodeToString(spkiHash[:]))\n}", "func (nodes List) Fingerprint() []byte {\n\treturn nodes[0].fingerprint\n}", "func KeyFingerprint(code uint64, pubKeyValue []byte) string {\n\tmulticodecValue := multicodec(code)\n\tmcLength := len(multicodecValue)\n\tbuf := make([]uint8, mcLength+len(pubKeyValue))\n\tcopy(buf, multicodecValue)\n\tcopy(buf[mcLength:], pubKeyValue)\n\n\treturn fmt.Sprintf(\"z%s\", base58.Encode(buf))\n}", "func Fingerprint(slice []byte) []byte {\n\tfingerprint := make([]byte, 6)\n\tcopy(fingerprint, slice)\n\treturn fingerprint\n}", "func SSHFingerprintSHA256(key ssh.PublicKey) string {\n\thash := sha256.Sum256(key.Marshal())\n\tb64hash := base64.StdEncoding.EncodeToString(hash[:])\n\treturn strings.TrimRight(b64hash, \"=\")\n}", "func (o SslCertOutput) Sha1Fingerprint() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *SslCert) pulumi.StringOutput { return v.Sha1Fingerprint }).(pulumi.StringOutput)\n}", "func generateFingerprint() (string, error) {\n\tbytes := make([]byte, 
32)\n\t_, err := rand.Read(bytes)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn base64.URLEncoding.EncodeToString(bytes), nil\n}", "func TokenFingerprint(tok string) string {\n\tdigest := sha256.Sum256([]byte(tok))\n\treturn hex.EncodeToString(digest[:16])\n}", "func (a *Age) Fingerprint(ctx context.Context, id string) string {\n\treturn id\n}", "func (a *Age) Fingerprint(ctx context.Context, id string) string {\n\treturn id\n}", "func (w *XPubWallet) Fingerprint() string {\n\t// Note: the xpub key is not used as the fingerprint, because it is\n\t// partially sensitive data\n\taddr := \"\"\n\tif len(w.Entries) == 0 {\n\t\tif !w.IsEncrypted() {\n\t\t\tentries, err := w.generateEntries(1, 0)\n\t\t\tif err != nil {\n\t\t\t\tlogger.WithError(err).Panic(\"Fingerprint failed to generate initial entry for empty wallet\")\n\t\t\t}\n\t\t\taddr = entries[0].Address.String()\n\t\t}\n\t} else {\n\t\taddr = w.Entries[0].Address.String()\n\t}\n\n\treturn fmt.Sprintf(\"%s-%s\", w.Type(), addr)\n}", "func probe(c *dns.Client, addr string, f *fingerprint) *fingerprint {\n\tm := f.msg()\n\tr, err := c.Exchange(m, addr)\n\tif err != nil {\n\t\treturn errorToFingerprint(err)\n\t}\n\treturn toFingerprint(r)\n}", "func Fingerprint(scope *Scope, data tf.Output, method tf.Output) (fingerprint tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"Fingerprint\",\n\t\tInput: []tf.Input{\n\t\t\tdata, method,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}", "func KeyFingerprint(code uint64, pubKeyValue []byte) string {\n\treturn fingerprint.KeyFingerprint(code, pubKeyValue)\n}", "func (o SshPublicKeyOutput) Fingerprint() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *SshPublicKey) pulumi.StringOutput { return v.Fingerprint }).(pulumi.StringOutput)\n}", "func (o OrganizationSecurityPolicyOutput) Fingerprint() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *OrganizationSecurityPolicy) pulumi.StringOutput { return v.Fingerprint }).(pulumi.StringOutput)\n}", "func Fingerprint(path Path) PathFingerprint {\n\tmeta := path.Metadata()\n\tif meta == nil || len(meta.Interfaces) == 0 {\n\t\treturn \"\"\n\t}\n\th := sha256.New()\n\tfor _, intf := range meta.Interfaces {\n\t\tbinary.Write(h, binary.BigEndian, intf.IA)\n\t\tbinary.Write(h, binary.BigEndian, intf.ID)\n\t}\n\treturn PathFingerprint(h.Sum(nil))\n}", "func fingerprint(cm *v1.ConfigMap) uint64 {\n\thash := fnv.New64a()\n\tdata := json.NewEncoder(hash)\n\tdata.Encode(cm.Labels)\n\tdata.Encode(cm.Data)\n\treturn hash.Sum64()\n}", "func (o InstanceServerCaCertOutput) Sha1Fingerprint() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v InstanceServerCaCert) *string { return v.Sha1Fingerprint }).(pulumi.StringPtrOutput)\n}", "func (_Registry *RegistryCaller) GetCertificate(opts *bind.CallOpts, _id *big.Int) (struct {\n\tIssuer common.Address\n\tTopic *big.Int\n\tValidityCall []byte\n\tData []byte\n}, error) {\n\tvar out []interface{}\n\terr := _Registry.contract.Call(opts, &out, \"getCertificate\", _id)\n\n\toutstruct := new(struct {\n\t\tIssuer common.Address\n\t\tTopic *big.Int\n\t\tValidityCall []byte\n\t\tData []byte\n\t})\n\tif err != nil {\n\t\treturn *outstruct, err\n\t}\n\n\toutstruct.Issuer = *abi.ConvertType(out[0], new(common.Address)).(*common.Address)\n\toutstruct.Topic = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int)\n\toutstruct.ValidityCall = *abi.ConvertType(out[2], new([]byte)).(*[]byte)\n\toutstruct.Data = *abi.ConvertType(out[3], new([]byte)).(*[]byte)\n\n\treturn 
*outstruct, err\n\n}", "func GetFingerprint(extension string, reader io.Reader, f decoder.MediaDecoder) *structs.Analysis {\n\n\timg, err := f.Decode(extension, reader)\n\tif err != nil {\n\t\tlog.Printf(\"GetFingerprint: unable to decode file with extension %s: %s\\n\", extension, err)\n\t\treturn &structs.Analysis{FingerprintErrorString: err.Error()}\n\t}\n\n\tfr, err := fingerprint(img)\n\tif err != nil {\n\t\treturn &structs.Analysis{Fingerprint: fr, FingerprintErrorString: err.Error()}\n\t}\n\n\treturn &structs.Analysis{Fingerprint: fr}\n\n}", "func (*StringSchema) Fingerprint() (*Fingerprint, error) {\n\treturn &Fingerprint{\n\t\t233, 229, 193, 201, 228, 246, 39, 115, 57, 209, 188, 222, 7, 51, 165, 155,\n\t\t212, 47, 135, 49, 244, 73, 218, 109, 193, 48, 16, 169, 22, 147, 13, 72,\n\t}, nil\n}", "func (*LongSchema) Fingerprint() (*Fingerprint, error) {\n\treturn &Fingerprint{\n\t\t195, 44, 73, 125, 246, 115, 12, 151, 250, 7, 54, 42, 165, 2, 63, 55,\n\t\t212, 154, 2, 126, 196, 82, 54, 7, 120, 17, 76, 244, 39, 150, 90, 221,\n\t}, nil\n}", "func (o *ApplianceImageBundleAllOf) GetFingerprint() string {\n\tif o == nil || o.Fingerprint == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Fingerprint\n}", "func PublicKeyFingerprint(rp *rsa.PublicKey) (string, error) {\n\tsp, err := ssh.NewPublicKey(rp)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn ssh.FingerprintSHA256(sp), nil\n}", "func (s *RecordSchema) Fingerprint() (*Fingerprint, error) {\n\tif s.fingerprint == nil {\n\t\tif f, err := calculateSchemaFingerprint(s); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\ts.fingerprint = f\n\t\t}\n\t}\n\treturn s.fingerprint, nil\n}", "func FprintCertificate(w io.Writer, cert *x509.Certificate) {\n\tfmt.Fprintln(w, \"Version:\", cert.Version)\n\tif cert.SerialNumber.BitLen() > 63 {\n\t\tfmt.Fprintf(w, \"Serial: 0x%x\\n\", cert.SerialNumber)\n\t} else {\n\t\tfmt.Fprintf(w, \"Serial: %d (0x%x)\\n\", cert.SerialNumber, cert.SerialNumber)\n\t}\n\tfmt.Fprintln(w, \"Subject:\", FormatSubject(cert))\n\tfmt.Fprintln(w, \"Issuer: \", FormatIssuer(cert))\n\tfmt.Fprintln(w, \"Valid: \", cert.NotBefore)\n\tfmt.Fprintln(w, \"Expires:\", cert.NotAfter)\n\tfmt.Fprintln(w, \"Period: \", subDate(cert.NotAfter, cert.NotBefore))\n\tswitch k := cert.PublicKey.(type) {\n\tcase *rsa.PublicKey:\n\t\tn := fmt.Sprintf(\"%x\", k.N)\n\t\tfmt.Fprintf(w, \"Pub key: RSA bits=%d e=%d n=%s...%s\\n\", k.N.BitLen(), k.E, n[:8], n[len(n)-8:])\n\tcase *ecdsa.PublicKey:\n\t\tp := k.Params()\n\t\tx := fmt.Sprintf(\"%x\", k.X)\n\t\ty := fmt.Sprintf(\"%x\", k.Y)\n\t\tfmt.Fprintf(w, \"Public key: ECDSA bits=%d name=%s x=%s... 
y=...%s\\n\", p.BitSize, p.Name, x[:8], y[len(y)-8:])\n\tdefault:\n\t\tfmt.Fprintf(w, \"Public key: %T\\n\", k)\n\t}\n\tfmt.Fprintln(w, \"Sig alg:\", cert.SignatureAlgorithm)\n\tfmt.Fprintln(w, \"Extensions:\")\n\t// subject alternate names\n\tprintSAN(w, cert)\n\t// basic constraints\n\tif cert.BasicConstraintsValid {\n\t\tcons := fmt.Sprintf(\"isCA=%t\", cert.IsCA)\n\t\tif cert.MaxPathLenZero {\n\t\t\tcons += \" MaxPathLen=0\"\n\t\t} else if cert.MaxPathLen > 0 {\n\t\t\tcons += fmt.Sprintf(\" MaxPathLen=%d\", cert.MaxPathLen)\n\t\t}\n\t\tfmt.Fprintln(w, \" Basic constraints: \"+cons)\n\t}\n\t// Name constraints\n\tprintNameConstraints(w, cert)\n\t// key usage\n\tusage := \"\"\n\tfor n, name := range keyUsageNames {\n\t\tif cert.KeyUsage&n != 0 {\n\t\t\tusage += \", \" + name\n\t\t}\n\t}\n\tif usage != \"\" {\n\t\tfmt.Fprintln(w, \" Key Usage:\", usage[2:])\n\t}\n\t// extended key usage\n\tusage = \"\"\n\tfor _, u := range cert.ExtKeyUsage {\n\t\tname := extKeyUsageNames[u]\n\t\tif name == \"\" {\n\t\t\tname = fmt.Sprintf(\"%d\", u)\n\t\t}\n\t\tusage += \", \" + name\n\t}\n\tfor _, u := range cert.UnknownExtKeyUsage {\n\t\tusage += \", \" + u.String()\n\t}\n\tif usage != \"\" {\n\t\tfmt.Fprintln(w, \" Extended key usage:\", usage[2:])\n\t}\n\t// keyids\n\tif len(cert.SubjectKeyId) != 0 {\n\t\tfmt.Fprintf(w, \" Subject key ID: %x\\n\", cert.SubjectKeyId)\n\t}\n\tif len(cert.AuthorityKeyId) != 0 {\n\t\tfmt.Fprintf(w, \" Authority key ID: %x\\n\", cert.AuthorityKeyId)\n\t}\n\t// authority info\n\tif len(cert.OCSPServer) != 0 {\n\t\tfmt.Fprintln(w, \" OCSP Servers:\")\n\t\tfor _, s := range cert.OCSPServer {\n\t\t\tfmt.Fprintln(w, \" \", s)\n\t\t}\n\t}\n\tif len(cert.IssuingCertificateURL) != 0 {\n\t\tfmt.Fprintln(w, \" Issuing authority URLs:\")\n\t\tfor _, s := range cert.IssuingCertificateURL {\n\t\t\tfmt.Fprintln(w, \" \", s)\n\t\t}\n\t}\n\t// CRL\n\tif len(cert.CRLDistributionPoints) != 0 {\n\t\tfmt.Fprintln(w, \" CRL Distribution Points:\")\n\t\tfor _, s := range cert.CRLDistributionPoints {\n\t\t\tfmt.Fprintln(w, \" \", s)\n\t\t}\n\t}\n\t// Policy IDs\n\tif len(cert.PolicyIdentifiers) != 0 {\n\t\tfmt.Fprintln(w, \" Policy Identifiers:\")\n\t\tfor _, s := range cert.PolicyIdentifiers {\n\t\t\tfmt.Fprintln(w, \" \", s.String())\n\t\t}\n\t}\n\t// Other\n\tfor _, ex := range cert.Extensions {\n\t\tif knownExtension(ex.Id) {\n\t\t\tcontinue\n\t\t}\n\t\tcritical := \"\"\n\t\tif ex.Critical {\n\t\t\tcritical = \" (critical)\"\n\t\t}\n\t\tfmt.Fprintf(w, \" Extension %s%s: %x\\n\", ex.Id, critical, ex.Value)\n\t}\n}", "func (s *refSchema) Fingerprint() (*Fingerprint, error) {\n\treturn s.Fingerprint()\n}", "func GetIdpServerCertThumbprint(ctx context.Context, url string) (string, error) {\n\tlog := log.Logger(ctx, \"internal.utils.oidc\", \"GetIdpServerCertThumbprint\")\n\tlog.Info(\"Calculating Idp Server cert Thumbprint\")\n\n\tthumbprint := \"\"\n\thostName, err := parseURL(ctx, url)\n\tif err != nil {\n\t\tlog.Error(err, \"Unable to get the host\")\n\t\treturn thumbprint, err\n\t}\n\tconn, err := tls.Dial(\"tcp\", hostName, &tls.Config{\n\t\tInsecureSkipVerify: true,\n\t})\n\tif err != nil {\n\t\tlog.Error(err, \"Unable to dial remote host\")\n\t\treturn thumbprint, err\n\t}\n\t//Close the connection\n\tdefer conn.Close()\n\n\tcs := conn.ConnectionState()\n\tnumCerts := len(cs.PeerCertificates)\n\tvar root *x509.Certificate\n\t// Important! 
Get the last cert in the chain, which is the root CA.\n\tif numCerts >= 1 {\n\t\troot = cs.PeerCertificates[numCerts-1]\n\t} else {\n\t\tlog.Error(err, \"Error getting cert list from connection for Idp Cert Thumbprint calculation\")\n\t\treturn thumbprint, err\n\t}\n\tthumbprint = fmt.Sprintf(\"%x\", sha1.Sum(root.Raw))\n\t// print out the fingerprint\n\tlog.Info(\"Successfully able to retrieve Idp Server cert thumbprint\", \"thumbprint\", thumbprint)\n\treturn thumbprint, nil\n}", "func toFingerprint(m *dns.Msg) *fingerprint {\n\tif m == nil {\n\t\treturn nil\n\t}\n\th := m.MsgHdr\n\tf := new(fingerprint)\n\n\tif len(m.Question) > 0 {\n\t\tif len(m.Question[0].Name) == 0 {\n\t\t\tf.Query.Name = \".\"\n\t\t} else {\n\t\t\tf.Query.Name = m.Question[0].Name\n\t\t}\n\t\tf.Query.Qtype = m.Question[0].Qtype\n\t\tf.Query.Qclass = m.Question[0].Qclass\n\t} else {\n\t\t// Default, nil values\n\t\tf.Query.Name = \".\"\n\t\tf.Query.Qtype = 0\n\t\tf.Query.Qclass = 0\n\t}\n\n\tf.Opcode = h.Opcode\n\tf.Rcode = h.Rcode\n\tf.Response = h.Response\n\tf.Authoritative = h.Authoritative\n\tf.Truncated = h.Truncated\n\tf.RecursionDesired = h.RecursionDesired\n\tf.RecursionAvailable = h.RecursionAvailable\n\tf.AuthenticatedData = h.AuthenticatedData\n\tf.CheckingDisabled = h.CheckingDisabled\n\tf.Zero = h.Zero\n\n\tf.Question = len(m.Question)\n\tf.Answer = len(m.Answer)\n\tf.Ns = len(m.Ns)\n\tf.Extra = len(m.Extra)\n\tf.Do = false\n\tf.UDPSize = 0\n\n\tfor _, r := range m.Extra {\n\t\tif r.Header().Rrtype == dns.TypeOPT {\n\t\t\t// version is always 0 - and I cannot set it anyway\n\t\t\tf.Do = r.(*dns.RR_OPT).Do()\n\t\t\tf.UDPSize = int(r.(*dns.RR_OPT).UDPSize())\n\t\t\tif len(r.(*dns.RR_OPT).Option) == 1 {\n\t\t\t\t// Only support NSID atm\n\t\t\t\tf.Nsid = r.(*dns.RR_OPT).Option[0].Option() == dns.EDNS0NSID\n\t\t\t}\n\t\t}\n\t}\n\treturn f\n}", "func Fingerprint(labels []*remote.LabelPair) uint64 {\n\tif len(labels) == 0 {\n\t\treturn offset64\n\t}\n\n\tsum := offset64\n\tfor _, l := range labels {\n\t\tsum = hashAdd(sum, l.Name)\n\t\tsum = hashAddByte(sum, separatorByte)\n\t\tsum = hashAdd(sum, l.Value)\n\t\tsum = hashAddByte(sum, separatorByte)\n\t}\n\treturn sum\n}", "func (o LookupPartnerAccountResultOutput) Fingerprint() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v LookupPartnerAccountResult) *string { return v.Fingerprint }).(pulumi.StringPtrOutput)\n}", "func (s *RecursiveSchema) Fingerprint() (*Fingerprint, error) {\n\treturn s.Actual.Fingerprint()\n}", "func getHS256Signature(encHeader string, encPayload string, pubKeyHexa string) string {\n\topenssl := exec.Command(\"openssl\", \"dgst\", \"-sha256\", \"-mac\", \"HMAC\", \"-macopt\", \"hexkey:\"+pubKeyHexa)\n\n\topenssl.Stdin = bytes.NewReader([]byte(encHeader + \".\" + encPayload))\n\n\tcmdOutput := &bytes.Buffer{}\n\topenssl.Stdout = cmdOutput\n\topenssl.Start()\n\topenssl.Wait()\n\thmac := string(cmdOutput.Bytes())\n\treturn hex.EncodeToString([]byte(hmac))\n}", "func (o TlsCertificateResponseOutput) Sha1Fingerprint() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TlsCertificateResponse) string { return v.Sha1Fingerprint }).(pulumi.StringOutput)\n}", "func (*NullSchema) Fingerprint() (*Fingerprint, error) {\n\treturn &Fingerprint{\n\t\t240, 114, 203, 236, 59, 248, 132, 24, 113, 212, 40, 66, 48, 197, 233, 131,\n\t\t220, 33, 26, 86, 131, 122, 237, 134, 36, 135, 20, 143, 148, 125, 26, 31,\n\t}, nil\n}", "func (o NetworkAttachmentOutput) Fingerprint() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *NetworkAttachment) pulumi.StringOutput { 
return v.Fingerprint }).(pulumi.StringOutput)\n}", "func (c *AccountsUpdateCall) Fingerprint(fingerprint string) *AccountsUpdateCall {\n\tc.params_.Set(\"fingerprint\", fmt.Sprintf(\"%v\", fingerprint))\n\treturn c\n}", "func (_Registry *RegistryCallerSession) GetCertificate(_id *big.Int) (struct {\n\tIssuer common.Address\n\tTopic *big.Int\n\tValidityCall []byte\n\tData []byte\n}, error) {\n\treturn _Registry.Contract.GetCertificate(&_Registry.CallOpts, _id)\n}", "func (a *A25) doubleSHA256() []byte {\n\th := sha256.New()\n\th.Write(a[:21])\n\td := h.Sum([]byte{})\n\th = sha256.New()\n\th.Write(d)\n\treturn h.Sum(d[:0])\n}", "func (*IntSchema) Fingerprint() (*Fingerprint, error) {\n\treturn &Fingerprint{\n\t\t63, 43, 135, 169, 254, 124, 201, 177, 56, 53, 89, 140, 57, 129, 205, 69,\n\t\t227, 227, 85, 48, 158, 80, 144, 170, 9, 51, 215, 190, 203, 111, 186, 69,\n\t}, nil\n}", "func (s *FixedSchema) Fingerprint() (*Fingerprint, error) {\n\tif s.fingerprint == nil {\n\t\tif f, err := calculateSchemaFingerprint(s); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\ts.fingerprint = f\n\t\t}\n\t}\n\treturn s.fingerprint, nil\n}", "func makeCertID(leaf, issuer *x509.Certificate, hashName string) (string, error) {\n\tif leaf == nil {\n\t\treturn \"\", fmt.Errorf(\"leaf certificate is nil\")\n\t}\n\tif issuer == nil {\n\t\treturn \"\", fmt.Errorf(\"issuer certificate is nil\")\n\t}\n\n\tvar hashFunc crypto.Hash\n\tvar oid asn1.ObjectIdentifier\n\n\tswitch hashName {\n\t// The following correlation of hashFunc to OID is copied from a private mapping in golang.org/x/crypto/ocsp:\n\t// https://cs.opensource.google/go/x/crypto/+/refs/tags/v0.8.0:ocsp/ocsp.go;l=156\n\tcase crypto.SHA1.String():\n\t\thashFunc = crypto.SHA1\n\t\toid = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26})\n\n\tcase crypto.SHA256.String():\n\t\thashFunc = crypto.SHA256\n\t\toid = asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1})\n\n\tcase crypto.SHA384.String():\n\t\thashFunc = crypto.SHA384\n\t\toid = asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2})\n\n\tcase crypto.SHA512.String():\n\t\thashFunc = crypto.SHA512\n\t\toid = asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3})\n\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"hashName %q is not supported by this package\", hashName)\n\t}\n\n\tif !hashFunc.Available() {\n\t\t// This should never happen.\n\t\treturn \"\", fmt.Errorf(\"hash function %q is not available on your platform\", hashFunc)\n\t}\n\n\tvar spki struct {\n\t\tAlgorithm pkix.AlgorithmIdentifier\n\t\tPublicKey asn1.BitString\n\t}\n\n\t_, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &spki)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\th := hashFunc.New()\n\th.Write(spki.PublicKey.RightAlign())\n\tissuerKeyHash := h.Sum(nil)\n\n\th.Reset()\n\th.Write(issuer.RawSubject)\n\tissuerNameHash := h.Sum(nil)\n\n\ttype certID struct {\n\t\tHashAlgorithm pkix.AlgorithmIdentifier\n\t\tIssuerNameHash []byte\n\t\tIssuerKeyHash []byte\n\t\tSerialNumber *big.Int\n\t}\n\n\t// DER-encode the CertID ASN.1 sequence [RFC6960].\n\tcertIDBytes, err := asn1.Marshal(certID{\n\t\tHashAlgorithm: pkix.AlgorithmIdentifier{\n\t\t\tAlgorithm: oid,\n\t\t},\n\t\tIssuerNameHash: issuerNameHash,\n\t\tIssuerKeyHash: issuerKeyHash,\n\t\tSerialNumber: leaf.SerialNumber,\n\t})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// base64url-encode [RFC4648] the bytes of the DER-encoded CertID ASN.1 sequence [RFC6960].\n\tencodedBytes := base64.URLEncoding.EncodeToString(certIDBytes)\n\n\t// Any trailing '=' 
characters MUST be stripped.\n\treturn strings.TrimRight(encodedBytes, \"=\"), nil\n}", "func (o PartnerAccountOutput) Fingerprint() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *PartnerAccount) pulumi.StringOutput { return v.Fingerprint }).(pulumi.StringOutput)\n}", "func (d *Driver) Fingerprint(ctx context.Context) (<-chan *drivers.Fingerprint, error) {\n\terr := shelpers.Init()\n\tif err != nil {\n\t\td.logger.Error(\"Could not init stats helper\", \"err\", err)\n\t\treturn nil, err\n\t}\n\tch := make(chan *drivers.Fingerprint)\n\tgo d.handleFingerprint(ctx, ch)\n\treturn ch, nil\n}", "func PrintFingerPrint(src string) {\n fp, _ := getFingerPrint(src)\n for _, it := range fp.items {\n fmt.Println(it.str, it.cnt)\n }\n}", "func (o *TokenCard) GetDeviceFingerprint() string {\n\tif o == nil || IsNil(o.DeviceFingerprint) {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.DeviceFingerprint\n}", "func FingerprintURI(parent, uri string) string {\n\treturn id.Checksum(parent + uri)\n}", "func (_Registry *RegistrySession) GetCertificate(_id *big.Int) (struct {\n\tIssuer common.Address\n\tTopic *big.Int\n\tValidityCall []byte\n\tData []byte\n}, error) {\n\treturn _Registry.Contract.GetCertificate(&_Registry.CallOpts, _id)\n}", "func (s *EnumSchema) Fingerprint() (*Fingerprint, error) {\n\tif s.fingerprint == nil {\n\t\tif f, err := calculateSchemaFingerprint(s); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\ts.fingerprint = f\n\t\t}\n\t}\n\treturn s.fingerprint, nil\n}", "func fingerprints() {\n\tuseKey(loadKey(privateKeyPath))\n\tloadContacts(contactsPath)\n\tfor name, fingerprint := range contacts {\n\t\tfmt.Printf(\"%-20s %x\\n\", name, fingerprint)\n\t}\n}", "func getClaimSignatureDigest(bytes ...[]byte) [32]byte {\n\n\tvar combined []byte\n\tfor _, b := range bytes {\n\t\tcombined = append(combined, b...)\n\t}\n\tdigest := sha256.Sum256(combined)\n\treturn [32]byte(digest)\n}", "func (o TagsResponseOutput) Fingerprint() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TagsResponse) string { return v.Fingerprint }).(pulumi.StringOutput)\n}", "func (o LookupInstanceResultOutput) Fingerprint() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupInstanceResult) string { return v.Fingerprint }).(pulumi.StringOutput)\n}", "func (ct CustomType) Fingerprint() string {\n\treturn fmt.Sprintf(\"gocustomtype: %v\", ct)\n}", "func (acm *AcmeFS) GetCertificate(email string) tlsfs.CertificateFunc {\n\treturn func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {\n\t\thname := hello.ServerName\n\t\tif hname == \"\" {\n\t\t\treturn nil, errors.New(\"acme/acmefs: missing server name\")\n\t\t}\n\n\t\t// if the requests is for a acme temporary certificate then we just need\n\t\t// to check the cache and return that one instead.\n\t\tif strings.HasSuffix(hname, \".acme.invalid\") {\n\t\t\tif cert, err := acm.config.TLSCertCache.Get(hname); err == nil {\n\t\t\t\treturn &cert, nil\n\t\t\t}\n\n\t\t\tif cert, err := acm.config.TLSCertCache.Get(strings.TrimSuffix(hname, \".acme.invalid\")); err == nil {\n\t\t\t\treturn &cert, nil\n\t\t\t}\n\n\t\t\treturn nil, fmt.Errorf(\"acme/acmefs: no cert for %q\", hname)\n\t\t}\n\n\t\tvar wanted tls.CurveID\n\t\tvar found bool\n\t\tfor _, curves := range []tls.CurveID{tls.CurveP384, tls.CurveP256} {\n\t\t\tfor _, wanted = range hello.SupportedCurves {\n\t\t\t\tif wanted == curves {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif found {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tcurve := tlsfs.ECKey384\n\t\tswitch wanted {\n\t\tcase 
tls.CurveP256:\n\t\t\tcurve = tlsfs.ECKey256\n\t\tcase tls.CurveP384:\n\t\t\tcurve = tlsfs.ECKey384\n\t\tcase tls.CurveP521:\n\t\t\tcurve = tlsfs.ECKey512\n\t\t}\n\n\t\tvar acct tlsfs.NewDomain\n\t\tacct.Domain = hname\n\t\tacct.Email = email\n\t\tacct.KeyType = curve\n\t\tacct.CommonName = hname\n\n\t\tcert, _, err := acm.Create(acct, tlsfs.AgreeToTOS)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tuser, err := acm.GetUser(cert.User)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tcertbundle, err := certificates.EncodeCertificate(cert.Certificate)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tkeybundle, err := certificates.EncodePrivateKey(user.GetPrivateKey())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tobtained, err := tls.X509KeyPair(certbundle, keybundle)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn &obtained, nil\n\t}\n\n}", "func (o TagsResponsePtrOutput) Fingerprint() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *TagsResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Fingerprint\n\t}).(pulumi.StringPtrOutput)\n}", "func newFingerprint(s string) *fingerprint {\n\tf := new(fingerprint)\n\tf.setString(s)\n\treturn f\n}", "func (cert PartialCert) Signature() hotstuff.Signature {\n\treturn cert.signature\n}", "func SignCertificate(invocationDetail *FunctionInvocation, keyvaultClient *keyvault.BaseClient, keyvaultName string, keyName string, pubKey ssh.PublicKey) (*ssh.Certificate, error) {\n\t// Generate a nonce\n\tbytes := make([]byte, 32)\n\tnonce := make([]byte, len(bytes)*2)\n\tif _, err := rand.Read(bytes); err != nil {\n\t\treturn nil, err\n\t}\n\thex.Encode(nonce, bytes)\n\n\t// Generate a random serial number\n\tserial, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Set the certificate principal to the signed in user\n\tusername := strings.Split(invocationDetail.ClientPrincipalName, \"@\")[0]\n\n\t// Get the current time and generate the validFrom and ValidTo\n\tnow := time.Now()\n\tvalidFrom := now.Add(time.Second * -15)\n\tvalidTo := now.Add(time.Minute * 2)\n\n\t// Convert the extensions slice to a map\n\textensions := make(map[string]string, len(supportedExtensions))\n\tfor _, ext := range supportedExtensions {\n\t\textensions[ext] = \"\"\n\t}\n\n\tcriticalOptions := make(map[string]string)\n\t// criticalOptions[\"force-command\"] = \"echo Hello, SSHizzle!\"\n\t// criticalOptions[\"source-address\"] = \"192.168.0.0/24\"\n\n\t// Key ID to [loosely] follow Netflix BLESS format: https://github.com/Netflix/bless\n\tkeyID := fmt.Sprintf(\"request[%s] for[%s] from[%s] command[%s] ssh_key[%s] ca[%s] valid_to[%s]\",\n\t\tinvocationDetail.InvocationID,\n\t\tusername,\n\t\tinvocationDetail.ClientIP,\n\t\t\"\", // Force command\n\t\tssh.FingerprintSHA256(pubKey),\n\t\tos.Getenv(\"WEBSITE_DEPLOYMENT_ID\"),\n\t\tvalidTo.Format(\"2006/01/02 15:04:05\"),\n\t)\n\t// Create a certificate with all of our details\n\tcertificate := ssh.Certificate{\n\t\tNonce: nonce,\n\t\tKey: pubKey,\n\t\tSerial: serial.Uint64(),\n\t\tCertType: ssh.UserCert,\n\t\tKeyId: keyID,\n\t\tValidPrincipals: []string{\n\t\t\tusername,\n\t\t},\n\t\tPermissions: ssh.Permissions{\n\t\t\tCriticalOptions: criticalOptions,\n\t\t\tExtensions: extensions,\n\t\t},\n\t\tValidAfter: uint64(validFrom.Unix()),\n\t\tValidBefore: uint64(validTo.Unix()),\n\t}\n\n\t// Create a \"KeyVaultSigner\" which returns a crypto.Signer that interfaces with Azure Key 
Vault\n\tkeyvaultSigner := azure.NewKeyVaultSigner(keyvaultClient, keyvaultName, keyName)\n\n\t// Create an SSHAlgorithmSigner with an RSA, SHA256 algorithm\n\tsshAlgorithmSigner, err := NewAlgorithmSignerFromSigner(keyvaultSigner, ssh.SigAlgoRSASHA2256)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Sign the certificate!\n\tif err := certificate.SignCert(rand.Reader, sshAlgorithmSigner); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Extract the public key (certificate) to return to the user\n\tpubkey, err := ssh.ParsePublicKey(certificate.Marshal())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Convert the cert to the correct format and return it\n\tcert, _, _, _, err := ssh.ParseAuthorizedKey(ssh.MarshalAuthorizedKey(pubkey))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cert.(*ssh.Certificate), nil\n}", "func (*X509Certificate) Descriptor() ([]byte, []int) {\n\treturn file_sigstore_common_proto_rawDescGZIP(), []int{9}\n}", "func (o MetadataResponseOutput) Fingerprint() pulumi.StringOutput {\n\treturn o.ApplyT(func(v MetadataResponse) string { return v.Fingerprint }).(pulumi.StringOutput)\n}", "func (a tlsCredentials) getCertFilename() string {\n\tif a.cert.Secret != nil {\n\t\treturn a.cert.Secret.Key\n\t} else if a.cert.ConfigMap != nil {\n\t\treturn a.cert.ConfigMap.Key\n\t}\n\n\treturn \"\"\n}", "func FingerprintWithExt(parent, uri string) string {\n\tid := id.Checksum(parent + uri)\n\tparts := strings.Split(uri, \".\")\n\tif len(parts) == 0 {\n\t\treturn id\n\t}\n\treturn fmt.Sprintf(\"%s/%s.%s\", parent, id, parts[len(parts)-1])\n}", "func (s CertStore) GetByThumb(thumb string) (res Cert, err error) {\n\tbThumb, err := hex.DecodeString(thumb)\n\tif err != nil {\n\t\treturn\n\t}\n\tvar hashBlob C.CRYPT_HASH_BLOB\n\thashBlob.cbData = C.DWORD(len(bThumb))\n\tbThumbPtr := C.CBytes(bThumb)\n\tdefer C.free(bThumbPtr)\n\thashBlob.pbData = (*C.BYTE)(bThumbPtr)\n\tif res.pCert = s.getCert(C.CERT_FIND_HASH, unsafe.Pointer(&hashBlob)); res.pCert == nil {\n\t\terr = getErr(\"Error looking up certificate by thumb\")\n\t\treturn\n\t}\n\treturn\n}", "func (s *MapSchema) Fingerprint() (*Fingerprint, error) {\n\tif s.fingerprint == nil {\n\t\tif f, err := calculateSchemaFingerprint(s); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\ts.fingerprint = f\n\t\t}\n\t}\n\treturn s.fingerprint, nil\n}", "func (b SignDetail) Digest() (common.Hash, error) {\n\tvar hash common.Hash\n\tvar signFormatData apitypes.TypedData\n\tif err := json.Unmarshal([]byte(b.SignSchema.Schema), &signFormatData); err != nil {\n\t\treturn hash, err\n\t}\n\tparams, err := b.GetContractParams()\n\tif err != nil {\n\t\treturn hash, err\n\t}\n\tdata, err := buildTypedData(signFormatData, params)\n\n\tif err != nil {\n\t\treturn hash, err\n\t}\n\thash, err = crypto2.Keccak256HashEIP712(data)\n\treturn hash, err\n}", "func SSHFingerprintSHA256FromString(key string) string {\n\tpubKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(key))\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn SSHFingerprintSHA256(pubKey)\n}", "func getIDsFromCertificate(peer *x509.Certificate) (string, string, error) {\n\tswitch {\n\tcase len(peer.URIs) == 0:\n\t\treturn \"\", \"\", errors.New(\"peer certificate contains no URI SAN\")\n\tcase len(peer.URIs) > 1:\n\t\treturn \"\", \"\", errors.New(\"peer certificate contains more than one URI SAN\")\n\t}\n\n\tid := peer.URIs[0]\n\n\tif err := ValidateURI(id, AllowAny()); err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\treturn id.String(), TrustDomainID(id.Host), nil\n}", 
"func (ck *CertKey) Cert() []byte { return ck.cert }", "func (c *AccountsContainersUpdateCall) Fingerprint(fingerprint string) *AccountsContainersUpdateCall {\n\tc.params_.Set(\"fingerprint\", fmt.Sprintf(\"%v\", fingerprint))\n\treturn c\n}", "func (*FloatSchema) Fingerprint() (*Fingerprint, error) {\n\treturn &Fingerprint{\n\t\t30, 113, 249, 236, 5, 29, 102, 63, 86, 176, 216, 225, 252, 132, 215, 26,\n\t\t165, 108, 207, 233, 250, 147, 170, 32, 209, 5, 71, 167, 171, 235, 92, 192,\n\t}, nil\n}", "func (tc ScannerTestcase) Digest() claircore.Digest {\n\td, err := claircore.ParseDigest(tc.Hash)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn d\n}", "func (c *AccountsContainersVersionsUpdateCall) Fingerprint(fingerprint string) *AccountsContainersVersionsUpdateCall {\n\tc.params_.Set(\"fingerprint\", fmt.Sprintf(\"%v\", fingerprint))\n\treturn c\n}", "func (o BackendServiceFabricClusterServerX509NameOutput) IssuerCertificateThumbprint() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BackendServiceFabricClusterServerX509Name) string { return v.IssuerCertificateThumbprint }).(pulumi.StringOutput)\n}", "func (c *AccountsContainersMacrosUpdateCall) Fingerprint(fingerprint string) *AccountsContainersMacrosUpdateCall {\n\tc.params_.Set(\"fingerprint\", fmt.Sprintf(\"%v\", fingerprint))\n\treturn c\n}", "func getSignatureAlgorithm(cert *Certificate) (SignatureScheme, error) {\n\tswitch sk := cert.PrivateKey.(type) {\n\tcase *ecdsa.PrivateKey:\n\t\tpk := sk.Public().(*ecdsa.PublicKey)\n\t\tcurveName := pk.Curve.Params().Name\n\t\tcertAlg := cert.Leaf.PublicKeyAlgorithm\n\t\tif certAlg == x509.ECDSA && curveName == \"P-256\" {\n\t\t\treturn ECDSAWithP256AndSHA256, nil\n\t\t} else if certAlg == x509.ECDSA && curveName == \"P-384\" {\n\t\t\treturn ECDSAWithP384AndSHA384, nil\n\t\t} else if certAlg == x509.ECDSA && curveName == \"P-521\" {\n\t\t\treturn ECDSAWithP521AndSHA512, nil\n\t\t} else {\n\t\t\treturn undefinedSignatureScheme, fmt.Errorf(\"using curve %s for %s is not supported\", curveName, cert.Leaf.SignatureAlgorithm)\n\t\t}\n\tcase ed25519.PrivateKey:\n\t\treturn Ed25519, nil\n\tcase *rsa.PrivateKey:\n\t\t// If the certificate has the RSAEncryption OID there are a number of valid signature schemes that may sign the DC.\n\t\t// In the absence of better information, we make a reasonable choice.\n\t\treturn PSSWithSHA256, nil\n\tdefault:\n\t\treturn undefinedSignatureScheme, fmt.Errorf(\"tls: unsupported algorithm for signing Delegated Credential\")\n\t}\n}", "func (lc *localChain) GetBestKnownDigest() ([32]uint8, error) {\n\tpanic(\"not implemented yet\")\n}", "func (m *WindowsInformationProtectionDataRecoveryCertificate) GetCertificate()([]byte) {\n val, err := m.GetBackingStore().Get(\"certificate\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]byte)\n }\n return nil\n}", "func (o MetadataResponsePtrOutput) Fingerprint() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *MetadataResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Fingerprint\n\t}).(pulumi.StringPtrOutput)\n}" ]
[ "0.7413812", "0.67597044", "0.6694062", "0.66871536", "0.66629297", "0.6651699", "0.6395457", "0.62573737", "0.6215708", "0.6189705", "0.61763316", "0.61329055", "0.61223704", "0.60440844", "0.60190755", "0.5937564", "0.58921087", "0.58856016", "0.5885489", "0.5769462", "0.57662416", "0.5715622", "0.56792396", "0.5654955", "0.56371623", "0.5556725", "0.55519164", "0.5532367", "0.55188155", "0.55188155", "0.5501225", "0.5500876", "0.54786354", "0.5452931", "0.5440249", "0.54186034", "0.53941476", "0.53772897", "0.53720605", "0.53299475", "0.53237283", "0.5314313", "0.5303003", "0.52954775", "0.52859324", "0.52844936", "0.52745485", "0.5268408", "0.5239737", "0.5213188", "0.51725197", "0.51680636", "0.5157618", "0.51489294", "0.51411104", "0.51277494", "0.51252455", "0.5109892", "0.5092671", "0.5084725", "0.5084576", "0.50613785", "0.50487185", "0.5042987", "0.50401485", "0.5028986", "0.5009062", "0.50040203", "0.4981404", "0.49642533", "0.49473348", "0.49459726", "0.4938344", "0.49260908", "0.49157637", "0.4914222", "0.4899852", "0.48995224", "0.48979825", "0.48975858", "0.48935756", "0.488717", "0.4883306", "0.4881922", "0.4876399", "0.4872146", "0.48690322", "0.48514968", "0.48476666", "0.4811592", "0.4804049", "0.4802627", "0.48018813", "0.48015532", "0.48008308", "0.4791026", "0.47888827", "0.47789332", "0.47759503", "0.4774853" ]
0.7799496
0
In Aliyun, RequestBindIPToNatgateway doesn't need to check the eip again, which is different from SManagerResongDriver.RequestBindIPToNatgateway, because func ieip.Associate will fail if the eip has already been associated
func (self *SAliyunRegionDriver) RequestBindIPToNatgateway(ctx context.Context, task taskman.ITask, natgateway *models.SNatGateway, eipId string) error {
	taskman.LocalTaskRun(task, func() (jsonutils.JSONObject, error) {
		model, err := models.ElasticipManager.FetchById(eipId)
		if err != nil {
			return nil, err
		}
		lockman.LockObject(ctx, model)
		defer lockman.ReleaseObject(ctx, model)
		eip := model.(*models.SElasticip)
		iregion, err := natgateway.GetIRegion()
		if err != nil {
			return nil, err
		}
		ieip, err := iregion.GetIEipById(eip.GetExternalId())
		if err != nil {
			return nil, errors.Wrap(err, "fetch eip failed")
		}
		conf := &cloudprovider.AssociateConfig{
			InstanceId:    natgateway.GetExternalId(),
			Bandwidth:     eip.Bandwidth,
			AssociateType: api.EIP_ASSOCIATE_TYPE_NAT_GATEWAY,
		}
		err = ieip.Associate(conf)
		if err != nil {
			return nil, errors.Wrap(err, "fail to bind eip to natgateway")
		}
		err = cloudprovider.WaitStatus(ieip, api.EIP_STATUS_READY, 5*time.Second, 100*time.Second)
		if err != nil {
			return nil, err
		}

		// database
		_, err = db.Update(eip, func() error {
			eip.AssociateType = api.EIP_ASSOCIATE_TYPE_NAT_GATEWAY
			eip.AssociateId = natgateway.GetId()
			return nil
		})
		if err != nil {
			return nil, errors.Wrapf(err, "fail to update eip '%s' in database", eip.Id)
		}
		return nil, nil
	})
	return nil
}
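RequestBindIPToNatgateway above delegates the readiness check to cloudprovider.WaitStatus(ieip, api.EIP_STATUS_READY, 5*time.Second, 100*time.Second), whose implementation is not shown in this record. The sketch below illustrates what such a poll-until-status helper typically looks like; the statusResource interface and the function name waitStatus are assumptions, and the real cloudprovider API may differ.

package cloudutil

import (
	"fmt"
	"time"
)

// statusResource is an assumed shape for the cloud resource: enough to
// re-fetch remote state and read the current status string.
type statusResource interface {
	Refresh() error
	GetStatus() string
}

// waitStatus polls res every interval until GetStatus reports want, or
// returns an error once timeout has elapsed.
func waitStatus(res statusResource, want string, interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		if err := res.Refresh(); err != nil {
			return err
		}
		if res.GetStatus() == want {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out waiting for status %q, last saw %q",
				want, res.GetStatus())
		}
		time.Sleep(interval)
	}
}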
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (service *HTTPRestService) reserveIPAddress(w http.ResponseWriter, r *http.Request) {\n\tlog.Printf(\"[Azure CNS] reserveIPAddress\")\n\n\tvar req cns.ReserveIPAddressRequest\n\treturnMessage := \"\"\n\treturnCode := 0\n\taddr := \"\"\n\taddress := \"\"\n\terr := service.Listener.Decode(w, r, &req)\n\n\tlog.Request(service.Name, &req, err)\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif req.ReservationID == \"\" {\n\t\treturnCode = ReservationNotFound\n\t\treturnMessage = fmt.Sprintf(\"[Azure CNS] Error. ReservationId is empty\")\n\t}\n\n\tswitch r.Method {\n\tcase \"POST\":\n\t\tic := service.ipamClient\n\n\t\tifInfo, err := service.imdsClient.GetPrimaryInterfaceInfoFromMemory()\n\t\tif err != nil {\n\t\t\treturnMessage = fmt.Sprintf(\"[Azure CNS] Error. GetPrimaryIfaceInfo failed %v\", err.Error())\n\t\t\treturnCode = UnexpectedError\n\t\t\tbreak\n\t\t}\n\n\t\tasID, err := ic.GetAddressSpace()\n\t\tif err != nil {\n\t\t\treturnMessage = fmt.Sprintf(\"[Azure CNS] Error. GetAddressSpace failed %v\", err.Error())\n\t\t\treturnCode = UnexpectedError\n\t\t\tbreak\n\t\t}\n\n\t\tpoolID, err := ic.GetPoolID(asID, ifInfo.Subnet)\n\t\tif err != nil {\n\t\t\treturnMessage = fmt.Sprintf(\"[Azure CNS] Error. GetPoolID failed %v\", err.Error())\n\t\t\treturnCode = UnexpectedError\n\t\t\tbreak\n\t\t}\n\n\t\taddr, err = ic.ReserveIPAddress(poolID, req.ReservationID)\n\t\tif err != nil {\n\t\t\treturnMessage = fmt.Sprintf(\"[Azure CNS] ReserveIpAddress failed with %+v\", err.Error())\n\t\t\treturnCode = AddressUnavailable\n\t\t\tbreak\n\t\t}\n\n\t\taddressIP, _, err := net.ParseCIDR(addr)\n\t\tif err != nil {\n\t\t\treturnMessage = fmt.Sprintf(\"[Azure CNS] ParseCIDR failed with %+v\", err.Error())\n\t\t\treturnCode = UnexpectedError\n\t\t\tbreak\n\t\t}\n\t\taddress = addressIP.String()\n\n\tdefault:\n\t\treturnMessage = \"[Azure CNS] Error. 
ReserveIP did not receive a POST.\"\n\t\treturnCode = InvalidParameter\n\n\t}\n\n\tresp := cns.Response{\n\t\tReturnCode: returnCode,\n\t\tMessage: returnMessage,\n\t}\n\n\treserveResp := &cns.ReserveIPAddressResponse{Response: resp, IPAddress: address}\n\terr = service.Listener.Encode(w, &reserveResp)\n\tlog.Response(service.Name, reserveResp, resp.ReturnCode, ReturnCodeToString(resp.ReturnCode), err)\n}", "func configureApInterface(ip string, iface string) error {\n\tcmd := exec.Command(\"ip\", \"addr\", \"add\", ip, \"dev\", iface)\n\n\tif err := cmd.Start(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := cmd.Wait(); err != nil {\n\t\treturn nil\n\t}\n\n\treturn nil\n}", "func maybeUpdateBridgeIPAddr(\n\tctx *zedrouterContext,\n\tifname string) {\n\n\tstatus := getSwitchNetworkInstanceUsingPort(ctx, ifname)\n\tif status == nil {\n\t\treturn\n\t}\n\tlog.Infof(\"maybeUpdateBridgeIPAddr: found \"+\n\t\t\"NetworkInstance %s\", status.DisplayName)\n\n\tif !status.Activated {\n\t\tlog.Errorf(\"maybeUpdateBridgeIPAddr: \"+\n\t\t\t\"network instance %s not activated\\n\", status.DisplayName)\n\t\treturn\n\t}\n\tupdateBridgeIPAddr(ctx, status)\n\treturn\n}", "func (b *Bridge) setIP() error {\n\tif b.IP == nil {\n\t\treturn nil\n\t}\n\tcmd := exec.Command(\"ifconfig\", b.Device, b.IP.String(), \"netmask\", fmt.Sprintf(\"0x%s\", b.Netmask.String()))\n\tfmt.Printf(\"cmd: %s\\n\", strings.Join(cmd.Args, \" \"))\n\treturn cmd.Run()\n}", "func updateBridgeIPAddr(\n\tctx *zedrouterContext,\n\tstatus *types.NetworkInstanceStatus) {\n\n\tlog.Infof(\"updateBridgeIPAddr(%s)\\n\", status.Key())\n\n\told := status.BridgeIPAddr\n\terr := setBridgeIPAddr(ctx, status)\n\tif err != nil {\n\t\tlog.Infof(\"updateBridgeIPAddr: %s\\n\", err)\n\t\treturn\n\t}\n\tif status.BridgeIPAddr != old && status.BridgeIPAddr != \"\" {\n\t\tlog.Infof(\"updateBridgeIPAddr(%s) restarting dnsmasq\\n\",\n\t\t\tstatus.Key())\n\t\trestartDnsmasq(status)\n\t}\n}", "func (s *BasejossListener) EnterFuncIp(ctx *FuncIpContext) {}", "func SetTunIP(tunName string, mtu uint32, subnetIP global.Address, subnetMask uint8, client bool) error {\n\tip, subnet, err := ParseCIDR(subnetIP, subnetMask)\n\tif nil != err {\n\t\treturn err\n\t}\n\n\tip = ip.To4()\n\tif ip[3]%2 == 0 {\n\t\treturn errors.New(\"Invalid ip address.\")\n\t}\n\n\tpeer := net.IP(make([]byte, 4))\n\tcopy([]byte(peer), []byte(ip))\n\tpeer[3]++\n\n\tsargs := fmt.Sprintf(\"%s %s %s up\", tunName, ip, peer)\n\targs := strings.Split(sargs, \" \")\n\tcmd := exec.Command(\"ifconfig\", args...)\n\tif err := cmd.Run(); nil != err {\n\t\treturn errors.New(fmt.Sprintf(\"ifconfig %v err:%v\", sargs, err))\n\t} else {\n\t\tlog.Infof(\"ifconfig %s\", sargs)\n\t}\n\n\tsargs = fmt.Sprintf(\"link set dev %s up mtu %d qlen 100\", tunName, mtu)\n\targs = strings.Split(sargs, \" \")\n\tcmd = exec.Command(\"ip\", args...)\n\tif err := cmd.Run(); nil != err {\n\t\treturn errors.New(fmt.Sprintf(\"ip %v err:%v\", sargs, err))\n\t} else {\n\t\tlog.Infof(\"ip %s\", sargs)\n\t}\n\n\tsargs = fmt.Sprintf(\"route add %s via %s dev %s\", subnet, peer, tunName)\n\targs = strings.Split(sargs, \" \")\n\tcmd = exec.Command(\"ip\", args...)\n\tif err := cmd.Run(); nil != err {\n\t\treturn errors.New(fmt.Sprintf(\"ip %v err:%v\", sargs, err))\n\t} else {\n\t\tlog.Infof(\"ip %s\", sargs)\n\t}\n\n\tif client { // for client\n\t\tpeerStr := fmt.Sprintf(\"%d.%d.%d.%d\", peer[0], peer[1], peer[2], peer[3])\n\t\tif err := RedirectGateway(tunName, peerStr); nil != err {\n\t\t\tlog.Errorf(\"%v\", err)\n\t\t}\n\t} else { 
// for server\n\t\tsargs = \"net.ipv4.ip_forward=1\"\n\t\targs = strings.Split(sargs, \" \")\n\t\tcmd = exec.Command(\"sysctl\", args...)\n\t\tif err := cmd.Run(); nil != err {\n\t\t\tlog.Errorf(\"sysctl %v err:%v\", sargs, err)\n\t\t}\n\n\t\tsargs = \"-t nat -A POSTROUTING -j MASQUERADE\"\n\t\targs = strings.Split(sargs, \" \")\n\t\tcmd = exec.Command(\"iptables\", args...)\n\t\tif err := cmd.Run(); nil != err {\n\t\t\tlog.Errorf(\"iptables %v err:%v\", sargs, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (_class PIFClass) ReconfigureIP(sessionID SessionRef, self PIFRef, mode IPConfigurationMode, ip string, netmask string, gateway string, dns string) (_err error) {\n\t_method := \"PIF.reconfigure_ip\"\n\t_sessionIDArg, _err := convertSessionRefToXen(fmt.Sprintf(\"%s(%s)\", _method, \"session_id\"), sessionID)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_selfArg, _err := convertPIFRefToXen(fmt.Sprintf(\"%s(%s)\", _method, \"self\"), self)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_modeArg, _err := convertEnumIPConfigurationModeToXen(fmt.Sprintf(\"%s(%s)\", _method, \"mode\"), mode)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_ipArg, _err := convertStringToXen(fmt.Sprintf(\"%s(%s)\", _method, \"IP\"), ip)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_netmaskArg, _err := convertStringToXen(fmt.Sprintf(\"%s(%s)\", _method, \"netmask\"), netmask)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_gatewayArg, _err := convertStringToXen(fmt.Sprintf(\"%s(%s)\", _method, \"gateway\"), gateway)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_dnsArg, _err := convertStringToXen(fmt.Sprintf(\"%s(%s)\", _method, \"DNS\"), dns)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_, _err = _class.client.APICall(_method, _sessionIDArg, _selfArg, _modeArg, _ipArg, _netmaskArg, _gatewayArg, _dnsArg)\n\treturn\n}", "func (nu NetworkUtils) EnableIPForwarding(ifName string) error {\n\t// Enable ip forwading on linux vm.\n\t// sysctl -w net.ipv4.ip_forward=1\n\tcmd := fmt.Sprint(enableIPForwardCmd)\n\t_, err := nu.plClient.ExecuteCommand(cmd)\n\tif err != nil {\n\t\tlog.Printf(\"[net] Enable ipforwarding failed with: %v\", err)\n\t\treturn err\n\t}\n\n\t// Append a rule in forward chain to allow forwarding from bridge\n\tif err := iptables.AppendIptableRule(iptables.V4, iptables.Filter, iptables.Forward, \"\", iptables.Accept); err != nil {\n\t\tlog.Printf(\"[net] Appending forward chain rule: allow traffic coming from snatbridge failed with: %v\", err)\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func setupIpvlanInRemoteNs(netNs ns.NetNS, srcIfName, dstIfName string) (*ebpf.Map, error) {\n\trl := unix.Rlimit{\n\t\tCur: unix.RLIM_INFINITY,\n\t\tMax: unix.RLIM_INFINITY,\n\t}\n\n\terr := unix.Setrlimit(unix.RLIMIT_MEMLOCK, &rl)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to increase rlimit: %s\", err)\n\t}\n\n\tm, err := ebpf.NewMap(&ebpf.MapSpec{\n\t\tType: ebpf.ProgramArray,\n\t\tKeySize: 4,\n\t\tValueSize: 4,\n\t\tMaxEntries: 1,\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create root BPF map for %q: %s\", dstIfName, err)\n\t}\n\n\terr = netNs.Do(func(_ ns.NetNS) error {\n\t\tvar err error\n\n\t\tif srcIfName != dstIfName {\n\t\t\terr = link.Rename(srcIfName, dstIfName)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to rename ipvlan from %q to %q: %s\", srcIfName, dstIfName, err)\n\t\t\t}\n\t\t}\n\n\t\tipvlan, err := netlink.LinkByName(dstIfName)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to lookup ipvlan device %q: %s\", dstIfName, err)\n\t\t}\n\n\t\tqdiscAttrs := netlink.QdiscAttrs{\n\t\t\tLinkIndex: 
ipvlan.Attrs().Index,\n\t\t\tHandle: netlink.MakeHandle(0xffff, 0),\n\t\t\tParent: netlink.HANDLE_CLSACT,\n\t\t}\n\t\tqdisc := &netlink.GenericQdisc{\n\t\t\tQdiscAttrs: qdiscAttrs,\n\t\t\tQdiscType: \"clsact\",\n\t\t}\n\t\tif err = netlink.QdiscAdd(qdisc); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to create clsact qdisc on %q: %s\", dstIfName, err)\n\t\t}\n\n\t\tprog, err := ebpf.NewProgram(&ebpf.ProgramSpec{\n\t\t\tType: ebpf.SchedCLS,\n\t\t\tInstructions: getEntryProgInstructions(m.FD()),\n\t\t\tLicense: \"ASL2\",\n\t\t})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to load root BPF prog for %q: %s\", dstIfName, err)\n\t\t}\n\n\t\tfilterAttrs := netlink.FilterAttrs{\n\t\t\tLinkIndex: ipvlan.Attrs().Index,\n\t\t\tParent: netlink.HANDLE_MIN_EGRESS,\n\t\t\tHandle: netlink.MakeHandle(0, 1),\n\t\t\tProtocol: 3,\n\t\t\tPriority: 1,\n\t\t}\n\t\tfilter := &netlink.BpfFilter{\n\t\t\tFilterAttrs: filterAttrs,\n\t\t\tFd: prog.FD(),\n\t\t\tName: \"polEntry\",\n\t\t\tDirectAction: true,\n\t\t}\n\t\tif err = netlink.FilterAdd(filter); err != nil {\n\t\t\tprog.Close()\n\t\t\treturn fmt.Errorf(\"failed to create cls_bpf filter on %q: %s\", dstIfName, err)\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tm.Close()\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}", "func (p *PoolAllocatorType) SetIPAddrIfInsidePool(ipAddressStr string) {\n\n\t// see if this address falls within the subnet, and if it does, set the addr in the bitmap\n\tipAddress := net.ParseIP(ipAddressStr)\n\tif ipAddress != nil && p.ipNetwork.Contains(ipAddress) {\n\n\t\t//fmt.Println(\"setIpAddrIfInsideSubnet: \", ipAddress)\n\t\tip := ipAddress.To4()\n\t\tipAddressu32 := uint32(ip[0])<<24 | uint32(ip[1])<<16 | uint32(ip[2])<<8 | uint32(ip[3])\n\t\tipID := ipAddressu32 &^ p.ipMasku32\n\t\t//fmt.Println(\"setIpAddrIfInsideSubnet: ipID\", ipID)\n\t\tp.Allocated[ipID] = struct{}{}\n\t}\n}", "func (m *BgpConfiguration) SetIpAddress(value *string)() {\n err := m.GetBackingStore().Set(\"ipAddress\", value)\n if err != nil {\n panic(err)\n }\n}", "func calcGatewayIP(ipn *net.IPNet) net.IP {\n\tnid := ipn.IP.Mask(ipn.Mask)\n\treturn ip.NextIP(nid)\n}", "func (this *IPAMNetworkPool) AssignIP(ip net.IP) net.IP {\n\t// Lock until we have made a decision\n\tthis.mtx.Lock()\n\tdefer this.mtx.Unlock()\n\n\tif ip != nil {\n\t\t// Is the IP the gateway? (i.e. container wanting to become the gateway)\n\t\tif this.gateway.Equal(ip) {\n\t\t\t// The gateway is already checked against the pool IP. 
So just check\n\t\t\t// someone else hasn't claimed it already.\n\t\t\t// Note: the usability list is not checked here, since that list\n\t\t\t// always marks gateway IPs as unusable.\n\t\t\tif this.isAssigned(ip) == false {\n\t\t\t\tthis.assignedIPs[ip.String()] = ip\n\t\t\t\treturn ip\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\n\t\t// Not containable in this pool\n\t\tif this.subpool.Contains(ip) == false {\n\t\t\treturn nil\n\t\t}\n\n\t\tif this.isAssigned(ip) {\n\t\t\treturn nil\n\t\t} else {\n\t\t\tthis.assignedIPs[ip.String()] = ip\n\t\t\treturn ip\n\t\t}\n\t}\n\n\t// Nil IP - probe the map with incrementing IPs still we find one we can use\n\tfor ip := this.subpool.IP.Mask(this.subpool.Mask); this.subpool.Contains(ip); ip = netaddr.IPAdd(ip, 1) {\n\t\tif (this.isAssigned(ip) == false) && this.isUsable(ip) {\n\t\t\tthis.assignedIPs[ip.String()] = ip\n\t\t\treturn ip\n\t\t}\n\t}\n\n\t// Couldn't find anything to assign.\n\treturn nil\n}", "func (n *Network) trySetFlagBindIP(ip net.IP) error {\n\tif len(ip) != 0 {\n\t\tif len(n.BindIP) != 0 {\n\t\t\treturn errors.New(\"bind ip only can set by one of the flags or configuration file\")\n\t\t}\n\n\t\tn.BindIP = ip.String()\n\t\treturn nil\n\t}\n\n\treturn nil\n}", "func (twd *TCPWaveDriver) RequestAddress(config NetConfig, subnetAddr net.IPNet, macAddr string,\n containerName string, containerID string) (string,error){\n\n // Create network\n var network *twc.Network\n var err error\n networkAddress := strings.Split(config.IPAM.ContainerNetwork, \"/\")[0]\n network,err = twd.ObjMgr.GetNetwork(config.IPAM.ContainerNetwork, config.IPAM.Org)\n if err!=nil{\n glog.Infof(\"Creating Network with address : %s\", config.IPAM.ContainerNetwork)\n network = &twc.Network{}\n network.Name = config.IPAM.NetworkName\n network.Description = \"Kubernetes Network\"\n network.Organization = config.IPAM.Org\n addrBits := strings.Split(networkAddress, \".\")\n addr1,_ := strconv.Atoi(addrBits[0])\n network.Addr1 = addr1\n addr2,_ := strconv.Atoi(addrBits[1])\n network.Addr2 = addr2\n addr3,_ := strconv.Atoi(addrBits[2])\n network.Addr3 = addr3\n addr4,_ := strconv.Atoi(addrBits[3])\n network.Addr4 = addr4\n network.DMZVisible = \"no\"\n network.MaskLen = int(config.IPAM.NetMaskLength)\n _,err1 := twd.ObjMgr.CreateNetwork(*network)\n if err1!=nil{\n return \"\", err1\n }\n }\n // Create Subnet\n var subnet *twc.Subnet\n subnet,err = twd.ObjMgr.GetSubnet(subnetAddr.String(), config.IPAM.Org)\n if err!=nil {\n glog.Infof(\"Creating Subnet with address : %s\", subnetAddr.String())\n subnet = &twc.Subnet{MaskLen: 26}\n subnet.Name = \"K8S Subnet\"\n subnet.Description = \"subnet for kubernetes\"\n subnet.Organization = config.IPAM.Org\n subNtAddr := strings.Split(subnetAddr.String(), \"/\")[0]\n addrBits := strings.Split(subNtAddr, \".\")\n glog.Info(\"Address Bits Array : \" + addrBits[3])\n addr1,_ := strconv.Atoi(addrBits[0])\n subnet.Addr1 = addr1\n addr2,_ := strconv.Atoi(addrBits[1])\n subnet.Addr2 = addr2\n addr3,_ := strconv.Atoi(addrBits[2])\n subnet.Addr3 = addr3\n addr4,_ := strconv.Atoi(addrBits[3])\n subnet.Addr4 = addr4\n subnet.RouterAddr = addrBits[0] + \".\" + addrBits[1] + \".\" + addrBits[2] + \".\" + strconv.Itoa(addr4 + 1)\n subnet.NetworkAddr = networkAddress\n subnet.PrimaryDomain = config.IPAM.Domain\n _,err1 := twd.ObjMgr.CreateSubnet(*subnet)\n if err1!=nil{\n return \"\", err1\n }\n }\n\n // Fetch available IP from IPAM\n ip,err2 := twd.ObjMgr.GetNextFreeIP(subnetAddr.String(), config.IPAM.Org)\n if err2!=nil{\n return \"\",err2\n }\n\n mac := 
macAddr\n glog.Infof(\"Free Ip received from IPAM = %s , mac addr = %s\", ip, mac)\n\n if config.Type == \"bridge\" {\n\t\thwAddr, err := hwaddr.GenerateHardwareAddr4(net.ParseIP(ip), hwaddr.PrivateMACPrefix)\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Problem while generating hardware address using ip: %s\", err)\n\t\t\treturn \"\", err\n\t\t}\n glog.Infof(\"Computed Mac addr for bridge type: %s\", hwAddr.String())\n mac = hwAddr.String()\n\t}\n _, err = twd.ObjMgr.CreateIPAddress(ip, mac, subnetAddr.IP.String(), config.IPAM.Domain, config.IPAM.Org, containerName)\n if err!=nil{\n return \"\", err\n }\n glog.Infof(\"Ip Created in IPAM = %s\", ip)\n return ip, nil\n}", "func addIPAddress(ch api.Channel, index interface_types.InterfaceIndex) {\n\tfmt.Printf(\"Adding IP address to interface index %d\\n\", index)\n\n\treq := &interfaces.SwInterfaceAddDelAddress{\n\t\tSwIfIndex: index,\n\t\tIsAdd: true,\n\t\tPrefix: ip_types.AddressWithPrefix{\n\t\t\tAddress: ip_types.Address{\n\t\t\t\tAf: ip_types.ADDRESS_IP4,\n\t\t\t\tUn: ip_types.AddressUnionIP4(ip_types.IP4Address{10, 10, 0, uint8(index)}),\n\t\t\t},\n\t\t\tLen: 32,\n\t\t},\n\t}\n\tmarshal(req)\n\treply := &interfaces.SwInterfaceAddDelAddressReply{}\n\n\tif err := ch.SendRequest(req).ReceiveReply(reply); err != nil {\n\t\tlogError(err, \"adding IP address to interface\")\n\t\treturn\n\t}\n\tfmt.Printf(\"reply: %+v\\n\", reply)\n\n\tfmt.Println(\"OK\")\n\tfmt.Println()\n}", "func (adminAPIOp) BypassInteractionIPRateLimit() bool { return true }", "func provideIP(w http.ResponseWriter, req *http.Request) {\n\tresult, error := evaluateIPAddress(req)\n\tif error != nil {\n\t\tresult.Error = error.Error()\n\t}\n\tgo result.fetchGeoAndPersist()\n\n\twriteReponse(w, req, ipTemplate, result)\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpNeighborsNeighborAfiSafisAfiSafiIpv4LabeledUnicast(ctx context.Context, name string, identifier string, protocolName string, neighborAddress string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpipv4labeledunicastgroupIpv4LabeledUnicastBodyParam FrinxOpenconfigBgpBgpcommonmpipv4labeledunicastgroupIpv4LabeledUnicastRequest1, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:neighbors/frinx-openconfig-network-instance:neighbor/{neighbor-address}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv4-labeled-unicast/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = 
strings.Replace(localVarPath, \"{\"+\"neighbor-address\"+\"}\", fmt.Sprintf(\"%v\", neighborAddress), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpipv4labeledunicastgroupIpv4LabeledUnicastBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func configureContainerAddr(netns ns.NetNS, containerInterface *current.Interface, result *current.Result) error {\n\tvar containerVeth *net.Interface\n\tif err := netns.Do(func(containerNs ns.NetNS) error {\n\t\tvar err error\n\t\tcontainerVeth, err = net.InterfaceByName(containerInterface.Name)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Failed to find container interface %s in ns %s\", containerInterface.Name, netns.Path())\n\t\t\treturn err\n\t\t}\n\t\tif err := ipam.ConfigureIface(containerInterface.Name, result); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\tif containerVeth != nil {\n\t\t// Send 3 GARP packets in another goroutine with 50ms interval. It's because Openflow entries are installed async,\n\t\t// and the gratuitous ARP could be sent out after the Openflow entries are installed. Using another goroutine\n\t\t// ensures the processing of CNI ADD request is not blocked.\n\t\tgo func() {\n\t\t\tnetns.Do(func(containerNs ns.NetNS) error {\n\t\t\t\tcount := 0\n\t\t\t\tfor count < 3 {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-time.Tick(50 * time.Millisecond):\n\t\t\t\t\t\t// Send gratuitous ARP to network in case of stale mappings for this IP address\n\t\t\t\t\t\t// (e.g. 
if a previous - deleted - Pod was using the same IP).\n\t\t\t\t\t\tfor _, ipc := range result.IPs {\n\t\t\t\t\t\t\tif ipc.Version == \"4\" {\n\t\t\t\t\t\t\t\tarping.GratuitousArpOverIface(ipc.Address.IP, *containerVeth)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tcount += 1\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t})\n\t\t}()\n\t}\n\treturn nil\n}", "func setupCalicoNodeVxlan(ctx context.Context, c client.Interface, nodeName string, vtepIP cnet.IP, mac, publicIP string) error {\n\tlog.Infof(\"Updating Calico Node %s with vtep IP %s, Mac %s.\", nodeName, vtepIP.String(), mac)\n\n\t// Assign vtep IP.\n\t// Check current status of vtep IP. It could be assigned already if migration controller restarts.\n\tassign := true\n\tattr, _, err := c.IPAM().GetAssignmentAttributes(ctx, vtepIP)\n\tif err == nil {\n\t\tif attr[ipam.AttributeType] == ipam.AttributeTypeVXLAN && attr[ipam.AttributeNode] == nodeName {\n\t\t\t// The tunnel address is still valid, do nothing.\n\t\t\tlog.Infof(\"Calico Node %s vtep IP been assigned already.\", nodeName)\n\t\t\tassign = false\n\t\t} else {\n\t\t\t// The tunnel address has been allocated to something else, return error.\n\t\t\treturn fmt.Errorf(\"vtep IP %s has been occupied\", vtepIP.String())\n\t\t}\n\t} else if _, ok := err.(cerrors.ErrorResourceDoesNotExist); ok {\n\t\t// The tunnel address is not assigned, assign it.\n\t\tlog.WithField(\"vtepIP\", vtepIP.String()).Info(\"assign a new vtep IP\")\n\t} else {\n\t\t// Failed to get assignment attributes, datastore connection issues possible.\n\t\tlog.WithError(err).Errorf(\"Failed to get assignment attributes for vtep IP '%s'\", vtepIP.String())\n\t\treturn fmt.Errorf(\"Failed to get vtep IP %s attribute\", vtepIP.String())\n\t}\n\n\tif assign {\n\t\t// Build attributes and handle for this allocation.\n\t\tattrs := map[string]string{ipam.AttributeNode: nodeName}\n\t\tattrs[ipam.AttributeType] = ipam.AttributeTypeVXLAN\n\t\thandle := fmt.Sprintf(\"vxlan-tunnel-addr-%s\", nodeName)\n\n\t\terr := c.IPAM().AssignIP(ctx, ipam.AssignIPArgs{\n\t\t\tIP: vtepIP,\n\t\t\tHostname: nodeName,\n\t\t\tHandleID: &handle,\n\t\t\tAttrs: attrs,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to assign vtep IP %s\", vtepIP.String())\n\t\t}\n\t\tlog.Infof(\"Calico Node %s vtep IP assigned.\", nodeName)\n\t}\n\n\t// Update Calico node with vtep IP/Mac/PublicIP\n\tnode, err := c.Nodes().Get(ctx, nodeName, options.GetOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// If node has correct vxlan setup, do nothing.\n\tif node.Spec.IPv4VXLANTunnelAddr == vtepIP.String() && node.Spec.VXLANTunnelMACAddr == mac &&\n\t\t(node.Spec.BGP != nil && node.Spec.BGP.IPv4Address == publicIP) {\n\t\treturn nil\n\t}\n\n\tlog.Infof(\"Calico Node current value: %+v.\", node)\n\n\tnode.Spec.BGP = &libapi.NodeBGPSpec{}\n\t// Set public ip with subnet /32.\n\t// The subnet part is required to pass Felix validation.\n\tnode.Spec.BGP.IPv4Address = fmt.Sprintf(\"%s/32\", publicIP)\n\tnode.Spec.IPv4VXLANTunnelAddr = vtepIP.String()\n\tnode.Spec.VXLANTunnelMACAddr = mac\n\t_, err = c.Nodes().Update(ctx, node, options.SetOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Infof(\"Calico Node %s vtep IP/Mac/PublicIP updated.\", nodeName)\n\treturn nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpNeighborsNeighborAfiSafisAfiSafiIpv4Unicast(ctx context.Context, name string, identifier string, protocolName string, neighborAddress string, 
afiSafiName string, frinxOpenconfigBgpBgpcommonmpipv4unicastgroupIpv4UnicastBodyParam FrinxOpenconfigBgpBgpcommonmpipv4unicastgroupIpv4UnicastRequest1, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:neighbors/frinx-openconfig-network-instance:neighbor/{neighbor-address}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv4-unicast/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"neighbor-address\"+\"}\", fmt.Sprintf(\"%v\", neighborAddress), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpipv4unicastgroupIpv4UnicastBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a 
*FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpNeighborsNeighborAfiSafisAfiSafiL3vpnIpv4Unicast(ctx context.Context, name string, identifier string, protocolName string, neighborAddress string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpl3vpnipv4unicastgroupL3vpnIpv4UnicastBodyParam FrinxOpenconfigBgpBgpcommonmpl3vpnipv4unicastgroupL3vpnIpv4UnicastRequest1, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:neighbors/frinx-openconfig-network-instance:neighbor/{neighbor-address}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:l3vpn-ipv4-unicast/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"neighbor-address\"+\"}\", fmt.Sprintf(\"%v\", neighborAddress), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpl3vpnipv4unicastgroupL3vpnIpv4UnicastBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, 
err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiL3vpnIpv4Unicast(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpl3vpnipv4unicastgroupL3vpnIpv4UnicastBodyParam FrinxOpenconfigBgpBgpcommonmpl3vpnipv4unicastgroupL3vpnIpv4UnicastRequest, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:l3vpn-ipv4-unicast/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpl3vpnipv4unicastgroupL3vpnIpv4UnicastBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif 
err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiIpv4LabeledUnicast(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpipv4labeledunicastgroupIpv4LabeledUnicastBodyParam FrinxOpenconfigBgpBgpcommonmpipv4labeledunicastgroupIpv4LabeledUnicastRequest, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv4-labeled-unicast/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpipv4labeledunicastgroupIpv4LabeledUnicastBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := 
ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func setEdisonInterfaces(i config.Interfaces, ip string) error {\n\n\tif dialogs.YesNoDialog(\"Would you like to assign static IP wlan address for your board?\") {\n\n\t\t// assign static ip\n\t\tfmt.Println(\"[+] ********NOTE: ADJUST THESE VALUES ACCORDING TO YOUR LOCAL NETWORK CONFIGURATION********\")\n\n\t\tfor {\n\t\t\tfmt.Printf(\"[+] Current values are:\\n \\t[+] Address:%s\\n\\t[+] Gateway:%s\\n\\t[+] Netmask:%s\\n\\t[+] DNS:%s\\n\",\n\t\t\t\ti.Address, i.Gateway, i.Netmask, i.DNS)\n\n\t\t\tif dialogs.YesNoDialog(\"Change values?\") {\n\t\t\t\tconfig.AskInterfaceParams(&i)\n\t\t\t}\n\n\t\t\tfmt.Println(\"[+] NOTE: You might need to enter your Edison board password\")\n\n\t\t\targs1 := []string{\n\t\t\t\t\"root@\" + ip,\n\t\t\t\t\"-t\",\n\t\t\t\tfmt.Sprintf(\"sed -i.bak -e '53 s/.*/ifconfig $IFNAME %s netmask %s/g' /etc/wpa_supplicant/wpa_cli-actions.sh\",\n\t\t\t\t\ti.Address, i.Netmask),\n\t\t\t}\n\n\t\t\targs2 := []string{\n\t\t\t\t\"root@\" + ip,\n\t\t\t\t\"-t\",\n\t\t\t\tfmt.Sprintf(\"sed -i -e '54i route add default gw %s' /etc/wpa_supplicant/wpa_cli-actions.sh\",\n\t\t\t\t\ti.Gateway),\n\t\t\t}\n\n\t\t\targs3 := []string{\n\t\t\t\t\"root@\" + ip,\n\t\t\t\t\"-t\",\n\t\t\t\tfmt.Sprintf(\"echo nameserver %s > /etc/resolv.conf\", i.DNS),\n\t\t\t}\n\t\t\tifaceDown := []string{\n\t\t\t\t\"root@\" + ip,\n\t\t\t\t\"-t\",\n\t\t\t\tfmt.Sprint(\"ifconfig wlan0 down\"),\n\t\t\t}\n\n\t\t\tifaceUp := []string{\n\t\t\t\t\"-o\",\n\t\t\t\t\"StrictHostKeyChecking=no\",\n\t\t\t\t\"root@\" + ip,\n\t\t\t\t\"-t\",\n\t\t\t\tfmt.Sprint(\"ifconfig wlan0 up\"),\n\t\t\t}\n\t\t\tfmt.Println(\"[+] Updating network configuration\")\n\t\t\tif err := help.ExecStandardStd(\"ssh\", args1...); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfmt.Println(\"[+] Updating gateway settings\")\n\t\t\tif err := help.ExecStandardStd(\"ssh\", args2...); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfmt.Println(\"[+] Adding custom nameserver\")\n\t\t\tif err := help.ExecStandardStd(\"ssh\", args3...); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfmt.Println(\"[+] Reloading interface settings\")\n\t\t\tif err := help.ExecStandardStd(\"ssh\", ifaceDown...); err != nil {\n\t\t\t\tfmt.Println(\"[-] Error shutting down wlan0 interface: \", err.Error())\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tif err := help.ExecStandardStd(\"ssh\", ifaceUp...); err != nil {\n\t\t\t\tfmt.Println(\"[-] Error starting wlan0 interface: \", err.Error())\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil\n\n}", "func Associate(client *gophercloud.ServiceClient, serverId, fip string) AssociateResult {\n\tvar res AssociateResult\n\n\taddFloatingIp := make(map[string]interface{})\n\taddFloatingIp[\"address\"] = fip\n\treqBody := map[string]interface{}{\"addFloatingIp\": addFloatingIp}\n\n\t_, res.Err = client.Post(associateURL(client, serverId), reqBody, nil, nil)\n\treturn res\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiIpv4LabeledUnicastPrefixLimit(ctx context.Context, name string, 
identifier string, protocolName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitRequest, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv4-labeled-unicast/frinx-openconfig-network-instance:prefix-limit/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (i *IPInterface) Setup(cmd *exec.Cmd, s *Silo, index int) error {\n\tif i.InternetAccess {\n\t\tforwardingEnabled, err := 
colex.IPv4ForwardingEnabled()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !forwardingEnabled {\n\t\t\treturn errors.New(\"ipv4 forwarding not enabled in kernel, required\")\n\t\t}\n\t}\n\n\ti.bridgeName = fmt.Sprintf(\"b%d-%s\", index, s.IDHex)\n\ti.hostVeth = fmt.Sprintf(\"v%d-%sh\", index, s.IDHex)\n\ti.siloVeth = fmt.Sprintf(\"v%d-%ss\", index, s.IDHex)\n\tbridge, err := colex.CreateNetBridge(i.bridgeName, i.BridgeIP, &net.IPNet{Mask: i.BridgeMask})\n\tif err != nil {\n\t\treturn err\n\t}\n\thostDev, siloDev, err := colex.CreateVethPair(fmt.Sprintf(\"v%d-%s\", index, s.IDHex))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = colex.AttachNetBridge(bridge, hostDev)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = colex.MoveVethToNamespace(siloDev, cmd.Process.Pid)\n\tif err != nil {\n\t\treturn err\n\t}\n\tnamespaceNet, err := colex.NamespaceNetOpen(cmd.Process.Pid)\n\tif err != nil {\n\t\treturn err\n\n\t}\n\tdefer namespaceNet.Close()\n\terr = namespaceNet.LinkAddAddress(i.siloVeth, i.SiloIP, i.SiloMask)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = namespaceNet.LinkSetState(i.siloVeth, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// setup networking rules\n\ti.ipt, err = iptables.New()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif i.InternetAccess {\n\t\terr = i.ipt.AppendUnique(\"nat\", \"POSTROUTING\", \"-m\", \"physdev\", \"--physdev-in\", i.hostVeth, \"-j\", \"MASQUERADE\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\ti.isSetup = true\n\treturn nil\n}", "func (hd *Datapath) UpdateNatPool(np *netproto.NatPool, vrf *netproto.Vrf) error {\n\t// This will ensure that only one datapath config will be active at a time. This is a temporary restriction\n\t// to ensure that HAL will use a single config thread, this will be removed prior to FCS to allow parallel configs to go through.\n\t// TODO Remove Global Locking\n\thd.Lock()\n\tdefer hd.Unlock()\n\tvrfKey := &halproto.VrfKeyHandle{\n\t\tKeyOrHandle: &halproto.VrfKeyHandle_VrfId{\n\t\t\tVrfId: vrf.Status.VrfID,\n\t\t},\n\t}\n\n\tipRange := strings.Split(np.Spec.IPRange, \"-\")\n\tif len(ipRange) != 2 {\n\t\treturn fmt.Errorf(\"could not parse IP Range from the NAT Pool IPRange. 
{%v}\", np.Spec.IPRange)\n\t}\n\n\tstartIP := net.ParseIP(strings.TrimSpace(ipRange[0]))\n\tif len(startIP) == 0 {\n\t\treturn fmt.Errorf(\"could not parse IP from {%v}\", startIP)\n\t}\n\tendIP := net.ParseIP(strings.TrimSpace(ipRange[1]))\n\tif len(endIP) == 0 {\n\t\treturn fmt.Errorf(\"could not parse IP from {%v}\", endIP)\n\t}\n\n\tlowIP := halproto.IPAddress{\n\t\tIpAf: halproto.IPAddressFamily_IP_AF_INET,\n\t\tV4OrV6: &halproto.IPAddress_V4Addr{\n\t\t\tV4Addr: ipv4Touint32(startIP),\n\t\t},\n\t}\n\n\thighIP := halproto.IPAddress{\n\t\tIpAf: halproto.IPAddressFamily_IP_AF_INET,\n\t\tV4OrV6: &halproto.IPAddress_V4Addr{\n\t\t\tV4Addr: ipv4Touint32(endIP),\n\t\t},\n\t}\n\n\taddrRange := &halproto.Address_Range{\n\t\tRange: &halproto.AddressRange{\n\t\t\tRange: &halproto.AddressRange_Ipv4Range{\n\t\t\t\tIpv4Range: &halproto.IPRange{\n\t\t\t\t\tLowIpaddr: &lowIP,\n\t\t\t\t\tHighIpaddr: &highIP,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tnatPoolUpdateReqMsg := &halproto.NatPoolRequestMsg{\n\t\tRequest: []*halproto.NatPoolSpec{\n\t\t\t{\n\t\t\t\tKeyOrHandle: &halproto.NatPoolKeyHandle{\n\t\t\t\t\tKeyOrHandle: &halproto.NatPoolKeyHandle_PoolKey{\n\t\t\t\t\t\tPoolKey: &halproto.NatPoolKey{\n\t\t\t\t\t\t\tVrfKh: vrfKey,\n\t\t\t\t\t\t\tPoolId: np.Status.NatPoolID,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tAddress: []*halproto.Address{\n\t\t\t\t\t{\n\t\t\t\t\t\tAddress: addrRange,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tif hd.Kind == \"hal\" {\n\t\tresp, err := hd.Hal.Natclient.NatPoolUpdate(context.Background(), natPoolUpdateReqMsg)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error updating nat pool. Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\t\tif resp.Response[0].ApiStatus != halproto.ApiStatus_API_STATUS_OK {\n\t\t\tlog.Errorf(\"HAL returned non OK status. %v\", resp.Response[0].ApiStatus.String())\n\t\t\treturn fmt.Errorf(\"HAL returned non OK status. %v\", resp.Response[0].ApiStatus.String())\n\t\t}\n\t} else {\n\t\t_, err := hd.Hal.Natclient.NatPoolUpdate(context.Background(), natPoolUpdateReqMsg)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error updating nat pool. Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func configureIPsAndSubnets(node *libapi.Node, k8sNode *v1.Node, getInterfaces func([]string, []string, int) ([]autodetection.Interface, error)) (bool, error) {\n\t// If the node resource currently has no BGP configuration, add an empty\n\t// set of configuration as it makes the processing below easier, and we\n\t// must end up configuring some BGP fields before we complete.\n\tif node.Spec.BGP == nil {\n\t\tlog.Info(\"Initialize BGP data\")\n\t\tnode.Spec.BGP = &libapi.NodeBGPSpec{}\n\t}\n\n\toldIpv4 := node.Spec.BGP.IPv4Address\n\toldIpv6 := node.Spec.BGP.IPv6Address\n\n\t// Determine the autodetection type for IPv4 and IPv6. Note that we\n\t// only autodetect IPv4 when it has not been specified. 
IPv6 must be\n\t// explicitly requested using the \"autodetect\" value.\n\t//\n\t// If we aren't auto-detecting then we need to validate the configured\n\t// value and possibly fix up missing subnet configuration.\n\tipv4Env := os.Getenv(\"IP\")\n\tif ipv4Env == \"autodetect\" || (ipv4Env == \"\" && node.Spec.BGP.IPv4Address == \"\") {\n\t\tadm := os.Getenv(\"IP_AUTODETECTION_METHOD\")\n\t\tcidr := autodetection.AutoDetectCIDR(adm, 4, k8sNode, getInterfaces)\n\t\tif cidr != nil {\n\t\t\t// We autodetected an IPv4 address so update the value in the node.\n\t\t\tnode.Spec.BGP.IPv4Address = cidr.String()\n\t\t} else if node.Spec.BGP.IPv4Address == \"\" {\n\t\t\t// No IPv4 address is configured, but we always require one, so exit.\n\t\t\tlog.Warn(\"Couldn't autodetect an IPv4 address. If auto-detecting, choose a different autodetection method. Otherwise provide an explicit address.\")\n\t\t\treturn false, fmt.Errorf(\"Failed to autodetect an IPv4 address\")\n\t\t} else {\n\t\t\t// No IPv4 autodetected, but a previous one was configured.\n\t\t\t// Tell the user we are leaving the value unchanged. We\n\t\t\t// will validate that the IP matches one on the interface.\n\t\t\tlog.Warnf(\"Autodetection of IPv4 address failed, keeping existing value: %s\", node.Spec.BGP.IPv4Address)\n\t\t\tvalidateIP(node.Spec.BGP.IPv4Address)\n\t\t}\n\t} else if ipv4Env == \"none\" && node.Spec.BGP.IPv4Address != \"\" {\n\t\tlog.Infof(\"Autodetection for IPv4 disabled, keeping existing value: %s\", node.Spec.BGP.IPv4Address)\n\t\tvalidateIP(node.Spec.BGP.IPv4Address)\n\t} else if ipv4Env != \"none\" {\n\t\tif ipv4Env != \"\" {\n\t\t\t// Attempt to get the local CIDR of ipv4Env\n\t\t\tipv4CIDROrIP, err := autodetection.GetLocalCIDR(ipv4Env, 4, getInterfaces)\n\t\t\tif err != nil {\n\t\t\t\tlog.Warnf(\"Attempt to get the local CIDR: %s failed, %s\", ipv4Env, err)\n\t\t\t}\n\t\t\tnode.Spec.BGP.IPv4Address = parseIPEnvironment(\"IP\", ipv4CIDROrIP, 4)\n\t\t}\n\t\tvalidateIP(node.Spec.BGP.IPv4Address)\n\t}\n\n\tipv6Env := os.Getenv(\"IP6\")\n\tif ipv6Env == \"autodetect\" {\n\t\tadm := os.Getenv(\"IP6_AUTODETECTION_METHOD\")\n\t\tcidr := autodetection.AutoDetectCIDR(adm, 6, k8sNode, getInterfaces)\n\t\tif cidr != nil {\n\t\t\t// We autodetected an IPv6 address so update the value in the node.\n\t\t\tnode.Spec.BGP.IPv6Address = cidr.String()\n\t\t} else if node.Spec.BGP.IPv6Address == \"\" {\n\t\t\t// No IPv6 address is configured, but we have requested one, so exit.\n\t\t\tlog.Warn(\"Couldn't autodetect an IPv6 address. If auto-detecting, choose a different autodetection method. Otherwise provide an explicit address.\")\n\t\t\treturn false, fmt.Errorf(\"Failed to autodetect an IPv6 address\")\n\t\t} else {\n\t\t\t// No IPv6 autodetected, but a previous one was configured.\n\t\t\t// Tell the user we are leaving the value unchanged. 
We\n\t\t\t// will validate that the IP matches one on the interface.\n\t\t\tlog.Warnf(\"Autodetection of IPv6 address failed, keeping existing value: %s\", node.Spec.BGP.IPv6Address)\n\t\t\tvalidateIP(node.Spec.BGP.IPv6Address)\n\t\t}\n\t} else if ipv6Env == \"none\" && node.Spec.BGP.IPv6Address != \"\" {\n\t\tlog.Infof(\"Autodetection for IPv6 disabled, keeping existing value: %s\", node.Spec.BGP.IPv6Address)\n\t\tvalidateIP(node.Spec.BGP.IPv6Address)\n\t} else if ipv6Env != \"none\" {\n\t\tif ipv6Env != \"\" {\n\t\t\tnode.Spec.BGP.IPv6Address = parseIPEnvironment(\"IP6\", ipv6Env, 6)\n\t\t}\n\t\tvalidateIP(node.Spec.BGP.IPv6Address)\n\t}\n\n\t// Detect if we've seen the IP address change, and flag that we need to check for conflicting Nodes\n\tif node.Spec.BGP.IPv4Address != oldIpv4 {\n\t\tlog.Info(\"Node IPv4 changed, will check for conflicts\")\n\t\treturn true, nil\n\t}\n\tif node.Spec.BGP.IPv6Address != oldIpv6 {\n\t\tlog.Info(\"Node IPv6 changed, will check for conflicts\")\n\t\treturn true, nil\n\t}\n\n\treturn false, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiIpv4Unicast(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpipv4unicastgroupIpv4UnicastBodyParam FrinxOpenconfigBgpBgpcommonmpipv4unicastgroupIpv4UnicastRequest, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv4-unicast/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := 
selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpipv4unicastgroupIpv4UnicastBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiIpv4LabeledUnicastPrefixLimitConfig(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigRequest, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv4-labeled-unicast/frinx-openconfig-network-instance:prefix-limit/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept 
header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (test *Test) CreateOrUpdateIP(projectName string, ip models.IP) error {\n\treturn nil\n}", "func (hd *Datapath) CreateNatBinding(nb *netproto.NatBinding, np *netproto.NatPool, natPoolVrfID uint64, vrf *netproto.Vrf) (*netproto.NatBinding, error) {\n\t// This will ensure that only one datapath config will be active at a time. This is a temporary restriction\n\t// to ensure that HAL will use a single config thread, this will be removed prior to FCS to allow parallel configs to go through.\n\t// TODO Remove Global Locking\n\thd.Lock()\n\tdefer hd.Unlock()\n\tvrfKey := &halproto.VrfKeyHandle{\n\t\tKeyOrHandle: &halproto.VrfKeyHandle_VrfId{\n\t\t\tVrfId: vrf.Status.VrfID,\n\t\t},\n\t}\n\n\tnatBindingIP := net.ParseIP(nb.Spec.IPAddress)\n\tif len(natBindingIP) == 0 {\n\t\tlog.Errorf(\"could not parse IP from {%v}\", natBindingIP)\n\t\treturn nil, ErrIPParse\n\t}\n\n\tipAddr := &halproto.IPAddress{\n\t\tIpAf: halproto.IPAddressFamily_IP_AF_INET,\n\t\tV4OrV6: &halproto.IPAddress_V4Addr{\n\t\t\tV4Addr: ipv4Touint32(natBindingIP),\n\t\t},\n\t}\n\n\tnatPoolVrfKey := &halproto.VrfKeyHandle{\n\t\tKeyOrHandle: &halproto.VrfKeyHandle_VrfId{\n\t\t\tVrfId: natPoolVrfID,\n\t\t},\n\t}\n\n\tnatPoolKey := &halproto.NatPoolKeyHandle{\n\t\tKeyOrHandle: &halproto.NatPoolKeyHandle_PoolKey{\n\t\t\tPoolKey: &halproto.NatPoolKey{\n\t\t\t\tVrfKh: natPoolVrfKey,\n\t\t\t\tPoolId: np.Status.NatPoolID,\n\t\t\t},\n\t\t},\n\t}\n\n\tnatBindingReqMsg := &halproto.NatMappingRequestMsg{\n\t\tRequest: []*halproto.NatMappingSpec{\n\t\t\t{\n\t\t\t\tKeyOrHandle: &halproto.NatMappingKeyHandle{\n\t\t\t\t\tKeyOrHandle: &halproto.NatMappingKeyHandle_Svc{\n\t\t\t\t\t\tSvc: &halproto.Svc{\n\t\t\t\t\t\t\tVrfKh: vrfKey,\n\t\t\t\t\t\t\tIpAddr: ipAddr,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tNatPool: natPoolKey,\n\t\t\t\tBidir: true, // Set Bidirectional to true in order to create forward and reverse mappings.\n\t\t\t},\n\t\t},\n\t}\n\n\tif hd.Kind == \"hal\" {\n\t\tresp, err := hd.Hal.Natclient.NatMappingCreate(context.Background(), natBindingReqMsg)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error creating nat binding. 
Err: %v\", err)\n\t\t\treturn nb, err\n\t\t}\n\t\tif !(resp.Response[0].ApiStatus == halproto.ApiStatus_API_STATUS_OK || resp.Response[0].ApiStatus == halproto.ApiStatus_API_STATUS_EXISTS_ALREADY) {\n\t\t\tlog.Errorf(\"HAL returned non OK status. %v\", resp.Response[0].ApiStatus.String())\n\t\t\treturn nb, fmt.Errorf(\"HAL returned non OK status. %v\", resp.Response[0].ApiStatus.String())\n\t\t}\n\t\tipv4Int := resp.Response[0].Status.MappedIp.GetV4Addr()\n\t\tip := uint32ToIPv4(ipv4Int)\n\t\tnb.Status.NatIP = ip.String()\n\t\treturn nb, nil\n\t}\n\t_, err := hd.Hal.Natclient.NatMappingCreate(context.Background(), natBindingReqMsg)\n\tif err != nil {\n\t\tlog.Errorf(\"Error creating nat binding. Err: %v\", err)\n\t\treturn nb, err\n\t}\n\treturn nb, nil\n\n}", "func AllowIPForEnvironment(userName *string, environment, profile string, extraPorts config.ExtraPorts, cfg *config.Config) error {\n\treturn changeIPsForEnvironment(true, userName, environment, profile, extraPorts, cfg)\n}", "func (px *PXE) StartDHCP(iface string, ip net.IP) {\n\n\tvar netmask net.IP\n\tif px.selfNet.IsUnspecified() {\n\t\tnetmask = net.ParseIP(\"255.255.255.0\").To4()\n\t} else {\n\t\tnetmask = px.selfNet.To4()\n\t}\n\n\t// FIXME: hardcoded value\n\tpx.leaseTime = time.Minute * 5\n\tleaseTime := make([]byte, 4)\n\tbinary.BigEndian.PutUint32(leaseTime, uint32(px.leaseTime.Seconds()))\n\n\tpx.options = layers.DHCPOptions{\n\t\tlayers.DHCPOption{Type: layers.DHCPOptLeaseTime, Length: 4, Data: leaseTime},\n\t\tlayers.DHCPOption{Type: layers.DHCPOptSubnetMask, Length: 4, Data: netmask.To4()},\n\t\tlayers.DHCPOption{Type: layers.DHCPOptRouter, Length: 4, Data: px.selfIP.To4()},\n\t}\n\n\tvar e error\n\n\t// Find our interface\n\tpx.iface, e = net.InterfaceByName(iface)\n\tif e != nil {\n\t\tpx.api.Logf(types.LLCRITICAL, \"%v: %s\", e, iface)\n\t\treturn\n\t}\n\n\t// We need the raw handle to send unicast packet replies\n\t// This is only used for sending initial DHCP offers\n\t// Note: 0x0800 is EtherType for ipv4. 
See: https://en.wikipedia.org/wiki/EtherType\n\tpx.rawHandle, e = raw.ListenPacket(px.iface, 0x0800, nil)\n\tif e != nil {\n\t\tpx.api.Logf(types.LLCRITICAL, \"%v: %s\", e, iface)\n\t\treturn\n\t}\n\tdefer px.rawHandle.Close()\n\n\t// We use this packetconn to read from\n\tnc, e := net.ListenPacket(\"udp4\", \":67\")\n\tif e != nil {\n\t\tpx.api.Logf(types.LLCRITICAL, \"%v\", e)\n\t\treturn\n\t}\n\tc := ipv4.NewPacketConn(nc)\n\tdefer c.Close()\n\tpx.api.Logf(types.LLINFO, \"started DHCP listener on: %s\", iface)\n\n\t// main read loop\n\tfor {\n\t\tbuffer := make([]byte, DHCPPacketBuffer)\n\t\tvar req layers.DHCPv4\n\t\tparser := gopacket.NewDecodingLayerParser(layers.LayerTypeDHCPv4, &req)\n\t\tdecoded := []gopacket.LayerType{}\n\n\t\tn, _, addr, e := c.ReadFrom(buffer)\n\t\tif e != nil {\n\t\t\tpx.api.Logf(types.LLCRITICAL, \"%v\", e)\n\t\t\tbreak\n\t\t}\n\t\tpx.api.Logf(types.LLDDEBUG, \"got a dhcp packet from: %s\", addr.String())\n\t\tif n < 240 {\n\t\t\tpx.api.Logf(types.LLDDEBUG, \"packet is too short: %d < 240\", n)\n\t\t\tcontinue\n\t\t}\n\n\t\tif e = parser.DecodeLayers(buffer[:n], &decoded); e != nil {\n\t\t\tpx.api.Logf(types.LLERROR, \"error decoding packet: %v\", e)\n\t\t\tcontinue\n\t\t}\n\t\tif len(decoded) < 1 || decoded[0] != layers.LayerTypeDHCPv4 {\n\t\t\tpx.api.Logf(types.LLERROR, \"decoded non-DHCP packet\")\n\t\t\tcontinue\n\t\t}\n\t\t// at this point we have a parsed DHCPv4 packet\n\n\t\tif req.Operation != layers.DHCPOpRequest {\n\t\t\t// odd...\n\t\t\tcontinue\n\t\t}\n\t\tif req.HardwareLen > 16 {\n\t\t\tpx.api.Logf(types.LLDDEBUG, \"packet HardwareLen too long: %d > 16\", req.HardwareLen)\n\t\t\tcontinue\n\t\t}\n\n\t\tgo px.handleDHCPRequest(req)\n\t}\n\tpx.api.Log(types.LLNOTICE, \"DHCP stopped.\")\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiIpv4UnicastPrefixLimit(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitRequest1, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv4-unicast/frinx-openconfig-network-instance:prefix-limit/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = 
strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpNeighborsNeighborAfiSafisAfiSafiIpv4LabeledUnicastPrefixLimit(ctx context.Context, name string, identifier string, protocolName string, neighborAddress string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitRequest10, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:neighbors/frinx-openconfig-network-instance:neighbor/{neighbor-address}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv4-labeled-unicast/frinx-openconfig-network-instance:prefix-limit/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, 
\"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"neighbor-address\"+\"}\", fmt.Sprintf(\"%v\", neighborAddress), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (ic *ifConfigurator) advertiseContainerAddr(containerNetNS string, containerIfaceName string, result *current.Result) error {\n\tif err := nsIsNSorErr(containerNetNS); err != nil {\n\t\treturn fmt.Errorf(\"%s is not a valid network namespace: %v\", containerNetNS, err)\n\t}\n\tif len(result.IPs) == 0 {\n\t\tklog.Warningf(\"Expected at least one IP address in CNI result, skip sending Gratuitous ARP\")\n\t\treturn nil\n\t}\n\t// Sending Gratuitous ARP is a best-effort action and is unlikely to fail as we have ensured the netns is valid.\n\tgo nsWithNetNSPath(containerNetNS, func(_ ns.NetNS) error {\n\t\tiface, err := netInterfaceByName(containerIfaceName)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Failed to find container interface %s in ns %s: %v\", containerIfaceName, containerNetNS, err)\n\t\t\treturn nil\n\t\t}\n\t\tvar targetIPv4, targetIPv6 net.IP\n\t\tfor _, ipc := range result.IPs {\n\t\t\tif ipc.Address.IP.To4() != nil {\n\t\t\t\ttargetIPv4 = ipc.Address.IP\n\t\t\t} else {\n\t\t\t\ttargetIPv6 = ipc.Address.IP\n\t\t\t}\n\t\t}\n\t\tif targetIPv4 == nil && targetIPv6 == nil {\n\t\t\tklog.V(2).Infof(\"No IPv4 and IPv6 address found for container interface %s in ns %s, skip sending Gratuitous ARP/NDP\", containerIfaceName, containerNetNS)\n\t\t\treturn nil\n\t\t}\n\t\tticker := time.NewTicker(50 * time.Millisecond)\n\t\tdefer ticker.Stop()\n\t\tcount := 0\n\t\tfor 
{\n\t\t\t// Send gratuitous ARP/NDP to network in case of stale mappings for this IP address\n\t\t\t// (e.g. if a previous - deleted - Pod was using the same IP).\n\t\t\tif targetIPv4 != nil {\n\t\t\t\tif err := arpingGratuitousARPOverIface(targetIPv4, iface); err != nil {\n\t\t\t\t\tklog.Warningf(\"Failed to send gratuitous ARP #%d: %v\", count, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif targetIPv6 != nil {\n\t\t\t\tif err := ndpGratuitousNDPOverIface(targetIPv6, iface); err != nil {\n\t\t\t\t\tklog.Warningf(\"Failed to send gratuitous NDP #%d: %v\", count, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tcount++\n\t\t\tif count == 3 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t<-ticker.C\n\t\t}\n\t\treturn nil\n\t})\n\treturn nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiL3vpnIpv4UnicastPrefixLimit(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitRequest7, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:l3vpn-ipv4-unicast/frinx-openconfig-network-instance:prefix-limit/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam\n\tr, err := 
a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpPeerGroupsPeerGroupAfiSafisAfiSafiIpv4LabeledUnicast(ctx context.Context, name string, identifier string, protocolName string, peerGroupName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpipv4labeledunicastgroupIpv4LabeledUnicastBodyParam FrinxOpenconfigBgpBgpcommonmpipv4labeledunicastgroupIpv4LabeledUnicastRequest2, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:peer-groups/frinx-openconfig-network-instance:peer-group/{peer-group-name}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv4-labeled-unicast/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"peer-group-name\"+\"}\", fmt.Sprintf(\"%v\", peerGroupName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := 
selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpipv4labeledunicastgroupIpv4LabeledUnicastBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiIpv4UnicastPrefixLimitConfig(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigRequest1, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv4-unicast/frinx-openconfig-network-instance:prefix-limit/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept 
header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func ConfigureIPAddress(success chan bool) {\n\tsetKVPForIpAddress := prepareKeyValuePair(\"minishift\", \"IpAddress\", \"10.0.75.128\")\n\n\tposh := New()\n\tresult, _ := posh.Execute(setKVPForIpAddress)\n\t\n\tif (strings.Contains(result, \"4096\")) {\n\t\tsuccess <- true\n\t}\n}", "func (m *UserSimulationEventInfo) SetIpAddress(value *string)() {\n m.ipAddress = value\n}", "func (c *Client) setupPolicyOnlyMode() error {\n\tgwLink := util.GetNetLink(c.nodeConfig.GatewayConfig.Name)\n\t_, gwIP, _ := net.ParseCIDR(fmt.Sprintf(\"%s/32\", c.nodeConfig.NodeIPAddr.IP.String()))\n\tif err := netlink.AddrReplace(gwLink, &netlink.Addr{IPNet: gwIP}); err != nil {\n\t\treturn fmt.Errorf(\"failed to add address %s to gw %s: %v\", gwIP, gwLink.Attrs().Name, err)\n\t}\n\n\t// Add default route to service table.\n\t_, defaultRt, _ := net.ParseCIDR(\"0/0\")\n\tnhIP := net.ParseIP(svcTblVirtualDefaultGWIP)\n\troute := &netlink.Route{\n\t\tLinkIndex: gwLink.Attrs().Index,\n\t\tTable: c.serviceRtTable.Idx,\n\t\tFlags: int(netlink.FLAG_ONLINK),\n\t\tDst: defaultRt,\n\t\tGw: nhIP,\n\t}\n\tif err := netlink.RouteReplace(route); err != nil {\n\t\treturn fmt.Errorf(\"failed to add default route to service table: %v\", err)\n\t}\n\t// Add static neighbor to next hop so that no ARPING is ever required on antrea-gw0.\n\tnhMAC, _ := c.resolveDefaultRouteNHMAC()\n\tneigh := &netlink.Neigh{\n\t\tLinkIndex: gwLink.Attrs().Index,\n\t\tFamily: netlink.FAMILY_V4,\n\t\tState: netlink.NUD_PERMANENT,\n\t\tIP: nhIP,\n\t\tHardwareAddr: nhMAC,\n\t}\n\tif err := netlink.NeighSet(neigh); err != nil {\n\t\treturn fmt.Errorf(\"failed to add neigh %v to gw %s: %v\", neigh, gwLink.Attrs().Name, err)\n\t}\n\treturn nil\n}", "func (client *Client) AllowIPAddressesOnSnatBridge() error {\n\tif err := networkutils.AllowIPAddresses(SnatBridgeName, client.SkipAddressesFromBlock, iptables.Insert); err != nil {\n\t\tlog.Printf(\"AllowIPAddresses failed with error %v\", err)\n\t\treturn newErrorSnatClient(err.Error())\n\t}\n\n\treturn nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiL3vpnIpv6UnicastPrefixLimit(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, 
frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitRequest9, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:l3vpn-ipv6-unicast/frinx-openconfig-network-instance:prefix-limit/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiIpv6LabeledUnicastPrefixLimit(ctx 
context.Context, name string, identifier string, protocolName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitRequest2, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv6-labeled-unicast/frinx-openconfig-network-instance:prefix-limit/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func AssignIPAddress(deviceID, ipAddress string) error {\n\tclient, err := NewExtPacketClient()\n\tif err != 
nil {\n\t\treturn err\n\t}\n\n\treq := extpackngo.IPAddressAssignRequest{\n\t\tAddress: ipAddress,\n\t}\n\n\tip, _, err := client.IPs.Assign(deviceID, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te := MarshallAndPrint(ip)\n\treturn e\n}", "func (s *GRPCServerHandler) AddNetwork(context context.Context, in *rpc.AddNetworkRequest) (*rpc.AddNetworkReply, error) {\n\tklog.V(1).Infof(\"Received AddNetwork for NS %s, Pod %s, NameSpace %s, Container %s, ifname %s\",\n\t\tin.Netns, in.K8S_POD_NAME, in.K8S_POD_NAMESPACE, in.K8S_POD_INFRA_CONTAINER_ID, in.IfName)\n\n\taddr, deviceNumber, err := s.ipamd.dataStore.AssignPodIPv4Address(&k8sapi.K8SPodInfo{\n\t\tName: in.K8S_POD_NAME,\n\t\tNamespace: in.K8S_POD_NAMESPACE,\n\t\tContainer: in.K8S_POD_INFRA_CONTAINER_ID})\n\n\tsubnets := make([]string, 0)\n\tfor _, subnet := range s.ipamd.vpcSubnets() {\n\t\tsubnets = append(subnets, *subnet)\n\t}\n\tif s.ipamd.supportVPNTraffic {\n\t\tvpnNet := *s.ipamd.vpc.Network\n\t\tvpnNet.IP[2] = 255\n\t\tvpnNet.Mask = net.IPv4Mask(255, 255, 255, 0)\n\t\tsubnets = append(subnets, vpnNet.String())\n\t}\n\n\tresp := rpc.AddNetworkReply{\n\t\tSuccess: err == nil,\n\t\tIPv4Addr: addr,\n\t\tIPv4Subnet: \"\",\n\t\tDeviceNumber: int32(deviceNumber),\n\t\tUseExternalSNAT: false,\n\t\tVPCcidrs: subnets,\n\t}\n\tif err != nil {\n\t\tresp.Message = err.Error()\n\t\ts.ipamd.trigCh <- udevNotify{}\n\t}\n\tklog.V(1).Infof(\"Send AddNetworkReply: IPv4Addr %s, DeviceNumber: %d, err: %v\", addr, deviceNumber, err)\n\treturn &resp, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpNeighborsNeighborAfiSafisAfiSafiL3vpnIpv6Unicast(ctx context.Context, name string, identifier string, protocolName string, neighborAddress string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpl3vpnipv6unicastgroupL3vpnIpv6UnicastBodyParam FrinxOpenconfigBgpBgpcommonmpl3vpnipv6unicastgroupL3vpnIpv6UnicastRequest1, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:neighbors/frinx-openconfig-network-instance:neighbor/{neighbor-address}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:l3vpn-ipv6-unicast/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"neighbor-address\"+\"}\", fmt.Sprintf(\"%v\", neighborAddress), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, 
\"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpl3vpnipv6unicastgroupL3vpnIpv6UnicastBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiL3vpnIpv4UnicastPrefixLimitConfig(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigRequest7, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:l3vpn-ipv4-unicast/frinx-openconfig-network-instance:prefix-limit/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = 
strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiL3vpnIpv6Unicast(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpl3vpnipv6unicastgroupL3vpnIpv6UnicastBodyParam FrinxOpenconfigBgpBgpcommonmpl3vpnipv6unicastgroupL3vpnIpv6UnicastRequest, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:l3vpn-ipv6-unicast/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", 
fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpl3vpnipv6unicastgroupL3vpnIpv6UnicastBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpNeighborsNeighborAfiSafisAfiSafiIpv4UnicastPrefixLimit(ctx context.Context, name string, identifier string, protocolName string, neighborAddress string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitRequest11, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:neighbors/frinx-openconfig-network-instance:neighbor/{neighbor-address}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv4-unicast/frinx-openconfig-network-instance:prefix-limit/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = 
strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"neighbor-address\"+\"}\", fmt.Sprintf(\"%v\", neighborAddress), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpNeighborsNeighborAfiSafisAfiSafiIpv4LabeledUnicastPrefixLimitConfig(ctx context.Context, name string, identifier string, protocolName string, neighborAddress string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigRequest10, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:neighbors/frinx-openconfig-network-instance:neighbor/{neighbor-address}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv4-labeled-unicast/frinx-openconfig-network-instance:prefix-limit/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"neighbor-address\"+\"}\", fmt.Sprintf(\"%v\", neighborAddress), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpPeerGroupsPeerGroupAfiSafisAfiSafiL3vpnIpv4Unicast(ctx context.Context, name string, identifier string, protocolName string, peerGroupName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpl3vpnipv4unicastgroupL3vpnIpv4UnicastBodyParam 
FrinxOpenconfigBgpBgpcommonmpl3vpnipv4unicastgroupL3vpnIpv4UnicastRequest2, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:peer-groups/frinx-openconfig-network-instance:peer-group/{peer-group-name}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:l3vpn-ipv4-unicast/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"peer-group-name\"+\"}\", fmt.Sprintf(\"%v\", peerGroupName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpl3vpnipv4unicastgroupL3vpnIpv4UnicastBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) 
PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiIpv6LabeledUnicastPrefixLimitConfig(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigRequest2, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv6-labeled-unicast/frinx-openconfig-network-instance:prefix-limit/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: 
localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpPeerGroupsPeerGroupAfiSafisAfiSafiIpv4LabeledUnicastPrefixLimit(ctx context.Context, name string, identifier string, protocolName string, peerGroupName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitRequest20, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:peer-groups/frinx-openconfig-network-instance:peer-group/{peer-group-name}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv4-labeled-unicast/frinx-openconfig-network-instance:prefix-limit/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"peer-group-name\"+\"}\", fmt.Sprintf(\"%v\", peerGroupName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, 
err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpPeerGroupsPeerGroupAfiSafisAfiSafiIpv4Unicast(ctx context.Context, name string, identifier string, protocolName string, peerGroupName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpipv4unicastgroupIpv4UnicastBodyParam FrinxOpenconfigBgpBgpcommonmpipv4unicastgroupIpv4UnicastRequest2, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:peer-groups/frinx-openconfig-network-instance:peer-group/{peer-group-name}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv4-unicast/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"peer-group-name\"+\"}\", fmt.Sprintf(\"%v\", peerGroupName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpipv4unicastgroupIpv4UnicastBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, 
localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpNeighborsNeighborAfiSafisAfiSafiIpv6LabeledUnicast(ctx context.Context, name string, identifier string, protocolName string, neighborAddress string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpipv6labeledunicastgroupIpv6LabeledUnicastBodyParam FrinxOpenconfigBgpBgpcommonmpipv6labeledunicastgroupIpv6LabeledUnicastRequest1, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:neighbors/frinx-openconfig-network-instance:neighbor/{neighbor-address}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv6-labeled-unicast/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"neighbor-address\"+\"}\", fmt.Sprintf(\"%v\", neighborAddress), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = 
localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpipv6labeledunicastgroupIpv6LabeledUnicastBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiL3vpnIpv4MulticastPrefixLimit(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitRequest6, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:l3vpn-ipv4-multicast/frinx-openconfig-network-instance:prefix-limit/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := 
selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiL3vpnIpv6UnicastPrefixLimitConfig(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigRequest9, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:l3vpn-ipv6-unicast/frinx-openconfig-network-instance:prefix-limit/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept 
header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpPeerGroupsPeerGroupAfiSafisAfiSafiIpv4LabeledUnicastPrefixLimitConfig(ctx context.Context, name string, identifier string, protocolName string, peerGroupName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigRequest20, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:peer-groups/frinx-openconfig-network-instance:peer-group/{peer-group-name}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv4-labeled-unicast/frinx-openconfig-network-instance:prefix-limit/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"peer-group-name\"+\"}\", fmt.Sprintf(\"%v\", peerGroupName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to 
determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiIpv6UnicastPrefixLimitConfig(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigRequest3, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv6-unicast/frinx-openconfig-network-instance:prefix-limit/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), 
-1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpNeighborsNeighborAfiSafisAfiSafiL3vpnIpv4UnicastPrefixLimit(ctx context.Context, name string, identifier string, protocolName string, neighborAddress string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitRequest17, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:neighbors/frinx-openconfig-network-instance:neighbor/{neighbor-address}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:l3vpn-ipv4-unicast/frinx-openconfig-network-instance:prefix-limit/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = 
strings.Replace(localVarPath, \"{\"+\"neighbor-address\"+\"}\", fmt.Sprintf(\"%v\", neighborAddress), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiL2vpnEvpnPrefixLimit(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitRequest4, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:l2vpn-evpn/frinx-openconfig-network-instance:prefix-limit/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, 
\"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiIpv6UnicastPrefixLimit(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitRequest3, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv6-unicast/frinx-openconfig-network-instance:prefix-limit/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", 
fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func getGatewayIP(ifaceName string) string {\n\tcmd := exec.Command(\"ip\", \"route\", \"show\", \"dev\", ifaceName)\n\tlog.Tracef(\"executing %s %v\", cmd.Path, cmd.Args)\n\td, err := cmd.Output()\n\tif err != nil || cmd.ProcessState.ExitCode() != 0 {\n\t\treturn \"\"\n\t}\n\n\tfields := strings.Fields(string(d))\n\tif len(fields) < 3 || fields[0] != \"default\" {\n\t\treturn \"\"\n\t}\n\n\tip := net.ParseIP(fields[2])\n\tif ip == nil {\n\t\treturn \"\"\n\t}\n\n\treturn fields[2]\n}", "func AllowIPForEnvironment(sshUser, environment, profile string, extraPorts config.ExtraPorts) error {\n\treturn changeIPsForEnvironment(true, sshUser, environment, profile, extraPorts)\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpNeighborsNeighborAfiSafisAfiSafiIpv6LabeledUnicastPrefixLimit(ctx context.Context, name string, identifier string, protocolName string, neighborAddress string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitRequest12, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes 
[]byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:neighbors/frinx-openconfig-network-instance:neighbor/{neighbor-address}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv6-labeled-unicast/frinx-openconfig-network-instance:prefix-limit/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"neighbor-address\"+\"}\", fmt.Sprintf(\"%v\", neighborAddress), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpNeighborsNeighborAfiSafisAfiSafiIpv4UnicastConfig(ctx context.Context, name string, identifier string, protocolName string, neighborAddress string, afiSafiName string, 
frinxOpenconfigBgpBgpcommonmpipv4ipv6unicastcommonConfigBodyParam FrinxOpenconfigBgpBgpcommonmpipv4ipv6unicastcommonConfigRequest2, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:neighbors/frinx-openconfig-network-instance:neighbor/{neighbor-address}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv4-unicast/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"neighbor-address\"+\"}\", fmt.Sprintf(\"%v\", neighborAddress), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpipv4ipv6unicastcommonConfigBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (client 
*IntegrationRuntimeNodesClient) getIPAddressCreateRequest(ctx context.Context, resourceGroupName string, factoryName string, integrationRuntimeName string, nodeName string, options *IntegrationRuntimeNodesClientGetIPAddressOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/integrationRuntimes/{integrationRuntimeName}/nodes/{nodeName}/ipAddress\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif factoryName == \"\" {\n\t\treturn nil, errors.New(\"parameter factoryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{factoryName}\", url.PathEscape(factoryName))\n\tif integrationRuntimeName == \"\" {\n\t\treturn nil, errors.New(\"parameter integrationRuntimeName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{integrationRuntimeName}\", url.PathEscape(integrationRuntimeName))\n\tif nodeName == \"\" {\n\t\treturn nil, errors.New(\"parameter nodeName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{nodeName}\", url.PathEscape(nodeName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpNeighborsNeighborAfiSafisAfiSafiIpv4UnicastPrefixLimitConfig(ctx context.Context, name string, identifier string, protocolName string, neighborAddress string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigRequest11, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:neighbors/frinx-openconfig-network-instance:neighbor/{neighbor-address}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv4-unicast/frinx-openconfig-network-instance:prefix-limit/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", 
fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"neighbor-address\"+\"}\", fmt.Sprintf(\"%v\", neighborAddress), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiL3vpnIpv4MulticastPrefixLimitConfig(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigRequest6, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:l3vpn-ipv4-multicast/frinx-openconfig-network-instance:prefix-limit/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiL3vpnIpv6MulticastPrefixLimit(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitRequest8, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName 
string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:l3vpn-ipv6-multicast/frinx-openconfig-network-instance:prefix-limit/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpNeighborsNeighborAfiSafisAfiSafiL3vpnIpv6UnicastPrefixLimit(ctx context.Context, name string, identifier string, protocolName string, neighborAddress string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitRequest19, nodeId string) (*http.Response, error) 
{\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:neighbors/frinx-openconfig-network-instance:neighbor/{neighbor-address}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:l3vpn-ipv6-unicast/frinx-openconfig-network-instance:prefix-limit/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"neighbor-address\"+\"}\", fmt.Sprintf(\"%v\", neighborAddress), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func natActivate(ctx *zedrouterContext,\n\tstatus *types.NetworkInstanceStatus) error {\n\n\tlog.Infof(\"natActivate(%s)\\n\", status.DisplayName)\n\tsubnetStr := 
status.Subnet.String()\n\n\tfor _, a := range status.IfNameList {\n\t\tlog.Infof(\"Adding iptables rules for %s \\n\", a)\n\t\terr := iptables.IptableCmd(\"-t\", \"nat\", \"-A\", \"POSTROUTING\", \"-o\", a,\n\t\t\t\"-s\", subnetStr, \"-j\", \"MASQUERADE\")\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"IptableCmd failed: %s\", err)\n\t\t\treturn err\n\t\t}\n\t\terr = PbrRouteAddDefault(status.BridgeName, a)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"PbrRouteAddDefault for Bridge(%s) and interface %s failed. \"+\n\t\t\t\t\"Err: %s\", status.BridgeName, a, err)\n\t\t\treturn err\n\t\t}\n\t}\n\t// Add to Pbr table\n\terr := PbrNATAdd(subnetStr)\n\tif err != nil {\n\t\tlog.Errorf(\"PbrNATAdd failed for port %s - err = %s\\n\", status.Port, err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func HandleIPRequest(router *mux.Router, rootPath string) {\n\trouter.\n\t\tMethods(http.MethodGet).\n\t\tPath(rootPath).\n\t\tHeadersRegexp(\"Accept\", \".*((application/((xhtml+)?xml|json|javascript))|(text/x?html)).*\").\n\t\tHandlerFunc(provideIP).\n\t\tName(\"ip\")\n}", "func (m *ServicePrincipalRiskDetection) SetIpAddress(value *string)() {\n err := m.GetBackingStore().Set(\"ipAddress\", value)\n if err != nil {\n panic(err)\n }\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiL2vpnEvpnPrefixLimitConfig(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigRequest4, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:l2vpn-evpn/frinx-openconfig-network-instance:prefix-limit/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif 
localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpNeighborsNeighborAfiSafisAfiSafiIpv6LabeledUnicastPrefixLimitConfig(ctx context.Context, name string, identifier string, protocolName string, neighborAddress string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigRequest12, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:neighbors/frinx-openconfig-network-instance:neighbor/{neighbor-address}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv6-labeled-unicast/frinx-openconfig-network-instance:prefix-limit/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"neighbor-address\"+\"}\", fmt.Sprintf(\"%v\", neighborAddress), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), 
-1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func setPrimaryIP(name string, node *goeapi.Node, ip string) error {\n\tiface := module.IPInterface(node)\n\tif !iface.SetAddress(name, ip) {\n\t\treturn fmt.Errorf(\"failed to configure IP %s on interface %s.\", ip, name)\n\t}\n\treturn nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiIpv4UnicastConfig(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpipv4ipv6unicastcommonConfigBodyParam FrinxOpenconfigBgpBgpcommonmpipv4ipv6unicastcommonConfigRequest, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv4-unicast/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = 
strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpipv4ipv6unicastcommonConfigBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpNeighborsNeighborAfiSafisAfiSafiIpv6UnicastPrefixLimit(ctx context.Context, name string, identifier string, protocolName string, neighborAddress string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitRequest13, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:neighbors/frinx-openconfig-network-instance:neighbor/{neighbor-address}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv6-unicast/frinx-openconfig-network-instance:prefix-limit/\"\n\tlocalVarPath = strings.Replace(localVarPath, 
\"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"neighbor-address\"+\"}\", fmt.Sprintf(\"%v\", neighborAddress), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiL3vpnIpv6MulticastPrefixLimitConfig(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigRequest8, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:l3vpn-ipv6-multicast/frinx-openconfig-network-instance:prefix-limit/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiL2vpnVplsPrefixLimit(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitRequest5, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName 
string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:l2vpn-vpls/frinx-openconfig-network-instance:prefix-limit/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func ifaceAdd(iface, ip string) error {\n\tif err := exec.Command(\"ip\", \"addr\", \"add\", addSubnet(ip), \"dev\", iface).Run(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (c *Client) assignIPsToEni(eniID string, ipNum int) error {\n\treq := vpc.NewAssignPrivateIpAddressesRequest()\n\treq.NetworkInterfaceId = common.StringPtr(eniID)\n\treq.SecondaryPrivateIpAddressCount = common.Uint64Ptr(uint64(ipNum))\n\n\tblog.V(2).Infof(\"tencentcloud AssignPrivateIpAddresses request %s\", 
req.ToJsonString())\n\n\tresp, err := c.vpcClient.AssignPrivateIpAddresses(req)\n\tif err != nil {\n\t\tblog.Errorf(\"tencentcloud AssignPrivateIpAddresses request %s\", req.ToJsonString())\n\t\treturn fmt.Errorf(\"tencentcloud AssignPrivateIpAddresses request %s\", req.ToJsonString())\n\t}\n\n\tblog.V(2).Infof(\"tencentcloud AssignPrivateIpAddresses response %s\", resp.ToJsonString())\n\treturn nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpNeighborsNeighborAfiSafisAfiSafiL3vpnIpv4UnicastPrefixLimitConfig(ctx context.Context, name string, identifier string, protocolName string, neighborAddress string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigRequest17, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:neighbors/frinx-openconfig-network-instance:neighbor/{neighbor-address}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:l3vpn-ipv4-unicast/frinx-openconfig-network-instance:prefix-limit/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"neighbor-address\"+\"}\", fmt.Sprintf(\"%v\", neighborAddress), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam\n\tr, err := 
a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiIpv6LabeledUnicast(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpipv6labeledunicastgroupIpv6LabeledUnicastBodyParam FrinxOpenconfigBgpBgpcommonmpipv6labeledunicastgroupIpv6LabeledUnicastRequest, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv6-labeled-unicast/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = 
&frinxOpenconfigBgpBgpcommonmpipv6labeledunicastgroupIpv6LabeledUnicastBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func TestNatBidingToRemoteNatPool(t *testing.T) {\n\t// create netagent\n\tag, _, _ := state.createNetAgent(t)\n\tAssert(t, ag != nil, \"Failed to create agent %#v\", ag)\n\tdefer ag.Stop()\n\t// create backing remote Namespace and NatPool\n\trns := netproto.Namespace{\n\t\tTypeMeta: api.TypeMeta{Kind: \"Namespace\"},\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tTenant: \"default\",\n\t\t\tName: \"remoteNamespace\",\n\t\t},\n\t}\n\terr := ag.CreateNamespace(&rns)\n\tAssertOk(t, err, \"Could not create remote namespace\")\n\n\tnp := netproto.NatPool{\n\t\tTypeMeta: api.TypeMeta{Kind: \"NatPool\"},\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tTenant: \"default\",\n\t\t\tNamespace: \"remoteNamespace\",\n\t\t\tName: \"remoteNamespaceNatPool\",\n\t\t},\n\t\tSpec: netproto.NatPoolSpec{\n\t\t\tIPRange: \"10.0.0.1-10.1.1.100\",\n\t\t},\n\t}\n\n\t// create nat pool\n\terr = ag.CreateNatPool(&np)\n\tAssertOk(t, err, \"Error creating nat pool\")\n\n\tnb := netproto.NatBinding{\n\t\tTypeMeta: api.TypeMeta{Kind: \"NatBinding\"},\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tTenant: \"default\",\n\t\t\tNamespace: \"default\",\n\t\t\tName: \"updateNatBinding\",\n\t\t},\n\t\tSpec: netproto.NatBindingSpec{\n\t\t\tNatPoolName: \"remoteNamespace/remoteNamespaceNatPool\",\n\t\t\tIPAddress: \"10.1.1.1\",\n\t\t},\n\t}\n\n\t// create nat binding\n\terr = ag.CreateNatBinding(&nb)\n\tAssertOk(t, err, \"Error creating nat binding\")\n\tnatPool, err := ag.FindNatBinding(nb.ObjectMeta)\n\tAssertOk(t, err, \"NatBinding was not found in DB\")\n\tAssert(t, natPool.Name == \"updateNatBinding\", \"Nat Pool names did not match\", natPool)\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpNeighborsNeighborAfiSafisAfiSafiL3vpnIpv4MulticastPrefixLimit(ctx context.Context, name string, identifier string, protocolName string, neighborAddress string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitRequest16, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:neighbors/frinx-openconfig-network-instance:neighbor/{neighbor-address}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:l3vpn-ipv4-multicast/frinx-openconfig-network-instance:prefix-limit/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"neighbor-address\"+\"}\", fmt.Sprintf(\"%v\", neighborAddress), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolIsisGlobalIgpShortcutsAfi(ctx context.Context, name string, identifier string, protocolName string, afiName string, frinxOpenconfigIsisIsisshortcutsafilistAfiBodyParam FrinxOpenconfigIsisIsisshortcutsafilistAfiRequest, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = 
strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:isis/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:igp-shortcuts/frinx-openconfig-network-instance:afi/{afi-name}/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-name\"+\"}\", fmt.Sprintf(\"%v\", afiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigIsisIsisshortcutsafilistAfiBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpNeighborsNeighborAfiSafisAfiSafiL2vpnEvpnPrefixLimit(ctx context.Context, name string, identifier string, protocolName string, neighborAddress string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitRequest14, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = 
strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:neighbors/frinx-openconfig-network-instance:neighbor/{neighbor-address}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:l2vpn-evpn/frinx-openconfig-network-instance:prefix-limit/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"neighbor-address\"+\"}\", fmt.Sprintf(\"%v\", neighborAddress), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpPeerGroupsPeerGroupAfiSafisAfiSafiL3vpnIpv6Unicast(ctx context.Context, name string, identifier 
string, protocolName string, peerGroupName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpl3vpnipv6unicastgroupL3vpnIpv6UnicastBodyParam FrinxOpenconfigBgpBgpcommonmpl3vpnipv6unicastgroupL3vpnIpv6UnicastRequest2, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:peer-groups/frinx-openconfig-network-instance:peer-group/{peer-group-name}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:l3vpn-ipv6-unicast/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"peer-group-name\"+\"}\", fmt.Sprintf(\"%v\", peerGroupName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpl3vpnipv6unicastgroupL3vpnIpv6UnicastBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, 
newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func validateIP(ipn string) {\n\t// No validation required if no IP address is specified.\n\tif ipn == \"\" {\n\t\treturn\n\t}\n\n\tipAddr, _, err := cnet.ParseCIDROrIP(ipn)\n\tif err != nil {\n\t\tlog.WithError(err).Errorf(\"Failed to parse autodetected CIDR '%s'\", ipn)\n\t\tutils.Terminate()\n\t}\n\n\t// Get a complete list of interfaces with their addresses and check if\n\t// the IP address can be found.\n\tifaces, err := autodetection.GetInterfaces(net.Interfaces, nil, nil, ipAddr.Version())\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Unable to query host interfaces\")\n\t\tutils.Terminate()\n\t}\n\tif len(ifaces) == 0 {\n\t\tlog.Info(\"No interfaces found for validating IP configuration\")\n\t}\n\n\tfor _, i := range ifaces {\n\t\tfor _, c := range i.Cidrs {\n\t\t\tif ipAddr.Equal(c.IP) {\n\t\t\t\tlog.Debugf(\"IPv%d address %s discovered on interface %s\", ipAddr.Version(), ipAddr.String(), i.Name)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\tlog.Warnf(\"Unable to confirm IPv%d address %s is assigned to this host\", ipAddr.Version(), ipAddr)\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpPeerGroupsPeerGroupAfiSafisAfiSafiIpv4UnicastPrefixLimit(ctx context.Context, name string, identifier string, protocolName string, peerGroupName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitRequest21, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:peer-groups/frinx-openconfig-network-instance:peer-group/{peer-group-name}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv4-unicast/frinx-openconfig-network-instance:prefix-limit/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"peer-group-name\"+\"}\", fmt.Sprintf(\"%v\", peerGroupName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := 
selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixLimitBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiL2vpnVplsPrefixLimitConfig(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam FrinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigRequest5, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:l2vpn-vpls/frinx-openconfig-network-instance:prefix-limit/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type 
header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigBgpBgpcommonmpallafisaficommonPrefixlimitConfigBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}" ]
[ "0.57944995", "0.5638535", "0.5593906", "0.5586777", "0.55479777", "0.5546608", "0.54761577", "0.5467055", "0.5434431", "0.5379", "0.5344696", "0.53415555", "0.5334797", "0.53240156", "0.53156394", "0.53124017", "0.5250295", "0.52484107", "0.5246646", "0.5243997", "0.5241883", "0.52379817", "0.52046293", "0.5191389", "0.5191369", "0.518258", "0.5180996", "0.5170955", "0.51687145", "0.51633114", "0.51601195", "0.51487505", "0.5144038", "0.5140874", "0.51383823", "0.5121854", "0.5119179", "0.5117928", "0.51115453", "0.5109481", "0.5106834", "0.5106591", "0.51057065", "0.50964504", "0.5096356", "0.5090761", "0.5088823", "0.50865984", "0.5066088", "0.506607", "0.50656044", "0.50603604", "0.5058025", "0.50561225", "0.5050341", "0.5049078", "0.50457895", "0.5045757", "0.50386065", "0.5034982", "0.50221395", "0.5020716", "0.502037", "0.50189483", "0.50169635", "0.50129336", "0.5012825", "0.50125456", "0.50084984", "0.5008372", "0.50066346", "0.50011015", "0.49965099", "0.49939075", "0.49922144", "0.49910954", "0.4989741", "0.49851668", "0.4982057", "0.4980331", "0.49795285", "0.49768308", "0.49648932", "0.49640775", "0.49640617", "0.49626034", "0.49573416", "0.49570978", "0.49550962", "0.49549866", "0.49542022", "0.4952829", "0.49503675", "0.49495098", "0.49470684", "0.49443883", "0.4944213", "0.49415612", "0.49402705", "0.4938903" ]
0.8086484
0
NewGobEncoderLight returns a new lockfree encoder
func NewGobEncoderLight() *GobEncoderLight { ret := &GobEncoderLight{ bytes: &bytes.Buffer{}, } ret.encoder = gob.NewEncoder(ret.bytes) return ret }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewGobDecoderLight() *GobDecoderLight {\n\tret := &GobDecoderLight{\n\t\tbytes: &bytes.Buffer{},\n\t}\n\tret.decoder = gob.NewDecoder(ret.bytes)\n\treturn ret\n}", "func NewGOBCodec() *GOBCodec {\n\tr := GOBCodec(0)\n\treturn &r\n}", "func NewGobTranscoder() *GobTranscoder {\n\tret := &GobTranscoder{\n\t\tinBytes: &bytes.Buffer{},\n\t\toutBytes: &bytes.Buffer{},\n\t\tencoderMut: &sync.Mutex{},\n\t\tdecoderMut: &sync.Mutex{},\n\t}\n\tret.encoder = gob.NewEncoder(ret.outBytes)\n\tret.decoder = gob.NewDecoder(ret.inBytes)\n\treturn ret\n}", "func NewEncoder() Encoder {\n return &encoder{}\n}", "func NewEncoder() *Encoder {\n\tselect {\n\tcase enc := <-encObjPool:\n\t\treturn enc\n\tdefault:\n\t\treturn &Encoder{}\n\t}\n}", "func GOB() (ret httprpc.Codec) {\n\treturn Danger(\n\t\tfunc(w io.Writer) DangerEncoder {\n\t\t\treturn gob.NewEncoder(w)\n\t\t},\n\t\tfunc(r io.Reader) DangerDecoder {\n\t\t\treturn gob.NewDecoder(r)\n\t\t},\n\t)\n}", "func NewGobCode(conn io.ReadWriteCloser) Codec {\n\tbuf := bufio.NewWriter(conn)\n\treturn &GobCodec{conn: conn, buf: buf, dec: gob.NewDecoder(conn), enc: gob.NewEncoder(buf)}\n}", "func (b *blockEnc) initNewEncode() {\n\tb.recentOffsets = [3]uint32{1, 4, 8}\n\tb.litEnc.Reuse = huff0.ReusePolicyNone\n\tb.coders.setPrev(nil, nil, nil)\n}", "func NewEncoder() Encoder {\n\treturn &encoder{\n\t\tbuf: new(bytes.Buffer),\n\t}\n}", "func NewEncoder() *Encoder {\n\treturn &Encoder{buf: &bytes.Buffer{}, tmp: &bytes.Buffer{}}\n}", "func NewEncoder() Encoder {\n\treturn &encoder{}\n}", "func NewBinaryCodec() *BinaryCodec {\n var c *BinaryCodec = &BinaryCodec{}\n c.buf = &bytes.Buffer{}\n return c\n}", "func NewGCM(b cipher.Block, tagSizeInBits int, iv []byte) (GaloisCounterMode, error) {\n if b.BlockSize() != 16 && b.BlockSize() != 18 && b.BlockSize() != 24 {\n return nil, errors.New(\"Block cipher MUST have a 128-bit block size\")\n }\n\n if tagSizeInBits <= 0 {\n tagSizeInBits = 128\n }\n\n h := make([]byte, 16)\n b.Encrypt(h, zeroes[:16])\n\n return &gcm{\n b: b,\n blockSize: b.BlockSize(),\n iv: dup(iv),\n h: h,\n tagSize: tagSizeInBits / 8,\n tmp: make([]byte, b.BlockSize()),\n }, nil\n}", "func GobGenerateEncoder(w io.Writer) Encoder {\n\treturn gob.NewEncoder(w)\n}", "func newGCM(cipher goCipher.Block) (aeadIf, error) {\n\treturn newGCMWithNonceAndTagSize(cipher, gcmStandardNonceSize, gcmTagSize)\n}", "func NewEncoder(params *Parameters) Encoder {\n\n\tvar ringQ, ringT *ring.Ring\n\tvar err error\n\n\tif ringQ, err = ring.NewRing(params.N(), params.qi); err != nil {\n\t\tpanic(err)\n\t}\n\n\tif ringT, err = ring.NewRing(params.N(), []uint64{params.t}); err != nil {\n\t\tpanic(err)\n\t}\n\n\tvar m, pos, index1, index2 int\n\n\tslots := params.N()\n\n\tindexMatrix := make([]uint64, slots)\n\n\tlogN := params.LogN()\n\n\trowSize := params.N() >> 1\n\tm = (params.N() << 1)\n\tpos = 1\n\n\tfor i := 0; i < rowSize; i++ {\n\n\t\tindex1 = (pos - 1) >> 1\n\t\tindex2 = (m - pos - 1) >> 1\n\n\t\tindexMatrix[i] = utils.BitReverse64(uint64(index1), uint64(logN))\n\t\tindexMatrix[i|rowSize] = utils.BitReverse64(uint64(index2), uint64(logN))\n\n\t\tpos *= GaloisGen\n\t\tpos &= (m - 1)\n\t}\n\n\treturn &encoder{\n\t\tparams: params.Copy(),\n\t\tringQ: ringQ,\n\t\tringT: ringT,\n\t\tindexMatrix: indexMatrix,\n\t\tdeltaMont: GenLiftParams(ringQ, params.t),\n\t\tscaler: ring.NewRNSScaler(params.t, ringQ),\n\t\ttmpPoly: ringT.NewPoly(),\n\t\ttmpPtRt: NewPlaintextRingT(params),\n\t}\n}", "func NewEncoder() *Encoder {\n\treturn &Encoder{\n\t\tbuf: make([]uint64, 
240),\n\t\tb: make([]byte, 8),\n\t\tbytes: make([]byte, 128),\n\t}\n}", "func NewGobSerializer() gbus.Serializer {\n\treturn &Gob{\n\t\tlock: &sync.Mutex{},\n\t\tregisteredSchemas: make(map[string]reflect.Type),\n\t}\n}", "func (e Encoding) New() EncodingFunc {\n\tif e < maxEncoding {\n\t\tf := Encodings[e]\n\t\tif f != nil {\n\t\t\treturn f()\n\t\t}\n\t}\n\tlogger.Errorf(\"requested Encoding function %s is unavailable\", strconv.Itoa(int(e)))\n\treturn nil\n}", "func (x *Rat) GobEncode() ([]byte, error) {}", "func New(version string, config Config) *Encoder {\n\te := &Encoder{version: version, config: config}\n\te.reset()\n\treturn e\n}", "func (cfg frozenConfig) NewEncoder(writer io.Writer) Encoder {\n enc := encoder.NewStreamEncoder(writer)\n enc.Opts = cfg.encoderOpts\n return enc\n}", "func New() *Codec {\n\treturn &Codec{}\n}", "func New() Go { return Go{} }", "func (d *DFA) GobEncode() ([]byte, error) {\n\tbuffer := new(bytes.Buffer)\n\tencoder := gob.NewEncoder(buffer)\n\tif err := encoder.Encode(d.initial); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not GOB encode initial state\")\n\t}\n\tif err := encoder.Encode(d.table); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not GOB encode sparse table\")\n\t}\n\treturn buffer.Bytes(), nil\n}", "func NewEncoder() *Encoder {\n\n\te := &Encoder{\n\t\ttagName: \"form\",\n\t\tmode: ModeImplicit,\n\t\tstructCache: newStructCacheMap(),\n\t\tembedAnonymous: true,\n\t\tnamespacePrefix: \".\",\n\t}\n\n\te.dataPool = &sync.Pool{New: func() interface{} {\n\t\treturn &encoder{\n\t\t\te: e,\n\t\t\tnamespace: make([]byte, 0, 64),\n\t\t}\n\t}}\n\n\treturn e\n}", "func NewEncoding(e Encoder, d Decoder) EncodeDecoder {\n\treturn &Encoding{\n\t\tEncoder: e,\n\t\tDecoder: d,\n\t}\n}", "func NewEncoder() (*Encoder, error) {\n\treturn &Encoder{\n\t\tbuffer: make([]byte, MaxBufferSize),\n\t\tposition: uint64(MaxBufferSize - 1),\n\t}, nil\n}", "func newJSONEncoder() *jsonEncoder {\n\tenc := jsonPool.Get().(*jsonEncoder)\n\tenc.truncate()\n\treturn enc\n}", "func (k *Key) GobEncode() ([]byte, error) {\n\treturn []byte(k.Encode()), nil\n}", "func (m *simpleMapping) NewEncoder() SimpleEncoder {\n\tm.once.Do(m.init)\n\treturn &simpleEncoding{\n\t\tbaseName: m.baseName,\n\t\tencode: m.encode,\n\t\tdecode: m.decode,\n\t}\n}", "func NewEncoding(encoder string) *Encoding {\n\te := new(Encoding)\n\te.encoder = encoder\n\n\tfor i := 0; i < len(e.decodeMap); i++ {\n\t\te.decodeMap[i] = 0xFF\n\t}\n\tfor i := 0; i < len(encoder); i++ {\n\t\te.decodeMap[encoder[i]] = byte(i)\n\t}\n\treturn e\n}", "func _Encoder_Shadow(rb *[]byte, vp unsafe.Pointer, sb *_Stack, fv uint64) (err error) {\n // align to assembler_amd64.go: _FP_offs\n var frame [_FP_offs]byte\n\n // must keep all args and frames noticeable to GC\n _KeepAlive.rb = rb\n _KeepAlive.vp = vp\n _KeepAlive.sb = sb\n _KeepAlive.fv = fv\n _KeepAlive.err = err\n _KeepAlive.frame = frame\n\n return errCallShadow\n}", "func New(config Config) zapcore.WriteSyncer {\n\treturn &gelf{Config: config}\n}", "func Constructor() Codec {\n\treturn Codec{}\n}", "func newCodecRegistry() *codecs {\n\treturn &codecs{\n\t\tmu: new(sync.RWMutex),\n\t\titems: make(map[string]codec.Interface),\n\t}\n}", "func NewGCM(\n\tkey string,\n\tpassword string,\n\tencoding encodingType,\n) (AES, error) {\n\treturn NewAES(ModeGCM, key, password, encoding)\n}", "func newCompressor(format CompressEncodingType) (compressor, error) {\n\tvar (\n\t\twriter encoder\n\t\terr error\n\t)\n\n\tswitch format {\n\tcase 
GZIPCompression:\n\t\twriter = gzip.NewWriter(io.Discard)\n\tcase DeflateCompression:\n\t\twriter, err = flate.NewWriter(io.Discard, flate.BestSpeed)\n\t\tif err != nil {\n\t\t\treturn compressor{}, err\n\t\t}\n\tcase NoCompression:\n\t\twriter = nil\n\tdefault:\n\t\treturn compressor{}, fmt.Errorf(\"invalid format: %s\", format)\n\t}\n\n\treturn compressor{\n\t\tformat: format,\n\t\twriter: writer,\n\t}, nil\n}", "func NewEncoder(w io.Writer, chunkSize uint16) EncoderV2 {\n\treturn EncoderV2{\n\t\tw: w,\n\t\tbuf: &bytes.Buffer{},\n\t\tchunkSize: chunkSize,\n\t}\n}", "func New(ch chan interface{}) Ctx {\n\treturn Ctx{bridge.New(ch)}\n}", "func NewGCM(key []byte) (*SafeCookie, error) {\n\tb, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgcm, err := cipher.NewGCM(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &SafeCookie{AEAD: gcm}, nil\n}", "func (s *CountMinSketch) GobEncode() ([]byte, error) {\n\tvar buf bytes.Buffer\n\t_, err := s.WriteTo(&buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}", "func (b *blockEnc) init() {\n\tif b.lowMem {\n\t\t// 1K literals\n\t\tif cap(b.literals) < 1<<10 {\n\t\t\tb.literals = make([]byte, 0, 1<<10)\n\t\t}\n\t\tconst defSeqs = 20\n\t\tif cap(b.sequences) < defSeqs {\n\t\t\tb.sequences = make([]seq, 0, defSeqs)\n\t\t}\n\t\t// 1K\n\t\tif cap(b.output) < 1<<10 {\n\t\t\tb.output = make([]byte, 0, 1<<10)\n\t\t}\n\t} else {\n\t\tif cap(b.literals) < maxCompressedBlockSize {\n\t\t\tb.literals = make([]byte, 0, maxCompressedBlockSize)\n\t\t}\n\t\tconst defSeqs = 2000\n\t\tif cap(b.sequences) < defSeqs {\n\t\t\tb.sequences = make([]seq, 0, defSeqs)\n\t\t}\n\t\tif cap(b.output) < maxCompressedBlockSize {\n\t\t\tb.output = make([]byte, 0, maxCompressedBlockSize)\n\t\t}\n\t}\n\n\tif b.coders.mlEnc == nil {\n\t\tb.coders.mlEnc = &fseEncoder{}\n\t\tb.coders.mlPrev = &fseEncoder{}\n\t\tb.coders.ofEnc = &fseEncoder{}\n\t\tb.coders.ofPrev = &fseEncoder{}\n\t\tb.coders.llEnc = &fseEncoder{}\n\t\tb.coders.llPrev = &fseEncoder{}\n\t}\n\tb.litEnc = &huff0.Scratch{WantLogLess: 4}\n\tb.reset(nil)\n}", "func New(handlers ...ImageHandler) *Goba {\n\treturn &Goba{handlers: handlers}\n}", "func NewEncode(salt string) *Encode {\n\thd := hashids.NewData()\n\thd.MinLength = HashKeyLen\n\thd.Salt = salt\n\th := hashids.NewWithData(hd)\n\treturn &Encode{\n\t\th: h,\n\t}\n}", "func newGFElement(data byte) gfElement {\n\treturn gfElement(data)\n}", "func new384Asm() hash.Hash { return nil }", "func NewCodec(key []byte) (crypto.Codec, error) {\n\tblock, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error creating cipher: %s\", err)\n\t}\n\taesgcm, err := cipher.NewGCM(block)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error creating cipher: %s\", err)\n\t}\n\treturn &codec{\n\t\taesgcm: aesgcm,\n\t}, nil\n}", "func newGenerator(h hash.Hash, seed []byte) generator {\n\tif h == nil {\n\t\th = sha256.New()\n\t}\n\tb := h.Size()\n\tg := generator{\n\t\tkey: make([]byte, b),\n\t\tcounter: make([]byte, 16),\n\t\tmaxBytesPerRequest: (1 << 15) * b,\n\t\ttemp: make([]byte, b),\n\t\th: h,\n\t}\n\tif len(seed) != 0 {\n\t\t_, _ = g.Write(seed)\n\t}\n\treturn g\n}", "func (g *Gammas) GobEncode() ([]byte, error) {\n\tbuff := bytes.Buffer{}\n\tif g != nil {\n\t\tfor _, g2 := range *g {\n\t\t\tbuff.Write(g2.Marshal())\n\t\t}\n\t}\n\treturn buff.Bytes(), nil\n}", "func NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{\n\t\tw: bufferedWriter(w),\n\t\tmaxLength: defaultMaxLength,\n\t}\n}", "func 
NewBinaryEncoding(c Constructor) *BinaryEncoding {\n\treturn &BinaryEncoding{Constructor: c}\n}", "func newJSONEncoder() *jsonEncoder {\n\tbuffer := &bytes.Buffer{}\n\tencoder := json.NewEncoder(buffer)\n\n\treturn &jsonEncoder{\n\t\tbuffer: buffer,\n\t\tencoder: encoder,\n\t\tcontentType: jsonContentType,\n\t}\n}", "func NewCoder(buf []byte) *Coder {\n\tret := new(Coder)\n\n\tret.buf = buf\n\t// Figure 15.\n\tret.pos = 2\n\t// Figure 14.\n\tret.low = uint16(buf[0])<<8 | uint16(buf[1])\n\t// Figure 13.\n\tret.rng = 0xFF00\n\tret.cur_byte = -1\n\tif ret.low >= ret.rng {\n\t\tret.low = ret.rng\n\t\tret.pos = len(buf) - 1\n\t}\n\n\t// 3.8.1.3. Initial Values for the Context Model\n\tret.SetTable(DefaultStateTransition)\n\n\treturn ret\n}", "func newStandandTextEncoder(t *testing.T) textencoding.SimpleEncoder {\n\tenc, err := textencoding.NewSimpleTextEncoder(\"StandardEncoding\", nil)\n\tif err != nil {\n\t\tt.Fatalf(\"Error: %v\", err)\n\t}\n\treturn enc\n}", "func ToGob(src interface{}) ([]byte, error) {\n\treturn NewGobber().To(src)\n}", "func NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{\n\t\tw: w,\n\t\tbuf: make([]byte, 8),\n\t\tcrc: crc16.New(nil),\n\t}\n}", "func NewGzip() *Gzip {\n\treturn &Gzip{}\n}", "func NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{w}\n}", "func NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{w}\n}", "func NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{w}\n}", "func NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{w}\n}", "func NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{w}\n}", "func New(client *steam.Client) *TF2 {\n\tt := &TF2{client}\n\tclient.GC.RegisterPacketHandler(t)\n\treturn t\n}", "func NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{\n\t\tw: w,\n\t}\n}", "func NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{\n\t\tw: w,\n\t}\n}", "func GobEncode(v interface{}) ([]byte, error) {\n\tvar buf bytes.Buffer\n\terr := gob.NewEncoder(&buf).Encode(&v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}", "func (p *perceptron) GobEncode() ([]byte, error) {\n\tvar buf bytes.Buffer\n\tencoder := gob.NewEncoder(&buf)\n\n\t// if err := encoder.Encode(&p.weights); err != nil {\n\t// \treturn nil, err\n\t// }\n\n\tif err := encoder.Encode(&p.weightsSF); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := encoder.Encode(&p.weightsTF); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := encoder.Encode(&p.totals); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := encoder.Encode(&p.steps); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := encoder.Encode(p.instancesSeen); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func NewCodec() *Codec {\n\treturn &Codec{}\n}", "func (s *Serializer) withEncoder(f func(encoder *gob.Encoder) error) ([]byte, error) {\n\tgzipWriter := s.writers.Get().(*gzip.Writer)\n\tdefer s.writers.Put(gzipWriter)\n\n\tencodeBuf := new(bytes.Buffer)\n\tif err := f(gob.NewEncoder(encodeBuf)); err != nil {\n\t\treturn nil, err\n\t}\n\n\tcompressBuf := new(bytes.Buffer)\n\tgzipWriter.Reset(compressBuf)\n\n\tif _, err := io.Copy(gzipWriter, encodeBuf); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := gzipWriter.Close(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn compressBuf.Bytes(), nil\n}", "func (sf singleFeature) GobEncode() ([]byte, error) {\n\tvar buf bytes.Buffer\n\tencoder := gob.NewEncoder(&buf)\n\n\tif err := encoder.Encode(sf.featureType); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := encoder.Encode(sf.value); err 
!= nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func (t *capsuleType) GobEncode() ([]byte, error) {\n\treturn nil, fmt.Errorf(\"cannot gob-encode capsule type %q\", t.FriendlyName(friendlyTypeName))\n}", "func NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{out: w}\n}", "func New(meta Metadata, data []byte) (*Snapshot, error) {\n\n\tvar b bytes.Buffer\n\tgw, err := gzip.NewWriterLevel(&b, gzip.BestSpeed)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error building gzip writer: %w\", err)\n\t}\n\tgw.Write(data)\n\tgw.Close()\n\n\treturn &Snapshot{meta: meta, data: b.Bytes()}, nil\n}", "func New(w, h int, s *fyne.Window) Buffer {\n\tcontext := gg.NewContext(w, h)\n\treturn Buffer{w, h, s, context}\n}", "func new_buffer(conn *websocket.Conn, ctrl chan struct{}, txqueuelen int) *Buffer {\n\tbuf := Buffer{conn: conn}\n\tbuf.pending = make(chan []byte, txqueuelen)\n\tbuf.ctrl = ctrl\n\tbuf.cache = make([]byte, packet.PACKET_LIMIT+2)\n\treturn &buf\n}", "func (t Type) GobEncode() ([]byte, error) {\n\tbuf := &bytes.Buffer{}\n\tenc := gob.NewEncoder(buf)\n\n\tgt := gobType{\n\t\tVersion: 0,\n\t\tImpl: t.typeImpl,\n\t}\n\n\terr := enc.Encode(gt)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error encoding cty.Type: %s\", err)\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func NewEncoder(w io.Writer) goa.Encoder {\n\treturn codec.NewEncoder(w, &Handle)\n}", "func newLockBased() Interface {\n\tgate := &lockBased{}\n\tgate.mux.Lock()\n\treturn gate\n}", "func newCMLogger(name string, chainId string, logger *zap.SugaredLogger, logLevel log.LOG_LEVEL) *CMLogger {\n\treturn &CMLogger{name: name, chainId: chainId, SugaredLogger: logger, logLevel: logLevel}\n}", "func (this *TEncTop) Encode(flush bool, pcPicYuvOrg *TLibCommon.TComPicYuv, rcListPicYuvRecOut *list.List, accessUnitsOut *AccessUnits, iNumEncoded *int) {\n if pcPicYuvOrg != nil {\n // get original YUV\n pcPicCurr := this.xGetNewPicBuffer()\n pcPicYuvOrg.CopyToPic(pcPicCurr.GetPicYuvOrg())\n\n // compute image characteristics\n if this.GetEncCfg().GetUseAdaptiveQP() {\n pcPicCurr.XPreanalyze()\n }\n }\n\n if this.m_iNumPicRcvd == 0 || (!flush && this.m_iPOCLast != 0 && this.m_iNumPicRcvd != this.GetEncCfg().m_iGOPSize && this.GetEncCfg().m_iGOPSize != 0) {\n *iNumEncoded = 0\n return\n }\n\n //#if RATE_CONTROL_LAMBDA_DOMAIN\n if this.GetEncCfg().m_RCEnableRateControl {\n this.m_cRateCtrl.initRCGOP(this.m_iNumPicRcvd)\n }\n //#endif\n\n // compress GOP\n this.m_cGOPEncoder.compressGOP(this.m_iPOCLast, this.m_iNumPicRcvd, this.m_cListPic, rcListPicYuvRecOut, accessUnitsOut)\n\n //#if RATE_CONTROL_LAMBDA_DOMAIN\n if this.GetEncCfg().m_RCEnableRateControl {\n this.m_cRateCtrl.destroyRCGOP()\n }\n //#endif\n\n *iNumEncoded = this.m_iNumPicRcvd\n this.m_iNumPicRcvd = 0\n this.m_uiNumAllPicCoded += uint(*iNumEncoded)\n}", "func NewEncoder(H *http.Header) *Encoder {\n\tif H == nil {\n\t\tH = &http.Header{}\n\t}\n\treturn &Encoder{h: *H}\n}", "func GobEncode(data interface{}) []byte {\n\tvar buff bytes.Buffer\n\n\tencoder := gob.NewEncoder(&buff)\n\tif err := encoder.Encode(data); err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\treturn buff.Bytes()\n}", "func NewLegacy(ctx context.Context, connection string, tlsInfo tls.Config) (server.Backend, error) {\n\treturn newBackend(ctx, connection, tlsInfo, true)\n}", "func NewGenerator(hash hash.Hash, f NewCipher) (*Generator, error) {\n\tg := &Generator{\n\t\tkey: make([]byte, keySize, keySize),\n\t\thash: hash,\n\t\tnewCipher: f,\n\t}\n\n\tif err := g.updateCipher(); err != nil 
{\n\t\treturn nil, err\n\t}\n\n\tctr, err := counter.New(uint(g.cipher.BlockSize()) * byteSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tg.counter = ctr\n\n\tg.buffer = make([]byte, len(g.counter), len(g.counter))\n\n\treturn g, nil\n}", "func New() *Coder {\n\tc := &Coder{\n\t\talphabet: defaultAlphabet,\n\t\tblockSize: defaultBlockSize,\n\t\tminLength: defaultMinLength,\n\t\tmask: mask(defaultBlockSize),\n\t\tmapping: mapping(defaultBlockSize),\n\t}\n\treturn c\n}", "func New(CTX *node.ServiceContext, config *Config) (*Bgmchain, error) {\n\tif config.SyncMode == downloader.LightSync {\n\t\treturn nil, errors.New(\"can't run bgmPtr.Bgmchain in light sync mode, use les.LightBgmchain\")\n\t}\n\tif !config.SyncMode.IsValid() {\n\t\treturn nil, fmt.Errorf(\"invalid sync mode %-d\", config.SyncMode)\n\t}\n\tchainDb, err := CreateDB(CTX, config, \"chaindata\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstopDbUpgrade := upgradeDeduplicateData(chainDb)\n\tchainConfig, genesisHash, genesisErr := bgmCore.SetupGenesisBlock(chainDb, config.Genesis)\n\tif _, ok := genesisErr.(*bgmparam.ConfigCompatError); genesisErr != nil && !ok {\n\t\treturn nil, genesisErr\n\t}\n\tbgmlogs.Info(\"Initialised chain configuration\", \"config\", chainConfig)\n\n\tbgm := &Bgmchain{\n\t\tconfig: config,\n\t\tchainDb: chainDb,\n\t\tchainConfig: chainConfig,\n\t\teventMux: CTX.EventMux,\n\t\taccountManager: CTX.AccountManager,\n\t\tengine: dpos.New(chainConfig.Dpos, chainDb),\n\t\tshutdownChan: make(chan bool),\n\t\tstopDbUpgrade: stopDbUpgrade,\n\t\tnetworkId: config.NetworkId,\n\t\tgasPrice: config.GasPrice,\n\t\tvalidator: config.Validator,\n\t\tcoinbase: config.Coinbase,\n\t\tbloomRequests: make(chan chan *bloombits.Retrieval),\n\t\tbloomIndexer: NewBloomIndexer(chainDb, bgmparam.BloomBitsBlocks),\n\t}\n\n\tbgmlogs.Info(\"Initialising Bgmchain protocol\", \"versions\", ProtocolVersions, \"network\", config.NetworkId)\n\n\tif !config.SkipBcVersionCheck {\n\t\tbcVersion := bgmCore.GetBlockChainVersion(chainDb)\n\t\tif bcVersion != bgmCore.BlockChainVersion && bcVersion != 0 {\n\t\t\treturn nil, fmt.Errorf(\"Blockchain DB version mismatch (%-d / %-d). 
Run gbgm upgradedbPtr.\\n\", bcVersion, bgmCore.BlockChainVersion)\n\t\t}\n\t\tbgmCore.WriteBlockChainVersion(chainDb, bgmCore.BlockChainVersion)\n\t}\n\tvmConfig := vmPtr.Config{EnablePreimageRecording: config.EnablePreimageRecording}\n\tbgmPtr.blockchain, err = bgmCore.NewBlockChain(chainDb, bgmPtr.chainConfig, bgmPtr.engine, vmConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Rewind the chain in case of an incompatible config upgrade.\n\tif compat, ok := genesisErr.(*bgmparam.ConfigCompatError); ok {\n\t\tbgmlogs.Warn(\"Rewinding chain to upgrade configuration\", \"err\", compat)\n\t\tbgmPtr.blockchain.SetHead(compat.RewindTo)\n\t\tbgmCore.WriteChainConfig(chainDb, genesisHash, chainConfig)\n\t}\n\tbgmPtr.bloomIndexer.Start(bgmPtr.blockchain)\n\n\tif config.TxPool.Journal != \"\" {\n\t\tconfig.TxPool.Journal = CTX.ResolvePath(config.TxPool.Journal)\n\t}\n\tbgmPtr.txPool = bgmCore.NewTxPool(config.TxPool, bgmPtr.chainConfig, bgmPtr.blockchain)\n\n\tif bgmPtr.protocolManager, err = NewProtocolManager(bgmPtr.chainConfig, config.SyncMode, config.NetworkId, bgmPtr.eventMux, bgmPtr.txPool, bgmPtr.engine, bgmPtr.blockchain, chainDb); err != nil {\n\t\treturn nil, err\n\t}\n\tbgmPtr.miner = miner.New(bgm, bgmPtr.chainConfig, bgmPtr.EventMux(), bgmPtr.engine)\n\tbgmPtr.miner.SetExtra(makeExtraData(config.ExtraData))\n\n\tbgmPtr.ApiBackend = &BgmApiBackend{bgm, nil}\n\tgpobgmparam := config.GPO\n\tif gpobgmparam.Default == nil {\n\t\tgpobgmparam.Default = config.GasPrice\n\t}\n\tbgmPtr.ApiBackend.gpo = gasprice.NewOracle(bgmPtr.ApiBackend, gpobgmparam)\n\n\treturn bgm, nil\n}", "func New(alphabet []byte) *Encoding {\n\tenc := &Encoding{}\n\tcopy(enc.alphabet[:], alphabet[:])\n\tfor i := range enc.decodeMap {\n\t\tenc.decodeMap[i] = -1\n\t}\n\tfor i, b := range enc.alphabet {\n\t\tenc.decodeMap[b] = int64(i)\n\t}\n\treturn enc\n}", "func newCipherGeneric(key []byte) (cipher.Block, error) {\r\n\tc := sm4Cipher{make([]uint32, rounds), make([]uint32, rounds)}\r\n\texpandKeyGo(key, c.enc, c.dec)\r\n\treturn &c, nil\r\n}", "func NewEncoder(enc *Encoding, w io.Writer) io.WriteCloser {\n\treturn &encoder{enc: enc, w: w}\n}", "func NewEncoder(enc *Encoding, w io.Writer) io.WriteCloser {\n\treturn &encoder{enc: enc, w: w}\n}", "func NewEncoding(encoder string) (*Encoding, error) {\n\tif len(encoder) != 58 {\n\t\treturn nil, errors.New(\"base58: encoding alphabet is not 58-bytes\")\n\t}\n\tfor i := 0; i < len(encoder); i++ {\n\t\tif encoder[i] == '\\n' || encoder[i] == '\\r' {\n\t\t\treturn nil, errors.New(\"base58: encoding alphabet contains newline character\")\n\t\t}\n\t}\n\te := new(Encoding)\n\tfor i := range e.decodeMap {\n\t\te.decodeMap[i] = -1\n\t}\n\tfor i := range encoder {\n\t\te.encode[i] = byte(encoder[i])\n\t\te.decodeMap[e.encode[i]] = i\n\t}\n\treturn e, nil\n}", "func (d *Decoder) GOB(val interface{}) {\n\tgobd := gob.NewDecoder(d.buf)\n\tif err := gobd.Decode(val); err != nil {\n\t\tlog.Panicf(\"gob: failed to decode: %v\", err)\n\t}\n}", "func newBase() *base {\n\treturn &base{shared.NewUUID(), time.Now().UTC(), time.Now().UTC(), false/*, shared.NewUUID()*/}\n}", "func (info *ImageInfoType) GobEncode() (buf []byte, err error) {\n\tfields := []interface{}{info.data, info.smask, info.n, info.w, info.h, info.cs,\n\t\tinfo.pal, info.bpc, info.f, info.dp, info.trns, info.scale, info.dpi}\n\tw := new(bytes.Buffer)\n\tencoder := gob.NewEncoder(w)\n\tfor j := 0; j < len(fields) && err == nil; j++ {\n\t\terr = encoder.Encode(fields[j])\n\t}\n\tif err == nil {\n\t\tbuf = 
w.Bytes()\n\t}\n\treturn\n}", "func NewEncoder(w io.Writer) *Encoder {\n\treturn NewVersionedEncoder(DefaultVersion, w)\n}", "func gobEncode(data interface{}) []byte {\n\tvar buff bytes.Buffer\n\n\tenc := gob.NewEncoder(&buff)\n\terr := enc.Encode(data)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\treturn buff.Bytes()\n}", "func newFloat(value *big.Float) *TypedFloat {\n\tbytes, _ := value.GobEncode()\n\ttypedFloat := TypedFloat{\n\t\tBytes: bytes,\n\t\tType: ValueType_FLOAT,\n\t}\n\treturn &typedFloat\n}", "func New(x, y, w, h int, ctx *GLContext) (*GoGLBackend, error) {\n\tif ctx == nil {\n\t\tvar err error\n\t\tctx, err = NewGLContext()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tb := &GoGLBackend{\n\t\tw: w,\n\t\th: h,\n\t\tfw: float64(w),\n\t\tfh: float64(h),\n\t\tGLContext: ctx,\n\t}\n\n\tb.activateFn = func() {\n\t\tgl.BindFramebuffer(gl.FRAMEBUFFER, 0)\n\t\tgl.Viewport(int32(b.x), int32(b.y), int32(b.w), int32(b.h))\n\t\t// todo reapply clipping since another application may have used the stencil buffer\n\t}\n\tb.disableTextureRenderTarget = func() {\n\t\tgl.BindFramebuffer(gl.FRAMEBUFFER, 0)\n\t\tgl.Viewport(int32(b.x), int32(b.y), int32(b.w), int32(b.h))\n\t}\n\n\treturn b, nil\n}", "func newGraphMinion(id uint32, graph *graph.GrootGraph, wg *sync.WaitGroup) *graphMinion {\n\treturn &graphMinion{\n\t\tid: id,\n\t\tgraph: graph,\n\t\tinputChannel: make(chan *lshforest.Key, BUFFERSIZE),\n\t\twg: wg,\n\t}\n}" ]
[ "0.70255053", "0.62390405", "0.6189565", "0.6124616", "0.59634024", "0.5905049", "0.589319", "0.5539999", "0.5469765", "0.5434318", "0.5348389", "0.533494", "0.53337526", "0.52019095", "0.5191129", "0.51703334", "0.51524764", "0.51320297", "0.5115051", "0.5044243", "0.5037294", "0.5000761", "0.49911353", "0.49618405", "0.4916901", "0.49065864", "0.4893044", "0.48885816", "0.4872873", "0.4871193", "0.48605072", "0.48519728", "0.48460916", "0.48422354", "0.48186484", "0.480823", "0.47936922", "0.47915313", "0.47903016", "0.4773144", "0.47532246", "0.47345784", "0.47255212", "0.47235724", "0.47172138", "0.4697476", "0.46922597", "0.46917766", "0.46851015", "0.46848986", "0.46769458", "0.4668593", "0.46671477", "0.46645087", "0.46598867", "0.46571636", "0.46559104", "0.46530566", "0.463428", "0.463428", "0.463428", "0.463428", "0.463428", "0.4621362", "0.4617502", "0.4617502", "0.4613842", "0.46072188", "0.4601964", "0.4600894", "0.45973966", "0.45814323", "0.45781025", "0.45735383", "0.45617297", "0.4555413", "0.455306", "0.45528173", "0.45526397", "0.45467946", "0.454666", "0.4546506", "0.45411173", "0.45359322", "0.45315075", "0.45251137", "0.45216116", "0.45214415", "0.4520441", "0.45171338", "0.45171338", "0.45128062", "0.45124578", "0.4512206", "0.45114398", "0.451122", "0.45110145", "0.45043734", "0.4497797", "0.44894224" ]
0.78135526
0
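The rows in this section all center on Go's standard encoding/gob package. For background (this example is not part of the dataset itself), here is a minimal, self-contained round-trip through a shared bytes.Buffer using only the standard library; the Payload type is a hypothetical stand-in.

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// Payload is a hypothetical example type; gob handles any struct with exported fields.
type Payload struct {
	Name  string
	Count int
}

func main() {
	var buf bytes.Buffer

	// Encode into the buffer (buf acts as the encoder's io.Writer).
	if err := gob.NewEncoder(&buf).Encode(Payload{Name: "demo", Count: 3}); err != nil {
		panic(err)
	}

	// Decode back out of the same buffer (buf acts as the decoder's io.Reader).
	var out Payload
	if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out) // prints {Name:demo Count:3}
}

The same buffer serves as writer for the encoder and reader for the decoder, which is the pattern the buffer-backed encoder/decoder helpers in the surrounding rows wrap.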
NewGobDecoderLight returns a new lock-free decoder
func NewGobDecoderLight() *GobDecoderLight {
	ret := &GobDecoderLight{
		bytes: &bytes.Buffer{},
	}
	ret.decoder = gob.NewDecoder(ret.bytes)
	return ret
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewGobEncoderLight() *GobEncoderLight {\n\tret := &GobEncoderLight{\n\t\tbytes: &bytes.Buffer{},\n\t}\n\tret.encoder = gob.NewEncoder(ret.bytes)\n\treturn ret\n}", "func NewDecoder(data unsafe.Pointer, length C.int) *FLBDecoder {\n\tvar b []byte\n\n\tdec := new(FLBDecoder)\n\tdec.handle = new(codec.MsgpackHandle)\n\tdec.handle.SetExt(reflect.TypeOf(FLBTime{}), 0, &FLBTime{})\n\n\tb = C.GoBytes(data, length)\n\tdec.mpdec = codec.NewDecoderBytes(b, dec.handle)\n\n\treturn dec\n}", "func NewDecoder() *Decoder {\n\treturn &Decoder{\n\t\tbuffer: []byte{},\n\t\tcache: make(map[string]struct{}),\n\t}\n}", "func NewDecoder(r io.Reader) *Decoder {\n\td := &Decoder{\n\t\tr: r,\n\t\tserializer: make(chan pair, 8000), // typical PrimitiveBlock contains 8k OSM entities\n\t}\n\td.SetBufferSize(initialBlobBufSize)\n\treturn d\n}", "func NewDecoder(data []byte) *Decoder {\n\tdec := decoderPool.Get().(*Decoder)\n\tdec.Reset(data)\n\treturn dec\n}", "func NewGobTranscoder() *GobTranscoder {\n\tret := &GobTranscoder{\n\t\tinBytes: &bytes.Buffer{},\n\t\toutBytes: &bytes.Buffer{},\n\t\tencoderMut: &sync.Mutex{},\n\t\tdecoderMut: &sync.Mutex{},\n\t}\n\tret.encoder = gob.NewEncoder(ret.outBytes)\n\tret.decoder = gob.NewDecoder(ret.inBytes)\n\treturn ret\n}", "func newLightFetcher(h *clientHandler) *lightFetcher {\n\tf := &lightFetcher{\n\t\thandler: h,\n\t\tchain: h.backend.blockchain,\n\t\tpeers: make(map[*peer]*fetcherPeerInfo),\n\t\tdeliverChn: make(chan fetchResponse, 100),\n\t\trequested: make(map[uint64]fetchRequest),\n\t\ttimeoutChn: make(chan uint64),\n\t\trequestTrigger: make(chan struct{}, 1),\n\t\tsyncDone: make(chan *peer),\n\t\tcloseCh: make(chan struct{}),\n\t\tmaxConfirmedTd: big.NewInt(0),\n\t}\n\th.backend.peers.notify(f)\n\n\tf.wg.Add(1)\n\tgo f.syncLoop()\n\treturn f\n}", "func NewDecoder(src blob.Fetcher) *Decoder {\n\treturn &Decoder{src: src}\n}", "func NewGOBCodec() *GOBCodec {\n\tr := GOBCodec(0)\n\treturn &r\n}", "func NewDecoder(opts DecoderOptions) (*Decoder, error) {\n\tvar d Decoder\n\tif err := opts.validate(); err != nil {\n\t\treturn nil, fmt.Errorf(\"imaging: error validating decoder options: %w\", err)\n\t}\n\tif opts.ConcurrencyLevel > 0 {\n\t\td.sem = make(chan struct{}, opts.ConcurrencyLevel)\n\t}\n\td.opts = opts\n\treturn &d, nil\n}", "func NewDecoder() *Decoder {\n\treturn &Decoder{cache: newCache(), ignoreUnknownKeys: true, maxMemory: 10 << 20}\n}", "func NewDecoder() *Decoder {\n\n\treturn &Decoder{\n\t\ttagName: \"form\",\n\t\tstructCache: newStructCacheMap(),\n\t\tmaxArraySize: 10000,\n\t\tdataPool: &sync.Pool{New: func() interface{} {\n\t\t\treturn make(dataMap, 0, 0)\n\t\t}},\n\t}\n}", "func GOB() (ret httprpc.Codec) {\n\treturn Danger(\n\t\tfunc(w io.Writer) DangerEncoder {\n\t\t\treturn gob.NewEncoder(w)\n\t\t},\n\t\tfunc(r io.Reader) DangerDecoder {\n\t\t\treturn gob.NewDecoder(r)\n\t\t},\n\t)\n}", "func NewGobCode(conn io.ReadWriteCloser) Codec {\n\tbuf := bufio.NewWriter(conn)\n\treturn &GobCodec{conn: conn, buf: buf, dec: gob.NewDecoder(conn), enc: gob.NewEncoder(buf)}\n}", "func New() Framer {\n\tf := &framer{\n\t\tbufLock: &sync.RWMutex{},\n\t\tbuffer: make([]byte, 0),\n\t}\n\n\treturn f\n}", "func NewDecoder() *Decoder {\n\td := new(Decoder)\n\td.pulse.sec = int8(ErrInit)\n\td.c = make(chan pulse, 1)\n\treturn d\n}", "func New(client *steam.Client) *TF2 {\n\tt := &TF2{client}\n\tclient.GC.RegisterPacketHandler(t)\n\treturn t\n}", "func NewDecoder(b []byte) *Decoder {\n\treturn &Decoder{orig: b, in: b}\n}", "func NewDecoder() Decoder {\n\treturn 
Decoder{}\n}", "func NewBareDecoder(l []id.Item, in io.Reader, endian bool) Decoding {\n\tp := &Bare{}\n\tp.in = in\n\tp.syncBuffer = make([]byte, 0, defaultSize)\n\tp.lut = MakeLut(l)\n\tp.endian = endian\n\treturn p.Decoding\n}", "func (c *raptorCodec) NewDecoder(messageLength int) Decoder {\n\treturn newRaptorDecoder(c, messageLength)\n}", "func NewGCM(b cipher.Block, tagSizeInBits int, iv []byte) (GaloisCounterMode, error) {\n if b.BlockSize() != 16 && b.BlockSize() != 18 && b.BlockSize() != 24 {\n return nil, errors.New(\"Block cipher MUST have a 128-bit block size\")\n }\n\n if tagSizeInBits <= 0 {\n tagSizeInBits = 128\n }\n\n h := make([]byte, 16)\n b.Encrypt(h, zeroes[:16])\n\n return &gcm{\n b: b,\n blockSize: b.BlockSize(),\n iv: dup(iv),\n h: h,\n tagSize: tagSizeInBits / 8,\n tmp: make([]byte, b.BlockSize()),\n }, nil\n}", "func GobGenerateDecoder(r io.Reader) Decoder {\n\treturn gob.NewDecoder(r)\n}", "func newClearsignDecoder(kr openpgp.KeyRing) *clearsignDecoder {\n\treturn &clearsignDecoder{\n\t\tkr: kr,\n\t}\n}", "func NewDecoder(r io.Reader) *Decoder {\n\treturn &Decoder{r, 0}\n}", "func NewDecoder(rd io.ReadSeeker) *Decoder {\n\n\tdec := &Decoder{\n\t\tinput: rd,\n\t\tcurrentType: typeUninited,\n\t}\n\n\treturn dec\n}", "func NewDecoder(r io.Reader, tags CodeSpace, attrs CodeSpace) *Decoder {\n\td := &Decoder{\n\t\tr: r,\n\n\t\ttags: tags,\n\t\tattrs: attrs,\n\t\ttokChan: make(chan Token),\n\t}\n\n\tgo d.run()\n\treturn d\n}", "func (cfg frozenConfig) NewDecoder(reader io.Reader) Decoder {\n dec := decoder.NewStreamDecoder(reader)\n dec.SetOptions(cfg.decoderOpts)\n return dec\n}", "func New() Go { return Go{} }", "func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder {\n\tr := pr.NewDecoderRaw(k, idx)\n\tr.Sync(marker)\n\treturn r\n}", "func NewReader(r io.Reader, rd int) (*Reader, error) {\n\tbg, err := bgzf.NewReader(r, rd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\th, _ := sam.NewHeader(nil, nil)\n\tbr := &Reader{\n\t\tr: bg,\n\t\th: h,\n\n\t\treferences: int32(len(h.Refs())),\n\t}\n\terr = br.h.DecodeBinary(br.r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbr.lastChunk.End = br.r.LastChunk().End\n\treturn br, nil\n}", "func ChromaHighlightNew(buff interface{}, formatter ...int) (c *ChromaHighlight, err error) {\n\n\tc = new(ChromaHighlight)\n\n\tswitch b := buff.(type) {\n\tcase *gtk.TextBuffer:\n\t\tc.txtBuff = b\n\tcase *source.SourceBuffer:\n\t\tc.srcBuff = b\n\t}\n\n\t// Get styles & languages lists\n\tc.Lexers = lexers.Names(false)\n\tc.Styles = styles.Names()\n\tc.intStyles = make(map[string]bool)\n\tc.intLexers = make(map[string]bool)\n\tfor _, name := range c.Lexers {\n\t\tc.intLexers[name] = true\n\t}\n\tfor _, name := range c.Styles {\n\t\tc.intStyles[name] = true\n\t}\n\t// Set default values\n\tc.defaultLexerName = \"Go\"\n\tc.defaultStyleName = \"pygments\"\n\n\tc.errAlreadyExist = errors.New(\"Already exist !\")\n\n\t// formatter option\n\tif len(formatter) > 0 {\n\t\tswitch formatter[0] {\n\t\tcase 1:\n\t\t\tc.formatter = \"gtkTextBuffer\"\n\t\t\tc.Formatter = 1\n\t\tcase 2:\n\t\t\tc.formatter = \"pango\"\n\t\t\tc.Formatter = 2\n\t\tdefault:\n\t\t\tc.formatter = \"gtkDirectToTextBuffer\"\n\t\t\tc.Formatter = 0\n\t\t}\n\t} else {\n\t\tc.formatter = \"gtkTextBuffer\"\n\t}\n\treturn\n}", "func NewDecoder(r io.Reader) *Decoder {\n\treturn &Decoder{\n\t\tr: r,\n\t\tbuf: make([]byte, 2048),\n\t}\n}", "func NewDecoder(cfg *Config) (*Decoder, error) {\n\tif cfg == nil {\n\t\tcfg = NewConfig()\n\t}\n\tdec := 
&Decoder{\n\t\tcfg: cfg,\n\t\tdec: pocketsphinx.Init(cfg.CommandLn()),\n\t}\n\tif dec.dec == nil {\n\t\tcfg.Destroy()\n\t\terr := errors.New(\"pocketsphinx.Init failed\")\n\t\treturn nil, err\n\t}\n\tdec.SetRawDataSize(0)\n\treturn dec, nil\n}", "func NewDecoder(format NvPipeFormat, codec NvPipeCodec, width int, height int) *Decoder {\n\tvar decoder Decoder\n\tdec := C.NvPipe_CreateDecoder(\n\t\tC.NvPipe_Format(format),\n\t\tC.NvPipe_Codec(codec),\n\t\tC.uint32_t(width),\n\t\tC.uint32_t(height),\n\t)\n\tdecoder.dec = dec\n\tdecoder.width = width\n\tdecoder.height = height\n\treturn &decoder\n}", "func NewProxy() *Proxy {\n return NewProxyWithBuffer(fbe.NewEmptyBuffer())\n}", "func NewProxy() *Proxy {\n return NewProxyWithBuffer(fbe.NewEmptyBuffer())\n}", "func NewProxy() *Proxy {\n return NewProxyWithBuffer(fbe.NewEmptyBuffer())\n}", "func NewDecoder(r Reader, uf UnmarshalFunc) *Decoder {\n\treturn &Decoder{r: r, buf: make([]byte, 4096), uf: uf}\n}", "func NewBinaryCodec() *BinaryCodec {\n var c *BinaryCodec = &BinaryCodec{}\n c.buf = &bytes.Buffer{}\n return c\n}", "func NewDecoder(r io.Reader) goa.Decoder {\n\treturn codec.NewDecoder(r, &Handle)\n}", "func NewDecoder(b []byte) *Decoder {\n\treturn &Decoder{\n\t\tbytes: b,\n\t}\n}", "func NewDecoder(buf []byte) (Decoder, error) {\n\t// Check buffer length before accessing it\n\tif len(buf) == 0 {\n\t\treturn nil, ErrInvalidImage\n\t}\n\n\tisBufGIF := isGIF(buf)\n\tif isBufGIF {\n\t\treturn newGifDecoder(buf)\n\t}\n\n\tmaybeDecoder, err := newOpenCVDecoder(buf)\n\tif err == nil {\n\t\treturn maybeDecoder, nil\n\t}\n\n\treturn newAVCodecDecoder(buf)\n}", "func NewDecoder(decoder streaming.Decoder, embeddedDecoder runtime.Decoder) *Decoder {\n\treturn &Decoder{\n\t\tdecoder: decoder,\n\t\tembeddedDecoder: embeddedDecoder,\n\t}\n}", "func newGCM(cipher goCipher.Block) (aeadIf, error) {\n\treturn newGCMWithNonceAndTagSize(cipher, gcmStandardNonceSize, gcmTagSize)\n}", "func NewDecoder(provider ConfigProvider) *Decoder {\n\td := &Decoder{\n\t\tprovider: provider,\n\t}\n\treturn d\n}", "func newKeepaliveFrame() frame {\n\tf := newPayloadFrame(frameIDKeepalive, []byte{})\n\treturn f\n}", "func (z *Rat) GobDecode(buf []byte) error {}", "func NewLightbulb(info Info) *Lightbulb {\n\tacc := Lightbulb{}\n\tacc.Accessory = New(info, TypeLightbulb)\n\tacc.Lightbulb = service.NewLightbulb()\n\n\tacc.AddService(acc.Lightbulb.Service)\n\n\treturn &acc\n}", "func NewDecoder(r io.Reader) *Decoder {\n\treturn &Decoder{r: r}\n}", "func NewDecoder(r io.Reader) *Decoder {\n\treturn &Decoder{r: r}\n}", "func NewDecoder(r io.Reader) *Decoder {\n\treturn &Decoder{r: r}\n}", "func NewDecoder(r io.Reader) *Decoder {\n\treturn &Decoder{r: r}\n}", "func NewDecoder(r io.Reader) *Decoder {\n\treturn &Decoder{r: r}\n}", "func NewDecoder(r io.Reader) *Decoder {\n\treturn &Decoder{r: r}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileBareMetalAsset{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func NewDecoder(r io.Reader) (d *Decoder) {\n scanner := bufio.NewScanner(r)\n return &Decoder{\n scanner: scanner,\n lineno: 0,\n }\n}", "func New(opts ...Option) Decoder {\n\td := defaultDecoder\n\tfor _, o := range opts {\n\t\to(&d)\n\t}\n\treturn d\n}", "func NewDecoder(reader io.Reader) *Decoder {\n\treturn &Decoder{ByteReader: bufio2.NewReaderSize(reader, 2048)}\n}", "func NewDecoder(r io.Reader) *Decoder {\n\treturn &Decoder{r}\n}", "func (b *blockEnc) initNewEncode() {\n\tb.recentOffsets = [3]uint32{1, 4, 8}\n\tb.litEnc.Reuse 
= huff0.ReusePolicyNone\n\tb.coders.setPrev(nil, nil, nil)\n}", "func NewDecoder(r io.ReadSeeker, dam DecoderContainerResolver) (d *Decoder) {\r\n\tif dam == nil {\r\n\t\tdam = &DefaultDecoderContainerResolver\r\n\t}\r\n\td = &Decoder{r: r, dam: dam}\r\n\td.t1, d.t2, d.t4, d.t8 = d.x[:1], d.x[:2], d.x[:4], d.x[:8]\r\n\treturn\r\n}", "func NewDecoder(scheme *runtime.Scheme) (types.Decoder, error) {\n\treturn decoder{codecs: serializer.NewCodecFactory(scheme)}, nil\n}", "func NewDecoder(r io.Reader) *Decoder {\n\treturn &Decoder{\n\t\tr: r,\n\t\tbuf: nil,\n\t}\n}", "func New(w, h int, s *fyne.Window) Buffer {\n\tcontext := gg.NewContext(w, h)\n\treturn Buffer{w, h, s, context}\n}", "func New() *Codec {\n\treturn &Codec{}\n}", "func New(glabel string, flags int) (*GlvlStruct, error) {\n\treturn newll(glabel, flags, LflagsDef, nil, false)\n}", "func NewDecoder(obj interface{}, fn Decode) *Decoder {\n\treturn &Decoder{\n\t\tType: reflect.TypeOf(obj),\n\t\tFunc: fn,\n\t}\n}", "func NewDecoder(obj interface{}, fn Decode) *Decoder {\n\treturn &Decoder{\n\t\ttyp: reflect.TypeOf(obj),\n\t\tfn: fn,\n\t}\n}", "func NewDecoder(r io.Reader) *Decoder {\n\td := new(Decoder)\n\td.Reset(r)\n\treturn d\n}", "func newBackloader(conf *options) (*backloader, error) {\n\t// validate config\n\tif err := conf.validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// create producer\n\tp, err := bus.NewProducer(&conf.Bus)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &backloader{\n\t\tbusProducer: p,\n\t\tconfig: conf,\n\t}, nil\n}", "func newRaptorDecoder(c *raptorCodec, length int) *raptorDecoder {\n\td := &raptorDecoder{codec: *c, messageLength: length}\n\n\tl, s, h := intermediateSymbols(c.NumSourceSymbols)\n\n\t// Add the S + H intermediate symbol composition equations.\n\td.matrix.coeff = make([][]int, l)\n\td.matrix.v = make([]block, l)\n\n\tk := c.NumSourceSymbols\n\tcompositions := make([][]int, s)\n\n\tfor i := 0; i < k; i++ {\n\t\ta := 1 + (int(math.Floor(float64(i)/float64(s))) % (s - 1))\n\t\tb := i % s\n\t\tcompositions[b] = append(compositions[b], i)\n\t\tb = (b + a) % s\n\t\tcompositions[b] = append(compositions[b], i)\n\t\tb = (b + a) % s\n\t\tcompositions[b] = append(compositions[b], i)\n\t}\n\tfor i := 0; i < s; i++ {\n\t\tcompositions[i] = append(compositions[i], k+i)\n\t\td.matrix.addEquation(compositions[i], block{})\n\t}\n\n\tcompositions = make([][]int, h)\n\n\thprime := int(math.Ceil(float64(h) / 2))\n\tm := buildGraySequence(k+s, hprime)\n\tfor i := 0; i < h; i++ {\n\t\tfor j := 0; j < k+s; j++ {\n\t\t\tif bitSet(uint(m[j]), uint(i)) {\n\t\t\t\tcompositions[i] = append(compositions[i], j)\n\t\t\t}\n\t\t}\n\t\tcompositions[i] = append(compositions[i], k+s+i)\n\t\td.matrix.addEquation(compositions[i], block{})\n\t}\n\n\treturn d\n}", "func NewDecoder(enc *Encoding, r io.Reader) io.Reader {\n\treturn &decoder{enc: enc, r: r}\n}", "func New(cfg *config.Config, log logger.Logger) (*FtmBridge, error) {\n\tcli, con, err := connect(cfg, log)\n\tif err != nil {\n\t\tlog.Criticalf(\"can not open connection; %s\", err.Error())\n\t\treturn nil, err\n\t}\n\n\t// build the bridge structure using the con we have\n\tbr := &FtmBridge{\n\t\trpc: cli,\n\t\teth: con,\n\t\tlog: log,\n\t\tcg: new(singleflight.Group),\n\n\t\t// special configuration options below this line\n\t\tsigConfig: &cfg.Signature,\n\t\tsfcConfig: &cfg.Staking,\n\t\tuniswapConfig: &cfg.DeFi.Uniswap,\n\t\tfMintCfg: fMintConfig{\n\t\t\taddressProvider: cfg.DeFi.FMint.AddressProvider,\n\t\t},\n\t\tfLendCfg: 
fLendConfig{lendigPoolAddress: cfg.DeFi.FLend.LendingPool},\n\n\t\t// empty shards\n\t\tsfcShards: sfcShards{log: log, client: con, sfc: cfg.Staking.SFCContract},\n\n\t\t// configure block observation loop\n\t\twg: new(sync.WaitGroup),\n\t\tsigClose: make(chan bool, 1),\n\t\theaders: make(chan *etc.Header, rpcHeadProxyChannelCapacity),\n\t}\n\n\t// inform about the local address of the API node\n\tlog.Noticef(\"using signature address %s\", br.sigConfig.Address.String())\n\n\t// add the bridge ref to the fMintCfg and return the instance\n\tbr.fMintCfg.bridge = br\n\tbr.run()\n\treturn br, nil\n}", "func New() FormDecoder {\n\tif SchemaDecoder == nil {\n\t\tSchemaDecoder = schema.NewDecoder()\n\t}\n\treturn FormDecoder{\n\t\tSchemaDecoder: SchemaDecoder,\n\t}\n}", "func (d *Decoder) GOB(val interface{}) {\n\tgobd := gob.NewDecoder(d.buf)\n\tif err := gobd.Decode(val); err != nil {\n\t\tlog.Panicf(\"gob: failed to decode: %v\", err)\n\t}\n}", "func newGoFactory() *GOFactory {\n\tgologger.SLogger.Println(\"Init Game Object Factory Singleton\")\n\tfOnce.Do(func() {\n\t\tgofactory = &GOFactory{\n\t\t\tGoCreator: make(map[string]ICreator),\n\t\t}\n\t})\n\treturn gofactory\n}", "func newParser(src []byte) *blockParser {\n\tp := &parser{\n\t\tsrc: src,\n\t}\n\tbp := &blockParser{parser: p, blockChan: make(chan Block)}\n\tgo bp.run()\n\treturn bp\n}", "func newTemplateDecoder(config *Config, decoder resources.Decoder) *templateDecoder {\n\treturn &templateDecoder{\n\t\tconfig: config,\n\t\tdecoder: decoder,\n\t}\n}", "func newCodecRegistry() *codecs {\n\treturn &codecs{\n\t\tmu: new(sync.RWMutex),\n\t\titems: make(map[string]codec.Interface),\n\t}\n}", "func NewDecoder(r io.Reader) *Decoder {\n\treturn &Decoder{\n\t\tr: bufio.NewReaderSize(r, bufferSize),\n\t\tctx: context.Background(),\n\t}\n}", "func NewDecoder(filename string, src interface{}) (*Decoder, error) {\n\td, err := newParser(filename, src)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Decoder{parser: d}, nil\n}", "func NewFrameBuffer() *FrameBuffer {\n\n\tvar fb FrameBuffer\n\tfb.Strips = make([]LedStrip, 0, config.StripsPerTeensy)\n\n\tif config.Titania {\n\t\t// Titania config\n\t\tfb.Strips = append(fb.Strips, *NewLedStrip(true, 229))\n\t\tfb.Strips = append(fb.Strips, *NewLedStrip(false, 178))\n\t\tfb.Strips = append(fb.Strips, *NewLedStrip(false, 228))\n\t\tfb.Strips = append(fb.Strips, *NewLedStrip(true, 0))\n\t\tfb.Strips = append(fb.Strips, *NewLedStrip(true, 0))\n\t\tfb.Strips = append(fb.Strips, *NewLedStrip(true, 0))\n\t\tfb.Strips = append(fb.Strips, *NewLedStrip(true, 0))\n\t\tfb.Strips = append(fb.Strips, *NewLedStrip(true, 0))\n\t} else {\n\t\t// Bedroom config\n\t\t// 0, 1 Unused strips (bedroom)\n\t\tfb.Strips = append(fb.Strips, *NewLedStrip(true, 0))\n\t\tfb.Strips = append(fb.Strips, *NewLedStrip(true, 0))\n\n\t\t// 2 Bed wall\n\t\tfb.Strips = append(fb.Strips, *NewLedStrip(false, 168))\n\n\t\t// 3 Bed curtains\n\t\tfb.Strips = append(fb.Strips, *NewLedStrip(true, 164))\n\n\t\t// 4 Bed ceiling\n\t\tfb.Strips = append(fb.Strips, *NewLedStrip(false, 165))\n\n\t\t// 5 Dressing table wall\n\t\tfb.Strips = append(fb.Strips, *NewLedStrip(true, 85))\n\n\t\t// 6 Dressing table ceiling\n\t\tfb.Strips = append(fb.Strips, *NewLedStrip(true, 80))\n\n\t\t// 7 Dressing table curtain\n\t\tfb.Strips = append(fb.Strips, *NewLedStrip(false, 162))\n\n\t\t// 8 Bathroom mirror wall\n\t\tfb.Strips = append(fb.Strips, *NewLedStrip(true, 172))\n\n\t\t// 9 Bath ceiling\n\t\tfb.Strips = append(fb.Strips, *NewLedStrip(false, 
226))\n\n\t\t// 10 Bath+ wall\n\t\tfb.Strips = append(fb.Strips, *NewLedStrip(false, 291))\n\n\t\t// 11 Bathroom mirror ceiling\n\t\tfb.Strips = append(fb.Strips, *NewLedStrip(true, 162))\n\n\t\t// 12 Unused\n\t\tfb.Strips = append(fb.Strips, *NewLedStrip(true, 0))\n\n\t\t// 13 Left of door ceiling\n\t\tfb.Strips = append(fb.Strips, *NewLedStrip(true, 88))\n\n\t\t// 14 Right of door ceiling\n\t\tfb.Strips = append(fb.Strips, *NewLedStrip(false, 142))\n\n\t\t// 15 Right of door wall\n\t\tfb.Strips = append(fb.Strips, *NewLedStrip(false, 122))\n\t}\n\n\t// Sanity check\n\tnumberOfStrips := len(fb.Strips)\n\tif numberOfStrips <= 0 || numberOfStrips%config.StripsPerTeensy != 0 {\n\t\tlog.WithField(\"StripsPerTeensy\", strconv.Itoa(config.StripsPerTeensy)).Panic(\"framebuffer strips must be multiple of\")\n\t}\n\treturn &fb\n}", "func New(handlers ...ImageHandler) *Goba {\n\treturn &Goba{handlers: handlers}\n}", "func NewDecoder(r io.Reader) (*Decoder, error) {\n\tdata, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"reading data before decoding\")\n\t}\n\n\treturn &Decoder{buf: bytes.NewReader(data)}, nil\n}", "func NewDecoder(r io.ReaderAt) *Decoder {\n\treturn &Decoder{r: r}\n}", "func NewDecoder(o DecoderOptions, eh *astiencoder.EventHandler, c *astikit.Closer) (d *Decoder, err error) {\n\t// Extend node metadata\n\tcount := atomic.AddUint64(&countDecoder, uint64(1))\n\to.Node.Metadata = o.Node.Metadata.Extend(fmt.Sprintf(\"decoder_%d\", count), fmt.Sprintf(\"Decoder #%d\", count), \"Decodes\", \"decoder\")\n\n\t// Create decoder\n\td = &Decoder{\n\t\tc: astikit.NewChan(astikit.ChanOptions{\n\t\t\tAddStrategy: astikit.ChanAddStrategyBlockWhenStarted,\n\t\t\tProcessAll: true,\n\t\t}),\n\t\teh: eh,\n\t\toutputCtx: o.OutputCtx,\n\t\tstatIncomingRate: astikit.NewCounterRateStat(),\n\t\tstatWorkRatio: astikit.NewDurationPercentageStat(),\n\t}\n\td.BaseNode = astiencoder.NewBaseNode(o.Node, astiencoder.NewEventGeneratorNode(d), eh)\n\td.d = newFrameDispatcher(d, eh, c)\n\td.addStats()\n\n\t// Find decoder\n\tvar cdc *avcodec.Codec\n\tif cdc = avcodec.AvcodecFindDecoder(o.CodecParams.CodecId()); cdc == nil {\n\t\terr = fmt.Errorf(\"astilibav: no decoder found for codec id %+v\", o.CodecParams.CodecId())\n\t\treturn\n\t}\n\n\t// Alloc context\n\tif d.ctxCodec = cdc.AvcodecAllocContext3(); d.ctxCodec == nil {\n\t\terr = fmt.Errorf(\"astilibav: no context allocated for codec %+v\", cdc)\n\t\treturn\n\t}\n\n\t// Copy codec parameters\n\tif ret := avcodec.AvcodecParametersToContext(d.ctxCodec, o.CodecParams); ret < 0 {\n\t\terr = fmt.Errorf(\"astilibav: avcodec.AvcodecParametersToContext failed: %w\", NewAvError(ret))\n\t\treturn\n\t}\n\n\t// Open codec\n\tif ret := d.ctxCodec.AvcodecOpen2(cdc, nil); ret < 0 {\n\t\terr = fmt.Errorf(\"astilibav: d.ctxCodec.AvcodecOpen2 failed: %w\", NewAvError(ret))\n\t\treturn\n\t}\n\n\t// Make sure the codec is closed\n\tc.Add(func() error {\n\t\tif ret := d.ctxCodec.AvcodecClose(); ret < 0 {\n\t\t\temitAvError(nil, eh, ret, \"d.ctxCodec.AvcodecClose failed\")\n\t\t}\n\t\treturn nil\n\t})\n\treturn\n}", "func newBuffer(b []byte) *buffer {\n\treturn &buffer{proto.NewBuffer(b), 0}\n}", "func New() (g *Glutton, err error) {\n\tg = &Glutton{}\n\tg.protocolHandlers = make(map[string]protocolHandlerFunc, 0)\n\tviper.SetDefault(\"var-dir\", \"/var/lib/glutton\")\n\tif err = g.makeID(); err != nil {\n\t\treturn nil, err\n\t}\n\tg.logger = NewLogger(g.id.String())\n\n\t// Loading the congiguration\n\tg.logger.Info(\"Loading configurations from: 
config/conf.yaml\", zap.String(\"reporter\", \"glutton\"))\n\tg.conf, err = config.Init(g.logger)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trulesPath := g.conf.GetString(\"rules_path\")\n\trulesFile, err := os.Open(rulesPath)\n\tdefer rulesFile.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tg.rules, err = freki.ReadRulesFromFile(rulesFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn g, nil\n\n}", "func NewBulb(ip string) *Bulb {\n\tbulb := &Bulb{\n\t\tstandardCommands{},\n\t\tcommonCommands{},\n\t\tbackgroundLightCommands{},\n\t\tip,\n\t\t55443, // 55443 is a constant protocol port\n\t\tnil,\n\t\tmake(map[int]chan Response),\n\t\tsync.Mutex{},\n\t}\n\t// I know It looks badly, but \"It is working? It is working\"\n\tbulb.standardCommands.commander = bulb\n\tbulb.commonCommands.commander = bulb\n\tbulb.Bg.commander = bulb\n\tbulb.Bg.prefix = \"bg_\"\n\treturn bulb\n}", "func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {\n\tinitPredefined()\n\tvar d Decoder\n\td.o.setDefault()\n\tfor _, o := range opts {\n\t\terr := o(&d.o)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\td.current.crc = xxhash.New()\n\td.current.flushed = true\n\n\tif r == nil {\n\t\td.current.err = ErrDecoderNilInput\n\t}\n\n\t// Transfer option dicts.\n\td.dicts = make(map[uint32]*dict, len(d.o.dicts))\n\tfor _, dc := range d.o.dicts {\n\t\td.dicts[dc.id] = dc\n\t}\n\td.o.dicts = nil\n\n\t// Create decoders\n\td.decoders = make(chan *blockDec, d.o.concurrent)\n\tfor i := 0; i < d.o.concurrent; i++ {\n\t\tdec := newBlockDec(d.o.lowMem)\n\t\tdec.localFrame = newFrameDec(d.o)\n\t\td.decoders <- dec\n\t}\n\n\tif r == nil {\n\t\treturn &d, nil\n\t}\n\treturn &d, d.Reset(r)\n}", "func NewDecoder(r io.Reader) *Decoder {\n\th := sha1.New()\n\treturn &Decoder{\n\t\tr: io.TeeReader(r, h),\n\t\thash: h,\n\t\textReader: bufio.NewReader(nil),\n\t}\n}", "func new_buffer(conn *websocket.Conn, ctrl chan struct{}, txqueuelen int) *Buffer {\n\tbuf := Buffer{conn: conn}\n\tbuf.pending = make(chan []byte, txqueuelen)\n\tbuf.ctrl = ctrl\n\tbuf.cache = make([]byte, packet.PACKET_LIMIT+2)\n\treturn &buf\n}", "func newBrainfog(bfSrc []byte) *brainfog {\n\tbf := &brainfog{inCh: make(chan byte), outCh: make(chan byte)}\n\n\t// Pick the instructions from the source and add them to the program\n\tinstructions := []byte(\"+-<>,.[]\")\n\tfor _, c := range bfSrc {\n\t\tif bytes.Contains(instructions, []byte{c}) {\n\t\t\tbf.program = append(bf.program, c)\n\t\t}\n\t}\n\n\t// Run the program\n\tgo bf.run()\n\treturn bf\n}", "func SwitchNew() (*Switch, error) {\n\tc := C.gtk_switch_new()\n\tif c == nil {\n\t\treturn nil, nilPtrErr\n\t}\n\tobj := glib.Take(unsafe.Pointer(c))\n\treturn wrapSwitch(obj), nil\n}", "func NewLob(baseAPI, apiKey, userAgent string) *lob {\n\treturn &lob{\n\t\tBaseAPI: baseAPI,\n\t\tAPIKey: apiKey,\n\t\tUserAgent: userAgent,\n\t}\n}", "func NewWin32LobAppRegistryDetection()(*Win32LobAppRegistryDetection) {\n m := &Win32LobAppRegistryDetection{\n Win32LobAppDetection: *NewWin32LobAppDetection(),\n }\n odataTypeValue := \"#microsoft.graph.win32LobAppRegistryDetection\"\n m.SetOdataType(&odataTypeValue)\n return m\n}", "func newChunkedBuffer(inChunkSize int64, outChunkSize int64, flags int) intermediateBuffer {\n\treturn &chunkedBuffer{\n\t\toutChunk: outChunkSize,\n\t\tlength: 0,\n\t\tdata: make([]byte, inChunkSize),\n\t\tflags: flags,\n\t}\n}", "func (z *Float) GobDecode(buf []byte) error {}", "func execNewDecoder(_ int, p *gop.Context) {\n\targs := p.GetArgs(1)\n\tret := 
json.NewDecoder(args[0].(io.Reader))\n\tp.Ret(1, ret)\n}" ]
[ "0.694183", "0.58746696", "0.5644927", "0.5483799", "0.54688776", "0.5439269", "0.5437573", "0.5433669", "0.5406622", "0.5392829", "0.538944", "0.53797585", "0.5357301", "0.52865094", "0.5255565", "0.52430403", "0.524038", "0.51871175", "0.5151889", "0.50871867", "0.50789195", "0.50571936", "0.5050196", "0.50461984", "0.50137985", "0.5005338", "0.49803555", "0.4965575", "0.4961585", "0.4958156", "0.49044546", "0.48995706", "0.4897439", "0.4878426", "0.48744687", "0.48720926", "0.48720926", "0.48720926", "0.48355544", "0.48266268", "0.48067248", "0.48051795", "0.4802012", "0.47824788", "0.47807342", "0.47681123", "0.47622472", "0.47590783", "0.47566712", "0.47529086", "0.47529086", "0.47529086", "0.47529086", "0.47529086", "0.47529086", "0.47478917", "0.47463214", "0.4741551", "0.47375634", "0.47307327", "0.47277418", "0.47182235", "0.47175947", "0.47061282", "0.46909377", "0.46857184", "0.46823993", "0.4679751", "0.4662513", "0.4655284", "0.46507746", "0.46491715", "0.46418342", "0.46226728", "0.46191674", "0.46190673", "0.46177378", "0.46155444", "0.4615071", "0.4609302", "0.4606184", "0.46028376", "0.46019453", "0.45989516", "0.4594286", "0.458501", "0.45822406", "0.45784652", "0.45713076", "0.45695013", "0.4569404", "0.45550227", "0.4550169", "0.45404944", "0.45393214", "0.45367375", "0.45322824", "0.4524126", "0.4524028", "0.45235556" ]
0.8035107
0
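A minimal sketch of how the NewGobDecoderLight constructor in the row above might be used. Only the constructor appears in the dataset, so the struct layout and the Decode helper below are assumptions inferred from it, not a library's actual API.

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// Assumed struct layout, inferred from the constructor body in the row above.
type GobDecoderLight struct {
	bytes   *bytes.Buffer
	decoder *gob.Decoder
}

func NewGobDecoderLight() *GobDecoderLight {
	ret := &GobDecoderLight{
		bytes: &bytes.Buffer{},
	}
	ret.decoder = gob.NewDecoder(ret.bytes)
	return ret
}

// Decode is a hypothetical helper: append raw gob bytes to the internal
// buffer, then decode into val. No mutex is taken, which is consistent
// with the "lock-free" wording of the query.
func (d *GobDecoderLight) Decode(raw []byte, val interface{}) error {
	if _, err := d.bytes.Write(raw); err != nil {
		return err
	}
	return d.decoder.Decode(val)
}

func main() {
	// Produce some gob bytes to feed in.
	var src bytes.Buffer
	if err := gob.NewEncoder(&src).Encode("hello"); err != nil {
		panic(err)
	}

	var s string
	if err := NewGobDecoderLight().Decode(src.Bytes(), &s); err != nil {
		panic(err)
	}
	fmt.Println(s) // hello
}

Note the contrast with the GobTranscoder in the next row, which guards its two buffers with sync.Mutex fields; the Light variants carry no locks at all.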
NewGobTranscoder will return a newly initialised transcoder to help with the mundane encoding/decoding operations
func NewGobTranscoder() *GobTranscoder {
	ret := &GobTranscoder{
		inBytes:    &bytes.Buffer{},
		outBytes:   &bytes.Buffer{},
		encoderMut: &sync.Mutex{},
		decoderMut: &sync.Mutex{},
	}
	ret.encoder = gob.NewEncoder(ret.outBytes)
	ret.decoder = gob.NewDecoder(ret.inBytes)
	return ret
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewGobCode(conn io.ReadWriteCloser) Codec {\n\tbuf := bufio.NewWriter(conn)\n\treturn &GobCodec{conn: conn, buf: buf, dec: gob.NewDecoder(conn), enc: gob.NewEncoder(buf)}\n}", "func NewGOBCodec() *GOBCodec {\n\tr := GOBCodec(0)\n\treturn &r\n}", "func FromGob(data []byte, dst interface{}) error {\n\treturn NewGobber().From(data, dst)\n}", "func GOB() (ret httprpc.Codec) {\n\treturn Danger(\n\t\tfunc(w io.Writer) DangerEncoder {\n\t\t\treturn gob.NewEncoder(w)\n\t\t},\n\t\tfunc(r io.Reader) DangerDecoder {\n\t\t\treturn gob.NewDecoder(r)\n\t\t},\n\t)\n}", "func NewGobSerializer() gbus.Serializer {\n\treturn &Gob{\n\t\tlock: &sync.Mutex{},\n\t\tregisteredSchemas: make(map[string]reflect.Type),\n\t}\n}", "func ToGob(src interface{}) ([]byte, error) {\n\treturn NewGobber().To(src)\n}", "func (z *Rat) GobDecode(buf []byte) error {}", "func (d *DFA) GobEncode() ([]byte, error) {\n\tbuffer := new(bytes.Buffer)\n\tencoder := gob.NewEncoder(buffer)\n\tif err := encoder.Encode(d.initial); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not GOB encode initial state\")\n\t}\n\tif err := encoder.Encode(d.table); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not GOB encode sparse table\")\n\t}\n\treturn buffer.Bytes(), nil\n}", "func (d *DFA) GobDecode(bs []byte) error {\n\tbuffer := bytes.NewBuffer(bs)\n\tdecoder := gob.NewDecoder(buffer)\n\tvar initial State\n\tvar table []Cell\n\tif err := decoder.Decode(&initial); err != nil {\n\t\treturn errors.Wrapf(err, \"could not GOB decode initial state\")\n\t}\n\tif err := decoder.Decode(&table); err != nil {\n\t\treturn errors.Wrapf(err, \"could not GOB decode sparse table\")\n\t}\n\td.initial = initial\n\td.table = table\n\treturn nil\n}", "func NewGobDecoderLight() *GobDecoderLight {\n\tret := &GobDecoderLight{\n\t\tbytes: &bytes.Buffer{},\n\t}\n\tret.decoder = gob.NewDecoder(ret.bytes)\n\treturn ret\n}", "func NewEncoder() Encoder {\n return &encoder{}\n}", "func GobDecode(buffer []byte, value interface{}) error {\n buf := bytes.NewBuffer(buffer)\n decoder := gob.NewDecoder(buf)\n err := decoder.Decode(value)\n if err != nil {\n return gobDebug.Error(err)\n }\n return nil\n}", "func GobDecode(b []byte) (interface{}, error) {\n\tvar result interface{}\n\terr := gob.NewDecoder(bytes.NewBuffer(b)).Decode(&result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result, nil\n}", "func NewCoder(buf []byte) *Coder {\n\tret := new(Coder)\n\n\tret.buf = buf\n\t// Figure 15.\n\tret.pos = 2\n\t// Figure 14.\n\tret.low = uint16(buf[0])<<8 | uint16(buf[1])\n\t// Figure 13.\n\tret.rng = 0xFF00\n\tret.cur_byte = -1\n\tif ret.low >= ret.rng {\n\t\tret.low = ret.rng\n\t\tret.pos = len(buf) - 1\n\t}\n\n\t// 3.8.1.3. 
Initial Values for the Context Model\n\tret.SetTable(DefaultStateTransition)\n\n\treturn ret\n}", "func GobGenerateDecoder(r io.Reader) Decoder {\n\treturn gob.NewDecoder(r)\n}", "func (x *Rat) GobEncode() ([]byte, error) {}", "func GobDecode(ctx context.Context, data []byte, obj interface{}) error {\n\treturn gob.NewDecoder(bytes.NewBuffer(data)).Decode(obj)\n}", "func NewGobEncoderLight() *GobEncoderLight {\n\tret := &GobEncoderLight{\n\t\tbytes: &bytes.Buffer{},\n\t}\n\tret.encoder = gob.NewEncoder(ret.bytes)\n\treturn ret\n}", "func NewEncoding(e Encoder, d Decoder) EncodeDecoder {\n\treturn &Encoding{\n\t\tEncoder: e,\n\t\tDecoder: d,\n\t}\n}", "func gobDecode(buf []byte, into interface{}) error {\n\tif buf == nil {\n\t\treturn nil\n\t}\n\tdec := gob.NewDecoder(bytes.NewReader(buf))\n\treturn dec.Decode(into)\n}", "func DecodeGob(data []byte, v interface{}) error {\n\tb := bytes.NewBuffer(data)\n\treturn gob.NewDecoder(b).Decode(v)\n}", "func init() {\n\tPackages[\"encoding/gob\"] = Package{\n\tBinds: map[string]Value{\n\t\t\"NewDecoder\":\tValueOf(gob.NewDecoder),\n\t\t\"NewEncoder\":\tValueOf(gob.NewEncoder),\n\t\t\"Register\":\tValueOf(gob.Register),\n\t\t\"RegisterName\":\tValueOf(gob.RegisterName),\n\t}, Types: map[string]Type{\n\t\t\"CommonType\":\tTypeOf((*gob.CommonType)(nil)).Elem(),\n\t\t\"Decoder\":\tTypeOf((*gob.Decoder)(nil)).Elem(),\n\t\t\"Encoder\":\tTypeOf((*gob.Encoder)(nil)).Elem(),\n\t\t\"GobDecoder\":\tTypeOf((*gob.GobDecoder)(nil)).Elem(),\n\t\t\"GobEncoder\":\tTypeOf((*gob.GobEncoder)(nil)).Elem(),\n\t}, Proxies: map[string]Type{\n\t\t\"GobDecoder\":\tTypeOf((*P_encoding_gob_GobDecoder)(nil)).Elem(),\n\t\t\"GobEncoder\":\tTypeOf((*P_encoding_gob_GobEncoder)(nil)).Elem(),\n\t}, \n\t}\n}", "func (k *Key) GobDecode(buf []byte) error {\n\tnk, err := NewKeyEncoded(string(buf))\n\tif err != nil {\n\t\treturn err\n\t}\n\t*k = *nk\n\treturn nil\n}", "func (g *Gammas) GobDecode(data []byte) error {\n\tvar err error\n\tfor len(data) > 0 {\n\t\tg2 := new(bn256.G2)\n\t\tdata, err = g2.Unmarshal(data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*g = append(*g, g2)\n\t}\n\treturn nil\n}", "func GobDecode(data []byte, obj interface{}) error {\n\treturn gob.NewDecoder(bytes.NewBuffer(data)).Decode(obj)\n}", "func (d *Decoder) GOB(val interface{}) {\n\tgobd := gob.NewDecoder(d.buf)\n\tif err := gobd.Decode(val); err != nil {\n\t\tlog.Panicf(\"gob: failed to decode: %v\", err)\n\t}\n}", "func NewBinaryCodec() *BinaryCodec {\n var c *BinaryCodec = &BinaryCodec{}\n c.buf = &bytes.Buffer{}\n return c\n}", "func TestEncodeDecodeGob(t *testing.T) {\n\ttestEncodeDecodeFunctions(t,\n\t\tencodeLeaseRequestGob, encodeLeaseReplyGob,\n\t\tdecodeLeaseRequestGob, decodeLeaseReplyGob)\n}", "func EncodeGob(p interface{}) (data []byte, err error) {\n\tb := bytes.Buffer{}\n\tencoder := gob.NewEncoder(&b)\n\terr = encoder.Encode(p)\n\tif err != nil {\n\t\treturn\n\t}\n\tdata = b.Bytes()\n\treturn\n}", "func (z *Int) GobDecode(buf []byte) error {}", "func GobGenerateEncoder(w io.Writer) Encoder {\n\treturn gob.NewEncoder(w)\n}", "func (t *Tensor) GobDecode(b []byte) error {\n\tr := bytes.NewReader(b)\n\tdec := gob.NewDecoder(r)\n\n\tvar dt tf.DataType\n\terr := dec.Decode(&dt)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar shape []int64\n\terr = dec.Decode(&shape)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar tensor *tf.Tensor\n\tswitch dt {\n\tcase tf.String:\n\t\t// TensorFlow Go package currently does not support\n\t\t// string serialization. 
Let's do it ourselves.\n\t\tvar str string\n\t\terr = dec.Decode(&str)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttensor, err = tf.NewTensor(str)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\ttensor, err = tf.ReadTensor(dt, shape, r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tt.Tensor = tensor\n\treturn nil\n}", "func (p *perceptron) GobEncode() ([]byte, error) {\n\tvar buf bytes.Buffer\n\tencoder := gob.NewEncoder(&buf)\n\n\t// if err := encoder.Encode(&p.weights); err != nil {\n\t// \treturn nil, err\n\t// }\n\n\tif err := encoder.Encode(&p.weightsSF); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := encoder.Encode(&p.weightsTF); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := encoder.Encode(&p.totals); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := encoder.Encode(&p.steps); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := encoder.Encode(p.instancesSeen); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func (e Encoding) New() EncodingFunc {\n\tif e < maxEncoding {\n\t\tf := Encodings[e]\n\t\tif f != nil {\n\t\t\treturn f()\n\t\t}\n\t}\n\tlogger.Errorf(\"requested Encoding function %s is unavailable\", strconv.Itoa(int(e)))\n\treturn nil\n}", "func NewCoder(t FullType) Coder {\n\tc, err := inferCoder(t)\n\tif err != nil {\n\t\tpanic(err) // for now\n\t}\n\treturn Coder{c}\n}", "func EncodeGobZlib(p interface{}) (data []byte, err error) {\n\tb := bytes.Buffer{}\n\tcompressor, err := zlib.NewWriterLevel(&b, zlib.BestCompression)\n\tif err != nil {\n\t\treturn\n\t}\n\tencoder := gob.NewEncoder(compressor)\n\terr = encoder.Encode(p)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = compressor.Close()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdata = b.Bytes()\n\treturn\n}", "func (z *Float) GobDecode(buf []byte) error {}", "func NewEncoding(encoder string) *Encoding {\n\te := new(Encoding)\n\te.encoder = encoder\n\n\tfor i := 0; i < len(e.decodeMap); i++ {\n\t\te.decodeMap[i] = 0xFF\n\t}\n\tfor i := 0; i < len(encoder); i++ {\n\t\te.decodeMap[encoder[i]] = byte(i)\n\t}\n\treturn e\n}", "func New() *Codec {\n\treturn &Codec{}\n}", "func (t *Type) GobDecode(buf []byte) error {\n\tr := bytes.NewReader(buf)\n\tdec := gob.NewDecoder(r)\n\n\tvar gt gobType\n\terr := dec.Decode(&gt)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error decoding cty.Type: %s\", err)\n\t}\n\tif gt.Version != 0 {\n\t\treturn fmt.Errorf(\"unsupported cty.Type encoding version %d; only 0 is supported\", gt.Version)\n\t}\n\n\tt.typeImpl = gt.Impl\n\n\treturn nil\n}", "func GobUnmarshal(i interface{}, b []byte) error {\n\tbuf := bytes.NewBuffer(b)\n\tdecoder := gob.NewDecoder(buf)\n\treturn decoder.Decode(i)\n}", "func (g *Gammas) GobEncode() ([]byte, error) {\n\tbuff := bytes.Buffer{}\n\tif g != nil {\n\t\tfor _, g2 := range *g {\n\t\t\tbuff.Write(g2.Marshal())\n\t\t}\n\t}\n\treturn buff.Bytes(), nil\n}", "func EncodeGob(data interface{}) ([]byte, error) {\n\tb := new(bytes.Buffer)\n\terr := gob.NewEncoder(b).Encode(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn b.Bytes(), nil\n}", "func NewJSONTranscoder() *JSONTranscoder {\n\treturn &JSONTranscoder{}\n}", "func (k *Key) GobEncode() ([]byte, error) {\n\treturn []byte(k.Encode()), nil\n}", "func NewCoder(name string, command GitCmd) *Coder {\n\treturn &Coder{Name: name, Command: command}\n}", "func NewBinaryCodecFrom( b []byte ) *BinaryCodec {\n var c *BinaryCodec = &BinaryCodec{}\n buf := bytes.NewBuffer( b )\n c.buf = buf\n return c\n}", "func (g *Graph) GobDecode(b []byte) (err 
error) {\n\t// decode into graphGob\n\tgGob := &graphGob{}\n\tbuf := bytes.NewBuffer(b)\n\tdec := gob.NewDecoder(buf)\n\n\terr = dec.Decode(gGob)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// add the vertexes\n\tfor _, key := range gGob.Vertexes {\n\t\tg.Add(key)\n\t}\n\n\t// connect the vertexes\n\tfor key, neighbors := range gGob.Edges {\n\t\tfor otherKey, weight := range neighbors {\n\t\t\tif ok := g.Connect(key, otherKey, weight); !ok {\n\t\t\t\treturn errors.New(\"invalid edge endpoints\")\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}", "func NewDecoder() *Decoder {\n\treturn &Decoder{\n\t\tbuffer: []byte{},\n\t\tcache: make(map[string]struct{}),\n\t}\n}", "func NewGzip() *Gzip {\n\treturn &Gzip{}\n}", "func (b *Binance) SaveGob(file string) error {\n\tf, _ := os.OpenFile(file, os.O_RDWR|os.O_CREATE, 0777)\n\tgob.Register(&stg.KeepStrategy{})\n\tencode := gob.NewEncoder(f)\n\tif err := encode.Encode(b); err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\treturn nil\n}", "func NewDecoder(b []byte) *Decoder {\n\treturn &Decoder{orig: b, in: b}\n}", "func (val *Value) GobDecode(buf []byte) error {\n\tr := bytes.NewReader(buf)\n\tdec := gob.NewDecoder(r)\n\n\tvar gv gobValue\n\terr := dec.Decode(&gv)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error decoding cty.Value: %s\", err)\n\t}\n\tif gv.Version != 0 {\n\t\treturn fmt.Errorf(\"unsupported cty.Value encoding version %d; only 0 is supported\", gv.Version)\n\t}\n\n\t// big.Float seems to, for some reason, lose its \"pointerness\" when we\n\t// round-trip it, so we'll fix that here.\n\tif bf, ok := gv.V.(big.Float); ok {\n\t\tgv.V = &bf\n\t}\n\n\tval.ty = gv.Ty\n\tval.v = gv.V\n\n\treturn nil\n}", "func (b *blockEnc) initNewEncode() {\n\tb.recentOffsets = [3]uint32{1, 4, 8}\n\tb.litEnc.Reuse = huff0.ReusePolicyNone\n\tb.coders.setPrev(nil, nil, nil)\n}", "func GobEncode(data interface{}) []byte {\n\tvar buff bytes.Buffer\n\n\tencoder := gob.NewEncoder(&buff)\n\tif err := encoder.Encode(data); err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\treturn buff.Bytes()\n}", "func (t *Time) GobDecode(data []byte) error {}", "func FromGiB(gib uint64) ByteQuantity {\n\treturn ByteQuantity{\n\t\tQuantity: gib * gbConvert,\n\t}\n}", "func newCompressor(format CompressEncodingType) (compressor, error) {\n\tvar (\n\t\twriter encoder\n\t\terr error\n\t)\n\n\tswitch format {\n\tcase GZIPCompression:\n\t\twriter = gzip.NewWriter(io.Discard)\n\tcase DeflateCompression:\n\t\twriter, err = flate.NewWriter(io.Discard, flate.BestSpeed)\n\t\tif err != nil {\n\t\t\treturn compressor{}, err\n\t\t}\n\tcase NoCompression:\n\t\twriter = nil\n\tdefault:\n\t\treturn compressor{}, fmt.Errorf(\"invalid format: %s\", format)\n\t}\n\n\treturn compressor{\n\t\tformat: format,\n\t\twriter: writer,\n\t}, nil\n}", "func newBrainfog(bfSrc []byte) *brainfog {\n\tbf := &brainfog{inCh: make(chan byte), outCh: make(chan byte)}\n\n\t// Pick the instructions from the source and add them to the program\n\tinstructions := []byte(\"+-<>,.[]\")\n\tfor _, c := range bfSrc {\n\t\tif bytes.Contains(instructions, []byte{c}) {\n\t\t\tbf.program = append(bf.program, c)\n\t\t}\n\t}\n\n\t// Run the program\n\tgo bf.run()\n\treturn bf\n}", "func GobEncode(v interface{}) ([]byte, error) {\n\tvar buf bytes.Buffer\n\terr := gob.NewEncoder(&buf).Encode(&v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}", "func NewEncoder() *Encoder {\n\tselect {\n\tcase enc := <-encObjPool:\n\t\treturn enc\n\tdefault:\n\t\treturn &Encoder{}\n\t}\n}", "func GobDecodeFromFile(filename string, object 
interface{}) error {\n file, err := os.Open(filename)\n if err != nil {\n // Might be caused by file does not exist\n return gobDebug.Error(err)\n }\n defer file.Close()\n decoder := gob.NewDecoder(file)\n if err := decoder.Decode(object); err != nil {\n return gobDebug.Error(err)\n }\n return nil\n}", "func gobEncode(data interface{}) []byte {\n\tvar buff bytes.Buffer\n\n\tenc := gob.NewEncoder(&buff)\n\terr := enc.Encode(data)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\treturn buff.Bytes()\n}", "func (t *Timestamp) GobDecode(data []byte) error {\n\tvar tm time.Time\n\n\tif err := tm.UnmarshalBinary(data); err != nil {\n\t\treturn err\n\t}\n\n\t*t = Timestamp(tm)\n\n\treturn nil\n}", "func NewCodec() *Codec {\n\treturn &Codec{}\n}", "func (s *Store) GobDecode(data []byte) error {\n\ts.access.Lock()\n\tdefer s.access.Unlock()\n\n\tbuf := bytes.NewBuffer(data)\n\n\tdecoder := gob.NewDecoder(buf)\n\tvar version uint8\n\terr := decoder.Decode(&version)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = decoder.Decode(&s.data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor key, _ := range s.data {\n\t\ts.doKeyChanged(key)\n\t}\n\n\treturn nil\n}", "func New(alphabet []byte) *Encoding {\n\tenc := &Encoding{}\n\tcopy(enc.alphabet[:], alphabet[:])\n\tfor i := range enc.decodeMap {\n\t\tenc.decodeMap[i] = -1\n\t}\n\tfor i, b := range enc.alphabet {\n\t\tenc.decodeMap[b] = int64(i)\n\t}\n\treturn enc\n}", "func NewBinaryEncoding(c Constructor) *BinaryEncoding {\n\treturn &BinaryEncoding{Constructor: c}\n}", "func (s *CountMinSketch) GobDecode(data []byte) error {\n\tbuf := bytes.NewBuffer(data)\n\t_, err := s.ReadFrom(buf)\n\treturn err\n}", "func NewEncoding(encoder string) (*Encoding, error) {\n\tif len(encoder) != 58 {\n\t\treturn nil, errors.New(\"base58: encoding alphabet is not 58-bytes\")\n\t}\n\tfor i := 0; i < len(encoder); i++ {\n\t\tif encoder[i] == '\\n' || encoder[i] == '\\r' {\n\t\t\treturn nil, errors.New(\"base58: encoding alphabet contains newline character\")\n\t\t}\n\t}\n\te := new(Encoding)\n\tfor i := range e.decodeMap {\n\t\te.decodeMap[i] = -1\n\t}\n\tfor i := range encoder {\n\t\te.encode[i] = byte(encoder[i])\n\t\te.decodeMap[e.encode[i]] = i\n\t}\n\treturn e, nil\n}", "func NewEncoder() Encoder {\n\treturn &encoder{\n\t\tbuf: new(bytes.Buffer),\n\t}\n}", "func NewEncoder() *Encoder {\n\treturn &Encoder{buf: &bytes.Buffer{}, tmp: &bytes.Buffer{}}\n}", "func (t Time) GobEncode() ([]byte, error) {}", "func gobEncode(value interface{}) ([]byte, error) {\n\tvar buf bytes.Buffer\n\tenc := gob.NewEncoder(&buf)\n\terr := enc.Encode(value)\n\treturn buf.Bytes(), err\n}", "func gobInfoDecode(gobBytes []byte) (*storage.GobInfo, error) {\n\tgobInfo := &storage.GobInfo{}\n\tbuf := bytes.NewReader(gobBytes)\n\tgobDec := realgob.NewDecoder(buf)\n\terr := gobDec.Decode(gobInfo)\n\treturn gobInfo, err\n}", "func GbToUtf8(s []byte, encoding string) ([]byte, error) {\n\tvar t transform.Transformer\n\tswitch encoding {\n\tcase \"gbk\":\n\t\tt = simplifiedchinese.GBK.NewDecoder()\n\tcase \"gb18030\":\n\t\tt = simplifiedchinese.GB18030.NewDecoder()\n\t}\n\treader := transform.NewReader(bytes.NewReader(s), t)\n\td, e := ioutil.ReadAll(reader)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn d, nil\n}", "func BenchmarkEncodingGobTweetStruct(b *testing.B) {\n\tb.StartTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tvar bb bytes.Buffer\n\t\tenc := gob.NewEncoder(&bb)\n\t\t_ = enc.Encode(tw)\n\t\t_ = bb.Bytes()\n\t}\n}", "func (d *Decimal) GobDecode(data []byte) error {\n\treturn 
d.UnmarshalBinary(data)\n}", "func newBackloader(conf *options) (*backloader, error) {\n\t// validate config\n\tif err := conf.validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// create producer\n\tp, err := bus.NewProducer(&conf.Bus)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &backloader{\n\t\tbusProducer: p,\n\t\tconfig: conf,\n\t}, nil\n}", "func NewEncoding(encoder string) *Encoding {\n\te := new(Encoding)\n\tencode := make([]uint16, 1028)\n\ti := 0\n\tfor _, r := range encoder {\n\t\tif r&0xFFE0 != r {\n\t\t\tpanic(\"encoding alphabet containing illegal character\")\n\t\t}\n\t\tif i >= len(encode) {\n\t\t\tbreak\n\t\t}\n\t\tencode[i] = uint16(r)\n\t\ti++\n\t}\n\tif i < len(encode) {\n\t\tpanic(\"encoding alphabet is not 1028-characters long\")\n\t}\n\tsort.Slice(encode, func(i, j int) bool { return encode[i] < encode[j] })\n\te.splitter = encode[4]\n\tcopy(e.encodeA[:], encode[4:])\n\tcopy(e.encodeB[:], encode[:4])\n\n\tfor i := 0; i < len(e.decodeMap); i++ {\n\t\te.decodeMap[i] = 0xFFFD\n\t}\n\tfor i := 0; i < len(e.encodeA); i++ {\n\t\tidx := e.encodeA[i] >> blockBit\n\t\tif e.decodeMap[idx] != 0xFFFD {\n\t\t\tpanic(\"encoding alphabet have repeating character\")\n\t\t}\n\t\te.decodeMap[idx] = uint16(i) << blockBit\n\t}\n\tfor i := 0; i < len(e.encodeB); i++ {\n\t\tidx := e.encodeB[i] >> blockBit\n\t\tif e.decodeMap[idx] != 0xFFFD {\n\t\t\tpanic(\"encoding alphabet have repeating character\")\n\t\t}\n\t\te.decodeMap[idx] = uint16(i) << blockBit\n\t}\n\treturn e\n}", "func Constructor() Codec {\n\treturn Codec{}\n}", "func NewCodec() (*Codec, error) {\n\n\toptions := cbor.CanonicalEncOptions()\n\toptions.Time = cbor.TimeRFC3339Nano\n\tencoder, err := options.EncMode()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not initialize encoder: %w\", err)\n\t}\n\n\tcompressor, err := zstd.NewWriter(nil,\n\t\tzstd.WithEncoderLevel(zstd.SpeedDefault),\n\t\tzstd.WithEncoderDict(Dictionary),\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not initialize compressor: %w\", err)\n\t}\n\n\tdecompressor, err := zstd.NewReader(nil,\n\t\tzstd.WithDecoderDicts(Dictionary),\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not initialize decompressor: %w\", err)\n\t}\n\n\tc := Codec{\n\t\tencoder: encoder,\n\t\tcompressor: compressor,\n\t\tdecompressor: decompressor,\n\t}\n\n\treturn &c, nil\n}", "func NewDbGapTranslator(publicKey, selfIssuer string, signer kms.Signer) (*DbGapTranslator, error) {\n\tif len(selfIssuer) == 0 {\n\t\treturn nil, fmt.Errorf(\"NewDbGapTranslator failed, selfIssuer or signingPrivateKey is empty\")\n\t}\n\n\tjku := strings.TrimSuffix(selfIssuer, \"/\") + \"/.well-known/jwks.json\"\n\n\tt := &DbGapTranslator{\n\t\tvisaIssuer: selfIssuer,\n\t\tvisaJKU: jku,\n\t\tsigner: signer,\n\t}\n\n\tblock, _ := pem.Decode([]byte(publicKey))\n\tif block == nil {\n\t\treturn t, nil\n\t}\n\tpub, err := x509.ParsePKCS1PublicKey(block.Bytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"parsing public key: %v\", err)\n\t}\n\tt.publicKey = pub\n\n\treturn t, nil\n}", "func New() *Coder {\n\tc := &Coder{\n\t\talphabet: defaultAlphabet,\n\t\tblockSize: defaultBlockSize,\n\t\tminLength: defaultMinLength,\n\t\tmask: mask(defaultBlockSize),\n\t\tmapping: mapping(defaultBlockSize),\n\t}\n\treturn c\n}", "func NewRawBinaryTranscoder() *RawBinaryTranscoder {\n\treturn &RawBinaryTranscoder{}\n}", "func (set *AppleSet) GobDecode(b []byte) error {\n\tset.s.Lock()\n\tdefer set.s.Unlock()\n\n\tbuf := bytes.NewBuffer(b)\n\treturn 
gob.NewDecoder(buf).Decode(&set.m)\n}", "func GobMarshal(i interface{}) ([]byte, error) {\n\tbuf := bytes.NewBuffer(nil)\n\tencoder := gob.NewEncoder(buf)\n\terr := encoder.Encode(i)\n\treturn buf.Bytes(), err\n}", "func (t Type) GobEncode() ([]byte, error) {\n\tbuf := &bytes.Buffer{}\n\tenc := gob.NewEncoder(buf)\n\n\tgt := gobType{\n\t\tVersion: 0,\n\t\tImpl: t.typeImpl,\n\t}\n\n\terr := enc.Encode(gt)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error encoding cty.Type: %s\", err)\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func RatGobDecode(z *big.Rat, buf []byte) error", "func (d Decimal) GobEncode() ([]byte, error) {\n\treturn d.MarshalBinary()\n}", "func newBuffer(b []byte) *buffer {\n\treturn &buffer{proto.NewBuffer(b), 0}\n}", "func GobEncode(value interface{}) []byte {\n buf := bytes.NewBuffer(make([]byte, 0, 1024))\n encoder := gob.NewEncoder(buf)\n // encode unknown type might cause some error\n err := encoder.Encode(value)\n if err != nil {\n gobDebug.Panicf(\"Failed to encode a value: %+v\\n%v\\n\", value, err)\n }\n return buf.Bytes()\n}", "func New(outputFile string, imported ...string) *TGen {\n\tos.Remove(outputFile)\n\tf, e := os.Create(outputFile)\n\terr.Panic(e)\n\toutputFile = filepath.Base(outputFile)\n\toutputFile, _ = extfilepath.Ext(outputFile)\n\tf.Write([]byte(\"// It's file auto generate encodejsonFast\\n\\n\"))\n\tf.Write([]byte(\"package \" + outputFile + \"\\n\\n\"))\n\n\tresultImport := \". \\\"github.com/Cergoo/gol/encode/json/common\\\"\\n\"\n\tresultImport += \"\\\"strconv\\\"\\n\"\n\tfor i := range imported {\n\t\tresultImport += \"\\\"\" + imported[i] + \"\\\"\\n\"\n\t}\n\tf.Write([]byte(\"import (\\n\" + resultImport + \")\\n\"))\n\n\treturn &TGen{\n\t\tstackName: make(stack.TStack, 0, 10),\n\t\ttmpNameGen: tmpName.New(),\n\t\tf: f,\n\t}\n}", "func New() CodeBuilder {\n\treturn &codeBuilder{}\n}", "func NewPBridgeTransactor(address common.Address, transactor bind.ContractTransactor) (*PBridgeTransactor, error) {\n\tcontract, err := bindPBridge(address, nil, transactor, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &PBridgeTransactor{contract: contract}, nil\n}", "func NewDisassemblerBTC() Disassembler { return disassemblerBTC{} }", "func (s *CountMinSketch) GobEncode() ([]byte, error) {\n\tvar buf bytes.Buffer\n\t_, err := s.WriteTo(&buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}", "func BenchmarkDecodingGobTweet(b *testing.B) {\n\tb.StartTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\ttw := Tweet{}\n\t\tdec := gob.NewDecoder(&gobTw)\n\t\terr := dec.Decode(&tw)\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"Error unmarshaling json: %v\", err)\n\t\t}\n\t}\n}", "func New() Go { return Go{} }", "func NewGeneratorBTC() Generator { return generatorBTC{} }" ]
[ "0.68015623", "0.6645202", "0.63813996", "0.6066566", "0.5844885", "0.5745243", "0.5729538", "0.56793123", "0.5636251", "0.5586641", "0.55709475", "0.5561647", "0.55515313", "0.5542255", "0.5485858", "0.5476409", "0.5442238", "0.5441508", "0.5416794", "0.5413589", "0.53800493", "0.5347492", "0.5346695", "0.5337115", "0.530416", "0.5270972", "0.52421254", "0.52324903", "0.5229738", "0.5216559", "0.52126676", "0.51835424", "0.5113932", "0.5101827", "0.50974", "0.5094436", "0.50760067", "0.50684756", "0.5059554", "0.505238", "0.5040723", "0.50153524", "0.50035673", "0.5002909", "0.49963227", "0.49840978", "0.49829522", "0.49729368", "0.4946642", "0.4945462", "0.49365336", "0.4926538", "0.49150118", "0.49082452", "0.49021643", "0.4896634", "0.4885767", "0.48823455", "0.4878865", "0.48731276", "0.4870706", "0.48705134", "0.4868727", "0.4861767", "0.48554116", "0.4836428", "0.48273683", "0.4824233", "0.48184636", "0.48158857", "0.48145726", "0.4808234", "0.47916493", "0.47849604", "0.4772416", "0.47463068", "0.47191772", "0.47112122", "0.47101557", "0.47077757", "0.47053728", "0.47036606", "0.4699778", "0.46961877", "0.46931037", "0.46853733", "0.46820295", "0.4680798", "0.4679355", "0.46669164", "0.4661228", "0.4660888", "0.46571603", "0.46503416", "0.46465912", "0.46273905", "0.46176887", "0.461319", "0.46121734", "0.4610543" ]
0.83728933
0
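(The rows in this stretch of the dump all revolve around Go's encoding/gob package — the negatives above are variations on GobEncode/GobDecode helpers. For orientation, here is a minimal, self-contained round-trip sketch in the same style; the point struct and helper names are illustrative only and do not come from any of the quoted repositories.)

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// gobEncode serializes any value into a gob byte slice, mirroring the
// gobEncode/GobEncode helpers that recur in the negatives above.
func gobEncode(v interface{}) ([]byte, error) {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(v); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// gobDecode reverses gobEncode; `into` must be a pointer.
func gobDecode(data []byte, into interface{}) error {
	return gob.NewDecoder(bytes.NewReader(data)).Decode(into)
}

func main() {
	type point struct{ X, Y int } // illustrative type, not from the dataset
	raw, err := gobEncode(point{3, 4})
	if err != nil {
		panic(err)
	}
	var p point
	if err := gobDecode(raw, &p); err != nil {
		panic(err)
	}
	fmt.Println(p) // {3 4}
}
```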
EncodeType will convert the given pointer into a gob encoded byte set, and return them
func (g *GobTranscoder) EncodeType(t interface{}) ([]byte, error) { g.encoderMut.Lock() defer func() { g.outBytes.Reset() g.encoderMut.Unlock() }() err := g.encoder.Encode(t) if err != nil { return nil, err } return g.outBytes.Bytes(), nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (t Type) GobEncode() ([]byte, error) {\n\tbuf := &bytes.Buffer{}\n\tenc := gob.NewEncoder(buf)\n\n\tgt := gobType{\n\t\tVersion: 0,\n\t\tImpl: t.typeImpl,\n\t}\n\n\terr := enc.Encode(gt)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error encoding cty.Type: %s\", err)\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func GobEncode(value interface{}) []byte {\n buf := bytes.NewBuffer(make([]byte, 0, 1024))\n encoder := gob.NewEncoder(buf)\n // encode unknown type might cause some error\n err := encoder.Encode(value)\n if err != nil {\n gobDebug.Panicf(\"Failed to encode a value: %+v\\n%v\\n\", value, err)\n }\n return buf.Bytes()\n}", "func (t *capsuleType) GobEncode() ([]byte, error) {\n\treturn nil, fmt.Errorf(\"cannot gob-encode capsule type %q\", t.FriendlyName(friendlyTypeName))\n}", "func (g *GobEncoderLight) EncodeType(t interface{}) ([]byte, error) {\n\tdefer func() {\n\t\tg.bytes.Reset()\n\t}()\n\terr := g.encoder.Encode(t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn g.bytes.Bytes(), nil\n}", "func gobEncode(value interface{}) ([]byte, error) {\n\tvar buf bytes.Buffer\n\tenc := gob.NewEncoder(&buf)\n\terr := enc.Encode(value)\n\treturn buf.Bytes(), err\n}", "func EncodeGob(p interface{}) (data []byte, err error) {\n\tb := bytes.Buffer{}\n\tencoder := gob.NewEncoder(&b)\n\terr = encoder.Encode(p)\n\tif err != nil {\n\t\treturn\n\t}\n\tdata = b.Bytes()\n\treturn\n}", "func gobEncode(data interface{}) []byte {\n\tvar buff bytes.Buffer\n\n\tenc := gob.NewEncoder(&buff)\n\terr := enc.Encode(data)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\treturn buff.Bytes()\n}", "func GobEncode(data interface{}) []byte {\n\tvar buff bytes.Buffer\n\n\tencoder := gob.NewEncoder(&buff)\n\tif err := encoder.Encode(data); err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\treturn buff.Bytes()\n}", "func (x *Rat) GobEncode() ([]byte, error) {}", "func (t Time) GobEncode() ([]byte, error) {}", "func GobEncode(v interface{}) ([]byte, error) {\n\tvar buf bytes.Buffer\n\terr := gob.NewEncoder(&buf).Encode(&v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}", "func (fc fctuple) GobEncode() ([]byte, error) {\n\tvar buf bytes.Buffer\n\tencoder := gob.NewEncoder(&buf)\n\n\tif err := encoder.Encode(&fc.feature); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := encoder.Encode(fc.POSTag); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func (info *ImageInfoType) GobEncode() (buf []byte, err error) {\n\tfields := []interface{}{info.data, info.smask, info.n, info.w, info.h, info.cs,\n\t\tinfo.pal, info.bpc, info.f, info.dp, info.trns, info.scale, info.dpi}\n\tw := new(bytes.Buffer)\n\tencoder := gob.NewEncoder(w)\n\tfor j := 0; j < len(fields) && err == nil; j++ {\n\t\terr = encoder.Encode(fields[j])\n\t}\n\tif err == nil {\n\t\tbuf = w.Bytes()\n\t}\n\treturn\n}", "func RatGobEncode(x *big.Rat,) ([]byte, error)", "func EncodeGob(data interface{}) ([]byte, error) {\n\tb := new(bytes.Buffer)\n\terr := gob.NewEncoder(b).Encode(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn b.Bytes(), nil\n}", "func packType(v interface{}) byte {\n\tswitch v.(type) {\n\tcase nil:\n\t\treturn ptNone\n\tcase string:\n\t\treturn ptString\n\tcase int32:\n\t\treturn ptInt\n\tcase float32:\n\t\treturn ptFloat\n\tcase uint32:\n\t\treturn ptPtr\n\tcase []uint16:\n\t\treturn ptWString\n\tcase color.NRGBA:\n\t\treturn ptColor\n\tcase uint64:\n\t\treturn ptUint64\n\tdefault:\n\t\tpanic(\"invalid vdf.Node\")\n\t}\n}", "func IntGobEncode(x *big.Int,) ([]byte, error)", "func 
EncodeGobZlib(p interface{}) (data []byte, err error) {\n\tb := bytes.Buffer{}\n\tcompressor, err := zlib.NewWriterLevel(&b, zlib.BestCompression)\n\tif err != nil {\n\t\treturn\n\t}\n\tencoder := gob.NewEncoder(compressor)\n\terr = encoder.Encode(p)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = compressor.Close()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdata = b.Bytes()\n\treturn\n}", "func gobInfoEncode(gob *storage.GobInfo) ([]byte, error) {\n\tbuf := new(bytes.Buffer)\n\tgobEnc := realgob.NewEncoder(buf)\n\terr := gobEnc.Encode(gob)\n\treturn buf.Bytes(), err\n}", "func (val Value) GobEncode() ([]byte, error) {\n\tbuf := &bytes.Buffer{}\n\tenc := gob.NewEncoder(buf)\n\n\tgv := gobValue{\n\t\tVersion: 0,\n\t\tTy: val.ty,\n\t\tV: val.v,\n\t}\n\n\terr := enc.Encode(gv)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error encoding cty.Value: %s\", err)\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func (k *Key) GobEncode() ([]byte, error) {\n\treturn []byte(k.Encode()), nil\n}", "func (sf singleFeature) GobEncode() ([]byte, error) {\n\tvar buf bytes.Buffer\n\tencoder := gob.NewEncoder(&buf)\n\n\tif err := encoder.Encode(sf.featureType); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := encoder.Encode(sf.value); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func Encode(data interface{}) []byte {\n v := Value{data}\n return v.Encode()\n}", "func (w *Writer) getType(obj interface{}, length int) []byte {\n\t// check length\n\tw.checkLength(length)\n\tvar tmp = make([]byte, 8)\n\n\tswitch objType := obj.(type) {\n\tcase int8:\n\t\ttmp[0] = byte(objType)\n\tcase uint8:\n\t\ttmp[0] = byte(objType)\n\tcase int16:\n\t\ttmp[0] = byte(objType)\n\t\ttmp[1] = byte(objType >> 8)\n\tcase uint16:\n\t\ttmp[0] = byte(objType)\n\t\ttmp[1] = byte(objType >> 8)\n\tcase int32:\n\t\ttmp[0] = byte(objType)\n\t\ttmp[1] = byte(objType >> 8)\n\t\ttmp[2] = byte(objType >> 16)\n\t\ttmp[3] = byte(objType >> 24)\n\tcase uint32:\n\t\ttmp[0] = byte(objType)\n\t\ttmp[1] = byte(objType >> 8)\n\t\ttmp[2] = byte(objType >> 16)\n\t\ttmp[3] = byte(objType >> 24)\n\tcase int:\n\t\ttmp[0] = byte(objType)\n\t\ttmp[1] = byte(objType >> 8)\n\t\ttmp[2] = byte(objType >> 16)\n\t\ttmp[3] = byte(objType >> 24)\n\tcase int64:\n\t\ttmp[0] = byte(objType)\n\t\ttmp[1] = byte(objType >> 8)\n\t\ttmp[2] = byte(objType >> 16)\n\t\ttmp[3] = byte(objType >> 24)\n\t\ttmp[4] = byte(objType >> 32)\n\t\ttmp[5] = byte(objType >> 40)\n\t\ttmp[6] = byte(objType >> 48)\n\t\ttmp[7] = byte(objType >> 56)\n\tcase uint64:\n\t\ttmp[0] = byte(objType)\n\t\ttmp[1] = byte(objType >> 8)\n\t\ttmp[2] = byte(objType >> 16)\n\t\ttmp[3] = byte(objType >> 24)\n\t\ttmp[4] = byte(objType >> 32)\n\t\ttmp[5] = byte(objType >> 40)\n\t\ttmp[6] = byte(objType >> 48)\n\t\ttmp[7] = byte(objType >> 56)\n\tdefault:\n\t\tlog.Error(\"Unknown data type:\", reflect.TypeOf(obj))\n\t\treturn nil\n\t}\n\n\treturn tmp[:length]\n}", "func (t Tensor) GobEncode() ([]byte, error) {\n\tvar buf bytes.Buffer\n\tenc := gob.NewEncoder(&buf)\n\n\terr := enc.Encode(t.DataType())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = enc.Encode(t.Shape())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch t.DataType() {\n\tcase tf.String:\n\t\t// TensorFlow Go package currently does not support\n\t\t// string serialization. 
Let's do it ourselves.\n\t\terr = enc.Encode(t.Tensor.Value().(string))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tdefault:\n\t\t_, err = t.WriteContentsTo(&buf)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func encodePtrArg(t reflect.Type) bool {\n\tif t.Implements(binaryMarshalerType) || t.Implements(textMarshalerType) {\n\t\treturn false\n\t}\n\treturn t.Kind() == reflect.Struct || t.Kind() == reflect.Array\n}", "func (_type Type) MarshalGQL(w io.Writer) {\n\tio.WriteString(w, strconv.Quote(_type.String()))\n}", "func (_type Type) MarshalGQL(w io.Writer) {\n\tio.WriteString(w, strconv.Quote(_type.String()))\n}", "func FloatGobEncode(x *big.Float,) ([]byte, error)", "func (set AppleSet) GobEncode() ([]byte, error) {\n\tset.s.RLock()\n\tdefer set.s.RUnlock()\n\n\tbuf := &bytes.Buffer{}\n\terr := gob.NewEncoder(buf).Encode(set.m)\n\treturn buf.Bytes(), err\n}", "func GobMarshal(i interface{}) ([]byte, error) {\n\tbuf := bytes.NewBuffer(nil)\n\tencoder := gob.NewEncoder(buf)\n\terr := encoder.Encode(i)\n\treturn buf.Bytes(), err\n}", "func (t Timestamp) GobEncode() ([]byte, error) {\n\treturn t.Time().MarshalBinary()\n}", "func encodeNodeWithType(n Node, w *io.BinWriter) {\n\tw.WriteB(byte(n.Type()))\n\tn.EncodeBinary(w)\n}", "func (b *ProxyTypeBox) Encode(buf *bin.Buffer) error {\n\tif b == nil || b.ProxyType == nil {\n\t\treturn fmt.Errorf(\"unable to encode ProxyTypeClass as nil\")\n\t}\n\treturn b.ProxyType.Encode(buf)\n}", "func (t *Type) GobDecode(buf []byte) error {\n\tr := bytes.NewReader(buf)\n\tdec := gob.NewDecoder(r)\n\n\tvar gt gobType\n\terr := dec.Decode(&gt)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error decoding cty.Type: %s\", err)\n\t}\n\tif gt.Version != 0 {\n\t\treturn fmt.Errorf(\"unsupported cty.Type encoding version %d; only 0 is supported\", gt.Version)\n\t}\n\n\tt.typeImpl = gt.Impl\n\n\treturn nil\n}", "func (g *Gammas) GobEncode() ([]byte, error) {\n\tbuff := bytes.Buffer{}\n\tif g != nil {\n\t\tfor _, g2 := range *g {\n\t\t\tbuff.Write(g2.Marshal())\n\t\t}\n\t}\n\treturn buff.Bytes(), nil\n}", "func (p *perceptron) GobEncode() ([]byte, error) {\n\tvar buf bytes.Buffer\n\tencoder := gob.NewEncoder(&buf)\n\n\t// if err := encoder.Encode(&p.weights); err != nil {\n\t// \treturn nil, err\n\t// }\n\n\tif err := encoder.Encode(&p.weightsSF); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := encoder.Encode(&p.weightsTF); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := encoder.Encode(&p.totals); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := encoder.Encode(&p.steps); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := encoder.Encode(p.instancesSeen); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func (e *Encoder) EncodePtr(ps ...unsafe.Pointer) []byte {\n\n\tengines := e.engines\n\tfor i := 0; i < len(engines) && i < len(ps); i++ {\n\t\tengines[i](e, ps[i])\n\t\te.objPos = 0\n\t}\n\treturn e.reset()\n}", "func RegisterType(x interface{}) {\n\tgob.Register(x)\n}", "func (bs endecBytes) Type() byte {\n\treturn bs[0] >> 4\n}", "func BenchmarkEncodingGobTweetStruct(b *testing.B) {\n\tb.StartTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tvar bb bytes.Buffer\n\t\tenc := gob.NewEncoder(&bb)\n\t\t_ = enc.Encode(tw)\n\t\t_ = bb.Bytes()\n\t}\n}", "func Encode(e encoding.Type, input interface{}) ([]byte, error) {\n\treturn encoding.Format(e).Encode(input)\n}", "func Encode(val interface{}, opts Options) ([]byte, error) {\n var ret []byte\n\n buf := newBytes()\n err := 
encodeInto(&buf, val, opts)\n\n /* check for errors */\n if err != nil {\n freeBytes(buf)\n return nil, err\n }\n\n /* htmlescape or correct UTF-8 if opts enable */\n old := buf\n buf = encodeFinish(old, opts)\n pbuf := ((*rt.GoSlice)(unsafe.Pointer(&buf))).Ptr\n pold := ((*rt.GoSlice)(unsafe.Pointer(&old))).Ptr\n\n /* return when allocated a new buffer */\n if pbuf != pold {\n freeBytes(old)\n return buf, nil\n }\n\n /* make a copy of the result */\n ret = make([]byte, len(buf))\n copy(ret, buf)\n\n freeBytes(buf)\n /* return the buffer into pool */\n return ret, nil\n}", "func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {\n\tif t.Implements(marshalerType) {\n\t\treturn marshalerEncoder\n\t}\n\n\tif t.Implements(binaryMarshalerType) {\n\t\treturn binaryMarshalerEncoder\n\t}\n\n\tif t.Implements(gobEncoderType) {\n\t\treturn gobEncoder\n\t}\n\n\tif t.Kind() != reflect.Ptr && allowAddr {\n\t\tft := reflect.PtrTo(t)\n\t\tif ft.Implements(marshalerType) {\n\t\t\treturn addrEncoder(marshalerEncoder, newTypeEncoder(t, false))\n\t\t}\n\t\tif ft.Implements(binaryMarshalerType) {\n\t\t\treturn addrEncoder(binaryMarshalerEncoder, newTypeEncoder(t, false))\n\t\t}\n\t\tif ft.Implements(gobEncoderType) {\n\t\t\treturn addrEncoder(gobEncoder, newTypeEncoder(t, false))\n\t\t}\n\t}\n\n\tswitch t.Kind() {\n\tcase reflect.Bool:\n\t\treturn boolEncoder\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn intEncoder\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\treturn uintEncoder\n\tcase reflect.Float32:\n\t\treturn float32Encoder\n\tcase reflect.Float64:\n\t\treturn float64Encoder\n\tcase reflect.Complex64:\n\t\treturn complex64Encoder\n\tcase reflect.Complex128:\n\t\treturn complex128Encoder\n\tcase reflect.String:\n\t\treturn stringEncoder\n\tcase reflect.Map:\n\t\tif !isNative(t.Key().Kind(), true) { // eventually will support pointer keys and structs, but not gonna happen until it's needed\n\t\t\treturn invalidEncoder\n\t\t}\n\t\treturn newMapEncoder(t)\n\tcase reflect.Slice, reflect.Array:\n\t\treturn newSliceEncoder(t.Elem())\n\tcase reflect.Struct:\n\t\treturn newStructEncoder(t)\n\tcase reflect.Ptr:\n\t\treturn ptrEncoder(newTypeEncoder(t.Elem(), false))\n\tcase reflect.Interface:\n\t\treturn ifaceEncoder\n\t}\n\treturn invalidEncoder\n}", "func interfaceEncode(enc *gob.Encoder, p Pythagoras) {\n\t//the encode will fail unless the concrete type has been\n\t//registered.We registered it in the calling function.\n\n\t//Pass pointer to interface so Encode sees(and hence sends) a value of interface type.\n\t//If we passed p directly it would see the concrete typoe instead.\n\t//See the blog post,\"The laws of Reflection\"for background\n\n\terr := enc.Encode(&p)\n\tif err != nil {\n\t\tlog.Fatal(\"Encode:\", err)\n\t}\n}", "func (e *encoder) marshalPointer(t reflect.Type, v reflect.Value, n nestedTypeData) error {\n\tswitch t.Elem().Kind() {\n\tcase reflect.Slice:\n\t\treturn e.marshalVector(t.Elem(), v.Elem(), n)\n\tcase reflect.String:\n\t\treturn e.marshalString(v.Elem(), n)\n\tcase reflect.Struct:\n\t\treturn e.marshalStructOrUnionPointer(t, v)\n\t}\n\treturn newValueError(ErrInvalidPointerType, t.Name())\n}", "func (t *TGen) Encode(val interface{}) {\n\tvalType := reflect.TypeOf(val)\n\tt.src = \"\\nfunc Encode(buf []byte, t \" + TypeName(valType.String(), true) + \") []byte {\\n\"\n\tt.stackName.Push(\"t\")\n\tt.encode(valType)\n\tt.src += \"return buf 
\\n}\\n\\n\"\n\tt.stackName.Clear()\n\tt.tmpNameGen.Clear()\n\t_, e := t.f.Write([]byte(t.src))\n\terr.Panic(e)\n}", "func (g *Generator) genTypeEncoder(t reflect.Type, in string, tags fieldTags, indent int, assumeNonEmpty bool) error {\n\tws := strings.Repeat(\" \", indent)\n\n\tmarshalerIface := reflect.TypeOf((*easyjson.Marshaler)(nil)).Elem()\n\tif reflect.PtrTo(t).Implements(marshalerIface) {\n\t\tfmt.Fprintln(g.out, ws+\"(\"+in+\").MarshalEasyJSON(out)\")\n\t\treturn nil\n\t}\n\n\tmarshalerIface = reflect.TypeOf((*json.Marshaler)(nil)).Elem()\n\tif reflect.PtrTo(t).Implements(marshalerIface) {\n\t\tfmt.Fprintln(g.out, ws+\"out.Raw( (\"+in+\").MarshalJSON() )\")\n\t\treturn nil\n\t}\n\n\tmarshalerIface = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()\n\tif reflect.PtrTo(t).Implements(marshalerIface) {\n\t\tfmt.Fprintln(g.out, ws+\"out.RawText( (\"+in+\").MarshalText() )\")\n\t\treturn nil\n\t}\n\n\terr := g.genTypeEncoderNoCheck(t, in, tags, indent, assumeNonEmpty)\n\treturn err\n}", "func Marshal(data interface{}, typ DataFormat) []byte {\n\tswitch typ {\n\tcase GOB:\n\t\tvar buf bytes.Buffer\n\t\tgob.NewEncoder(&buf).Encode(data)\n\t\treturn buf.Bytes()\n\n\tcase JSON:\n\t\tbuf, err := json.Marshal(data)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn buf\n\n\tdefault:\n\t\tpanic(fmt.Errorf(\"Unrecognized data type\"))\n\t}\n}", "func NewGobCode(conn io.ReadWriteCloser) Codec {\n\tbuf := bufio.NewWriter(conn)\n\treturn &GobCodec{conn: conn, buf: buf, dec: gob.NewDecoder(conn), enc: gob.NewEncoder(buf)}\n}", "func ToGob(src interface{}) ([]byte, error) {\n\treturn NewGobber().To(src)\n}", "func Marshal(val interface{}) ([]byte, error) {}", "func (v valuePointer) Encode() []byte {\n\tb := make([]byte, valuePointerSize)\n\n\t// Copy over the content from p to b.\n\t*(*valuePointer)(unsafe.Pointer(&b[0])) = v\n\n\treturn b\n}", "func (v *Value) Encode() []byte {\n var output []byte\n switch d := v.data.(type) {\n case []byte:\n output = d\n case string:\n output = []byte(d)\n case bool:\n output = strconv.AppendBool(output, d)\n case float32, float64:\n f64 := reflect.ValueOf(v.data).Float()\n output = strconv.AppendFloat(output, f64, 'g', -1, 64)\n case int, int8, int16, int32, int64:\n i64 := reflect.ValueOf(v.data).Int()\n output = strconv.AppendInt(output, i64, 10)\n case uint, uint8, uint16, uint32, uint64:\n u64 := reflect.ValueOf(v.data).Uint()\n output = strconv.AppendUint(output, u64, 10)\n default:\n if j, e := json.Marshal(v.data); e == nil {\n output = j\n } else {\n panic(\"Value.Encode: \" + e.Error())\n }\n }\n return output\n}", "func (s *CountMinSketch) GobEncode() ([]byte, error) {\n\tvar buf bytes.Buffer\n\t_, err := s.WriteTo(&buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}", "func (*Store) RegisterType(i interface{}) {\n\tgob.Register(i)\n}", "func (d *DFA) GobEncode() ([]byte, error) {\n\tbuffer := new(bytes.Buffer)\n\tencoder := gob.NewEncoder(buffer)\n\tif err := encoder.Encode(d.initial); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not GOB encode initial state\")\n\t}\n\tif err := encoder.Encode(d.table); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not GOB encode sparse table\")\n\t}\n\treturn buffer.Bytes(), nil\n}", "func gobFlattenRegister(t reflect.Type) {\n\tif t.Kind() == reflect.Interface {\n\t\treturn\n\t}\n\tfor t.Kind() == reflect.Ptr {\n\t\tt = t.Elem()\n\t}\n\tpz := reflect.New(t)\n\tgob.Register(pz.Elem().Interface())\n}", "func EncodingType(value string) Option {\n\treturn 
addParam(\"encoding-type\", value)\n}", "func (a *Array) GobEncode() ([]byte, error) {\n\tbuf := &bytes.Buffer{}\n\tenc := gob.NewEncoder(buf)\n\n\terr := checkErr(\n\t\tenc.Encode(a.bits),\n\t\tenc.Encode(a.length),\n\t)\n\n\tif err != nil {\n\t\terr = fmt.Errorf(\"bit: encode failed (%v)\", err)\n\t}\n\n\treturn buf.Bytes(), err\n}", "func encode(buf *bytes.Buffer, v reflect.Value) error {\n\tswitch v.Kind() {\n\tcase reflect.Invalid: // ignore\n\n\tcase reflect.Float32, reflect.Float64:\n\t\tfmt.Fprintf(buf, \"%f\", v.Float())\n\n\tcase reflect.Complex128, reflect.Complex64:\n\t\tc := v.Complex()\n\t\tfmt.Fprintf(buf, \"#C(%f %f)\", real(c), imag(c))\n\n\tcase reflect.Bool:\n\t\tif v.Bool() {\n\t\t\tfmt.Fprintf(buf, \"t\")\n\t\t}\n\n\tcase reflect.Interface:\n\t\t// type output\n\t\tt := v.Elem().Type()\n\n\t\tleftBuffer := new(bytes.Buffer)\n\t\trightBuffer := new(bytes.Buffer)\n\n\t\tif t.Name() == \"\" { // 名前がつけられてないtypeはそのまま表示する\n\t\t\tfmt.Fprintf(leftBuffer, \"%q\", t)\n\t\t} else {\n\t\t\tfmt.Fprintf(leftBuffer, \"\\\"%s.%s\\\" \", t.PkgPath(), t.Name()) //一意ではないとはこういうことか?\n\t\t}\n\n\t\t// value output\n\t\tif err := encode(rightBuffer, v.Elem()); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(rightBuffer.Bytes()) != 0 {\n\t\t\tbuf.WriteByte('(')\n\t\t\tbuf.Write(leftBuffer.Bytes())\n\t\t\tbuf.WriteByte(' ')\n\t\t\tbuf.Write(rightBuffer.Bytes())\n\t\t\tbuf.WriteByte(')')\n\t\t}\n\n\tcase reflect.Int, reflect.Int8, reflect.Int16,\n\t\treflect.Int32, reflect.Int64:\n\t\tfmt.Fprintf(buf, \"%d\", v.Int())\n\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16,\n\t\treflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\tfmt.Fprintf(buf, \"%d\", v.Uint())\n\n\tcase reflect.String:\n\t\tfmt.Fprintf(buf, \"%q\", v.String())\n\n\tcase reflect.Ptr:\n\t\treturn encode(buf, v.Elem())\n\n\tcase reflect.Array, reflect.Slice: // (value ...)\n\n\t\tcontent := new(bytes.Buffer)\n\n\t\tisFirst := true\n\n\t\tfor i := 0; i < v.Len(); i++ {\n\t\t\tif isFirst {\n\t\t\t\tisFirst = false\n\t\t\t\tcontent.WriteByte(' ')\n\t\t\t}\n\t\t\tif err := encode(content, v.Index(i)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif len(content.Bytes()) != 0 {\n\t\t\tbuf.WriteByte('(')\n\t\t\tbuf.Write(content.Bytes())\n\t\t\tbuf.WriteByte(')')\n\t\t}\n\n\tcase reflect.Struct: // ((name value) ...)\n\n\t\tcontent := new(bytes.Buffer)\n\n\t\tisFirst := true\n\n\t\tfor i := 0; i < v.NumField(); i++ {\n\t\t\trightBuffer := new(bytes.Buffer)\n\t\t\tif err := encode(rightBuffer, v.Field(i)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif len(rightBuffer.Bytes()) != 0 {\n\t\t\t\tif isFirst {\n\t\t\t\t\tcontent.WriteByte(' ')\n\t\t\t\t\tisFirst = false\n\t\t\t\t}\n\t\t\t\tcontent.WriteByte('(')\n\t\t\t\tfmt.Fprintf(content, \"%s\", v.Type().Field(i).Name)\n\t\t\t\tcontent.WriteByte(' ')\n\t\t\t\tcontent.Write(rightBuffer.Bytes())\n\t\t\t\tcontent.WriteByte(')')\n\t\t\t}\n\t\t}\n\n\t\tif len(content.Bytes()) != 0 {\n\t\t\tbuf.WriteByte('(')\n\t\t\tbuf.Write(content.Bytes())\n\t\t\tbuf.WriteByte(')')\n\t\t}\n\n\tcase reflect.Map: // ((key value) ...)\n\t\tisFirst := true\n\t\tcontent := new(bytes.Buffer)\n\n\t\tfor _, key := range v.MapKeys() {\n\t\t\tif isFirst {\n\t\t\t\tcontent.WriteByte(' ')\n\t\t\t\tisFirst = false\n\t\t\t}\n\n\t\t\tleftBuffer := new(bytes.Buffer)\n\t\t\trightBuffer := new(bytes.Buffer)\n\n\t\t\tif err := encode(leftBuffer, key); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := encode(rightBuffer, v.MapIndex(key)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif 
len(rightBuffer.Bytes()) != 0 {\n\t\t\t\tcontent.WriteByte('(')\n\t\t\t\tcontent.Write(leftBuffer.Bytes())\n\t\t\t\tcontent.WriteByte(' ')\n\t\t\t\tcontent.Write(rightBuffer.Bytes())\n\t\t\t\tcontent.WriteByte(')')\n\t\t\t}\n\t\t}\n\n\t\tif len(content.Bytes()) != 0 {\n\t\t\tbuf.WriteByte('(')\n\t\t\tbuf.Write(content.Bytes())\n\t\t\tbuf.WriteByte(')')\n\t\t}\n\n\tdefault: // float, complex, bool, chan, func, interface\n\t\treturn fmt.Errorf(\"unsupported type: %s\", v.Type())\n\t}\n\treturn nil\n}", "func registerValueType(value interface{}) {\n\tt := reflect.TypeOf(value)\n\tv := reflect.New(t).Elem().Interface()\n\tgob.Register(v)\n}", "func (d *Person) GobEncode() ([]byte, error) {\n\tw := new(bytes.Buffer)\n\tencoder := gob.NewEncoder(w)\n\terr := encoder.Encode(d.Id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = encoder.Encode(d.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn w.Bytes(), nil\n}", "func MustGobEncode(o interface{}) []byte {\n\toEncoded, err := GobEncode(o)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn oEncoded\n}", "func (d Decimal) GobEncode() ([]byte, error) {\n\treturn d.MarshalBinary()\n}", "func EncodeGobGzip(p interface{}) (data []byte, err error) {\n\tb := bytes.Buffer{}\n\tcompressor, err := gzip.NewWriterLevel(&b, gzip.BestCompression)\n\tif err != nil {\n\t\treturn\n\t}\n\tencoder := gob.NewEncoder(compressor)\n\terr = encoder.Encode(p)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = compressor.Close()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdata = b.Bytes()\n\treturn\n}", "func TestEncodeDecodeGob(t *testing.T) {\n\ttestEncodeDecodeFunctions(t,\n\t\tencodeLeaseRequestGob, encodeLeaseReplyGob,\n\t\tdecodeLeaseRequestGob, decodeLeaseReplyGob)\n}", "func execWriteType(_ int, p *gop.Context) {\n\targs := p.GetArgs(3)\n\ttypes.WriteType(args[0].(*bytes.Buffer), args[1].(types.Type), args[2].(types.Qualifier))\n}", "func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {\n\t// Not sure if the following code is necessary\n\t/*\n\t\tif t.Implements(marshalerType) {\n\t\t\treturn marshalerEncoder\n\t\t}\n\t\tif t.Kind() != reflect.Ptr && allowAddr {\n\t\t\tif reflect.PtrTo(t).Implements(marshalerType) {\n\t\t\t\treturn newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false))\n\t\t\t}\n\t\t}\n\n\t\tif t.Implements(textMarshalerType) {\n\t\t\treturn textMarshalerEncoder\n\t\t}\n\t\tif t.Kind() != reflect.Ptr && allowAddr {\n\t\t\tif reflect.PtrTo(t).Implements(textMarshalerType) {\n\t\t\t\treturn newCondAddrEncoder(addrTextMarshalerEncoder, newTypeEncoder(t, false))\n\t\t\t}\n\t\t}\n\t*/\n\n\tswitch t.Kind() {\n\tcase reflect.Bool:\n\t\treturn boolEncoder\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn intEncoder\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\treturn uintEncoder\n\tcase reflect.String:\n\t\treturn stringEncoder\n\tcase reflect.Interface:\n\t\t// return interfaceEncoder\n\t\treturn interfaceEncoder\n\tcase reflect.Struct:\n\t\t// return newStructEncoder(t)\n\t\treturn structEncoder\n\tcase reflect.Map:\n\t\t// return newMapEncoder(t)\n\t\treturn mapEncoder\n\tcase reflect.Slice:\n\t\t// return newSliceEncoder(t)\n\t\treturn sliceEncoder\n\tcase reflect.Array:\n\t\t// return newArrayEncoder(t)\n\t\treturn arrayEncoder\n\tcase reflect.Ptr:\n\t\t// return newPtrEncoder(t)\n\t\treturn ptrEncoder\n\tdefault:\n\t\treturn unsupportedTypeEncoder\n\t}\n}", "func (e *Encoder) PutGOB(val interface{}) {\n\tgobe := 
gob.NewEncoder(e)\n\tif err := gobe.Encode(val); err != nil {\n\t\tlog.Panicf(\"gob: failed to encode %v: %v\", val, err)\n\t}\n}", "func encode(classNames map[int][]string, classes []symbolSet) []byte {\n\tpayload := gobClasses{\n\t\tClasses: make([]gobClass, 0, len(classNames)),\n\t}\n\n\t// index of unique strings\n\tstrings := make(map[string]int32)\n\tstringIndex := func(s string) int32 {\n\t\ti, ok := strings[s]\n\t\tif !ok {\n\t\t\ti = int32(len(payload.Strings))\n\t\t\tstrings[s] = i\n\t\t\tpayload.Strings = append(payload.Strings, s)\n\t\t}\n\t\treturn i\n\t}\n\n\tvar refs []symbol // recycled temporary\n\tfor class, names := range classNames {\n\t\tset := classes[class]\n\n\t\t// names, sorted\n\t\tsort.Strings(names)\n\t\tgobDecls := make([]int32, len(names))\n\t\tfor i, name := range names {\n\t\t\tgobDecls[i] = stringIndex(name)\n\t\t}\n\n\t\t// refs, sorted by ascending (PackageID, name)\n\t\tgobRefs := make([]int32, 0, 2*len(set))\n\t\tfor _, sym := range set.appendSorted(refs[:0]) {\n\t\t\tgobRefs = append(gobRefs,\n\t\t\t\tstringIndex(string(sym.pkg)),\n\t\t\t\tstringIndex(sym.name))\n\t\t}\n\t\tpayload.Classes = append(payload.Classes, gobClass{\n\t\t\tDecls: gobDecls,\n\t\t\tRefs: gobRefs,\n\t\t})\n\t}\n\n\treturn classesCodec.Encode(payload)\n}", "func (v *Type) Encode(sw stream.Writer) error {\n\tif err := sw.WriteStructBegin(); err != nil {\n\t\treturn err\n\t}\n\n\tif v.SimpleType != nil {\n\t\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 1, Type: wire.TI32}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := v.SimpleType.Encode(sw); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sw.WriteFieldEnd(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif v.SliceType != nil {\n\t\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 2, Type: wire.TStruct}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := v.SliceType.Encode(sw); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sw.WriteFieldEnd(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif v.KeyValueSliceType != nil {\n\t\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 3, Type: wire.TStruct}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := v.KeyValueSliceType.Encode(sw); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sw.WriteFieldEnd(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif v.MapType != nil {\n\t\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 4, Type: wire.TStruct}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := v.MapType.Encode(sw); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sw.WriteFieldEnd(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif v.ReferenceType != nil {\n\t\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 5, Type: wire.TStruct}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := v.ReferenceType.Encode(sw); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sw.WriteFieldEnd(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif v.PointerType != nil {\n\t\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 6, Type: wire.TStruct}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := v.PointerType.Encode(sw); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sw.WriteFieldEnd(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tcount := 0\n\tif v.SimpleType != nil {\n\t\tcount++\n\t}\n\tif v.SliceType != nil {\n\t\tcount++\n\t}\n\tif v.KeyValueSliceType != nil {\n\t\tcount++\n\t}\n\tif v.MapType != nil {\n\t\tcount++\n\t}\n\tif v.ReferenceType != nil {\n\t\tcount++\n\t}\n\tif v.PointerType != nil 
{\n\t\tcount++\n\t}\n\n\tif count != 1 {\n\t\treturn fmt.Errorf(\"Type should have exactly one field: got %v fields\", count)\n\t}\n\n\treturn sw.WriteStructEnd()\n}", "func (s *Store) GobEncode() ([]byte, error) {\n\ts.access.RLock()\n\tdefer s.access.RUnlock()\n\n\tbuf := new(bytes.Buffer)\n\n\tencoder := gob.NewEncoder(buf)\n\terr := encoder.Encode(storeVersion)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = encoder.Encode(s.data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func (i Type) MarshalGQL(w io.Writer) {\n\tfmt.Fprint(w, strconv.Quote(i.String()))\n}", "func (vd *tValueDiffer) writeType(idx int, t reflect.Type, hl bool) {\n\tb := vd.bufi(idx)\n\tif t.PkgPath() == \"\" {\n\t\tswitch t.Kind() {\n\t\tcase reflect.Ptr:\n\t\t\tb.Write(hl, \"*\")\n\t\t\tvd.writeType(idx, t.Elem(), hl)\n\t\tcase reflect.Func:\n\t\t\tvd.writeTypeFunc(idx, t, hl)\n\t\tcase reflect.Chan:\n\t\t\tvd.writeTypeHeadChan(idx, t, hl, false)\n\t\t\tvd.writeType(idx, t.Elem(), hl)\n\t\tcase reflect.Array:\n\t\t\tb.Write(hl, \"[\", t.Len(), \"]\")\n\t\t\tvd.writeType(idx, t.Elem(), hl)\n\t\tcase reflect.Slice:\n\t\t\tb.Write(hl, \"[]\")\n\t\t\tvd.writeType(idx, t.Elem(), hl)\n\t\tcase reflect.Map:\n\t\t\tb.Write(hl, \"map[\")\n\t\t\tvd.writeType(idx, t.Key(), hl)\n\t\t\tb.Write(hl, \"]\")\n\t\t\tvd.writeType(idx, t.Elem(), hl)\n\t\tcase reflect.Struct: // must be unnamed\n\t\t\tb.Write(hl, \"struct\")\n\t\tdefault:\n\t\t\tb.Write(hl, t)\n\t\t}\n\t} else {\n\t\tb.Write(hl, t)\n\t}\n}", "func EncodeInto(buf *[]byte, val interface{}, opts Options) error {\n err := encodeInto(buf, val, opts)\n if err != nil {\n return err\n }\n *buf = encodeFinish(*buf, opts)\n return err\n}", "func (e *commonFormatEncoder) Type() string {\n\treturn \"json\"\n}", "func encode(message interface{}) *bytes.Buffer {\n\tbuffer := &bytes.Buffer{}\n\t// Write struct's data as bytes\n\terr := binary.Write(buffer, binary.BigEndian, message)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn buffer\n}", "func GobGenerateEncoder(w io.Writer) Encoder {\n\treturn gob.NewEncoder(w)\n}", "func (o BlobOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Blob) pulumi.StringOutput { return v.Type }).(pulumi.StringOutput)\n}", "func phpType(smdType string) string {\n\tswitch smdType {\n\tcase smd.String:\n\t\treturn phpString\n\tcase smd.Array:\n\t\treturn phpArray\n\tcase smd.Boolean:\n\t\treturn phpBoolean\n\tcase smd.Float:\n\t\treturn phpFloat\n\tcase smd.Integer:\n\t\treturn phpInt\n\tcase smd.Object:\n\t\treturn phpObject\n\t}\n\treturn \"mixed\"\n}", "func GobDecode(buffer []byte, value interface{}) error {\n buf := bytes.NewBuffer(buffer)\n decoder := gob.NewDecoder(buf)\n err := decoder.Decode(value)\n if err != nil {\n return gobDebug.Error(err)\n }\n return nil\n}", "func (e EncoderV2) encode(iVal interface{}) error {\n\n\tvar err error\n\tswitch val := iVal.(type) {\n\tcase nil:\n\t\terr = e.encodeNil()\n\tcase bool:\n\t\terr = e.encodeBool(val)\n\tcase int:\n\t\terr = e.encodeInt(int64(val))\n\tcase int8:\n\t\terr = e.encodeInt(int64(val))\n\tcase int16:\n\t\terr = e.encodeInt(int64(val))\n\tcase int32:\n\t\terr = e.encodeInt(int64(val))\n\tcase int64:\n\t\terr = e.encodeInt(val)\n\tcase uint:\n\t\terr = e.encodeInt(int64(val))\n\tcase uint8:\n\t\terr = e.encodeInt(int64(val))\n\tcase uint16:\n\t\terr = e.encodeInt(int64(val))\n\tcase uint32:\n\t\terr = e.encodeInt(int64(val))\n\tcase uint64:\n\t\tif val > math.MaxInt64 {\n\t\t\treturn errors.New(\"Integer too big: %d. 
Max integer supported: %d\", val, int64(math.MaxInt64))\n\t\t}\n\t\terr = e.encodeInt(int64(val))\n\tcase float32:\n\t\terr = e.encodeFloat(float64(val))\n\tcase float64:\n\t\terr = e.encodeFloat(val)\n\tcase string:\n\t\terr = e.encodeString(val)\n\tcase []interface{}:\n\t\terr = e.encodeSlice(val)\n\tcase map[string]interface{}:\n\t\terr = e.encodeMap(val)\n\tcase gotime.Date:\n\t\terr = e.encodeDate(val)\n\tcase gotime.Clock:\n\t\terr = e.encodeClock(val)\n\tcase gotime.LocalClock:\n\t\terr = e.encodeLocalClock(val)\n\tcase gotime.LocalTime:\n\t\terr = e.encodeLocalTime(val)\n\tcase time.Time:\n\t\terr = e.encodeTime(val)\n\tcase time.Duration:\n\t\terr = e.encodeDuration(val)\n\tcase structures.Structure:\n\t\terr = e.encodeStructure(val)\n\tdefault:\n\t\t// arbitrary slice types\n\t\tif reflect.TypeOf(iVal).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(iVal)\n\t\t\tnewSlice := make([]interface{}, s.Len())\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tnewSlice[i] = s.Index(i).Interface()\n\t\t\t}\n\t\t\treturn e.encodeSlice(newSlice)\n\t\t}\n\n\t\treturn errors.New(\"Unrecognized type when encoding data for Bolt transport: %T %+v\", val, val)\n\t}\n\n\treturn err\n}", "func encode(sc *stmtctx.StatementContext, b []byte, vals []types.Datum, comparable bool) (_ []byte, err error) {\n\tb = preRealloc(b, vals, comparable)\n\tfor i, length := 0, len(vals); i < length; i++ {\n\t\tswitch vals[i].Kind() {\n\t\tcase types.KindInt64:\n\t\t\tb = encodeSignedInt(b, vals[i].GetInt64(), comparable)\n\t\tcase types.KindUint64:\n\t\t\tb = encodeUnsignedInt(b, vals[i].GetUint64(), comparable)\n\t\tcase types.KindFloat32, types.KindFloat64:\n\t\t\tb = append(b, floatFlag)\n\t\t\tb = EncodeFloat(b, vals[i].GetFloat64())\n\t\tcase types.KindString:\n\t\t\tb = encodeString(b, vals[i], comparable)\n\t\tcase types.KindBytes:\n\t\t\tb = encodeBytes(b, vals[i].GetBytes(), comparable)\n\t\tcase types.KindMysqlTime:\n\t\t\tb = append(b, uintFlag)\n\t\t\tb, err = EncodeMySQLTime(sc, vals[i].GetMysqlTime(), mysql.TypeUnspecified, b)\n\t\t\tif err != nil {\n\t\t\t\treturn b, err\n\t\t\t}\n\t\tcase types.KindMysqlDuration:\n\t\t\t// duration may have negative value, so we cannot use String to encode directly.\n\t\t\tb = append(b, durationFlag)\n\t\t\tb = EncodeInt(b, int64(vals[i].GetMysqlDuration().Duration))\n\t\tcase types.KindMysqlDecimal:\n\t\t\tb = append(b, decimalFlag)\n\t\t\tb, err = EncodeDecimal(b, vals[i].GetMysqlDecimal(), vals[i].Length(), vals[i].Frac())\n\t\t\tif terror.ErrorEqual(err, types.ErrTruncated) {\n\t\t\t\terr = sc.HandleTruncate(err)\n\t\t\t} else if terror.ErrorEqual(err, types.ErrOverflow) {\n\t\t\t\terr = sc.HandleOverflow(err, err)\n\t\t\t}\n\t\tcase types.KindMysqlEnum:\n\t\t\tb = encodeUnsignedInt(b, uint64(vals[i].GetMysqlEnum().ToNumber()), comparable)\n\t\tcase types.KindMysqlSet:\n\t\t\tb = encodeUnsignedInt(b, uint64(vals[i].GetMysqlSet().ToNumber()), comparable)\n\t\tcase types.KindMysqlBit, types.KindBinaryLiteral:\n\t\t\t// We don't need to handle errors here since the literal is ensured to be able to store in uint64 in convertToMysqlBit.\n\t\t\tvar val uint64\n\t\t\tval, err = vals[i].GetBinaryLiteral().ToInt(sc)\n\t\t\tterror.Log(errors.Trace(err))\n\t\t\tb = encodeUnsignedInt(b, val, comparable)\n\t\tcase types.KindMysqlJSON:\n\t\t\tb = append(b, jsonFlag)\n\t\t\tj := vals[i].GetMysqlJSON()\n\t\t\tb = append(b, j.TypeCode)\n\t\t\tb = append(b, j.Value...)\n\t\tcase types.KindNull:\n\t\t\tb = append(b, NilFlag)\n\t\tcase types.KindMinNotNull:\n\t\t\tb = append(b, 
bytesFlag)\n\t\tcase types.KindMaxValue:\n\t\t\tb = append(b, maxFlag)\n\t\tdefault:\n\t\t\treturn b, errors.Errorf(\"unsupport encode type %d\", vals[i].Kind())\n\t\t}\n\t}\n\n\treturn b, errors.Trace(err)\n}", "func (t *LegacyTranscoder) Encode(value interface{}) ([]byte, uint32, error) {\n\tvar bytes []byte\n\tvar flags uint32\n\tvar err error\n\n\tswitch typeValue := value.(type) {\n\tcase []byte:\n\t\tbytes = typeValue\n\t\tflags = gocbcore.EncodeCommonFlags(gocbcore.BinaryType, gocbcore.NoCompression)\n\tcase *[]byte:\n\t\tbytes = *typeValue\n\t\tflags = gocbcore.EncodeCommonFlags(gocbcore.BinaryType, gocbcore.NoCompression)\n\tcase string:\n\t\tbytes = []byte(typeValue)\n\t\tflags = gocbcore.EncodeCommonFlags(gocbcore.StringType, gocbcore.NoCompression)\n\tcase *string:\n\t\tbytes = []byte(*typeValue)\n\t\tflags = gocbcore.EncodeCommonFlags(gocbcore.StringType, gocbcore.NoCompression)\n\tcase json.RawMessage:\n\t\tbytes = typeValue\n\t\tflags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression)\n\tcase *json.RawMessage:\n\t\tbytes = *typeValue\n\t\tflags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression)\n\tcase *interface{}:\n\t\treturn t.Encode(*typeValue)\n\tdefault:\n\t\tbytes, err = json.Marshal(value)\n\t\tif err != nil {\n\t\t\treturn nil, 0, err\n\t\t}\n\t\tflags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression)\n\t}\n\n\t// No compression supported currently\n\n\treturn bytes, flags, nil\n}", "func (c *coder) encoderForType(keyOrValue, typ string) (func([]byte) (json.RawMessage, error), error) {\n\tvar enc func([]byte) string\n\tswitch typ {\n\tcase \"json\":\n\t\treturn func(data []byte) (json.RawMessage, error) {\n\t\t\tif err := json.Unmarshal(data, new(json.RawMessage)); err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid JSON value %q: %v\", data, err)\n\t\t\t}\n\t\t\treturn json.RawMessage(data), nil\n\t\t}, nil\n\tcase \"hex\":\n\t\tenc = hex.EncodeToString\n\tcase \"base64\":\n\t\tenc = base64.StdEncoding.EncodeToString\n\tcase \"string\":\n\t\tenc = func(data []byte) string {\n\t\t\treturn string(data)\n\t\t}\n\tcase \"avro\":\n\t\treturn c.encodeAvro, nil\n\tcase \"none\":\n\t\treturn func([]byte) (json.RawMessage, error) {\n\t\t\treturn nil, nil\n\t\t}, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(`unsupported decoder %#v, only json, string, hex, base64 and avro are supported`, typ)\n\t}\n\treturn func(data []byte) (json.RawMessage, error) {\n\t\tif data == nil {\n\t\t\treturn nullJSON, nil\n\t\t}\n\t\tdata1, err := json.Marshal(enc(data))\n\t\tif err != nil {\n\t\t\t// marshaling a string cannot fail but be defensive.\n\t\t\treturn nil, err\n\t\t}\n\t\treturn json.RawMessage(data1), nil\n\t}, nil\n}", "func Encode(out *bytes.Buffer, byts []byte) (*EncodeInfo, error) {\n\tt := createTree(byts)\n\tconst codeSize = 32 // not optimize\n\tc := code{\n\t\tbits: make([]bool, 0, codeSize),\n\t}\n\te := newEncoder()\n\tif err := e.encode(t, c); err != nil {\n\t\treturn nil, err\n\t}\n\tsb, err := e.write(out, byts)\n\tei := &EncodeInfo{\n\t\tbytCodeMap: e.bytCodeMap,\n\t\tSize: sb,\n\t}\n\treturn ei, err\n}", "func (sym *symtab) pyObjectToGo(typ types.Type, sy *symbol, objnm string) (string, error) {\n\tbstr := \"\"\n\tbt, isb := typ.Underlying().(*types.Basic)\n\tswitch {\n\t// case vsym.goname == \"interface{}\":\n\t// \tbstr += fmt.Sprintf(\"C.PyTuple_SetItem(%s, %d, C.gopy_build_string(%s(%s)%s))\\n\", varnm, i, vsym.go2py, anm, vsym.go2pyParenEx)\n\t// case vsym.hasHandle(): // note: assuming int64 
handles\n\t// \tbstr += fmt.Sprintf(\"C.PyTuple_SetItem(%s, %d, C.gopy_build_int64(C.int64_t(%s(%s)%s)))\\n\", varnm, i, vsym.go2py, anm, vsym.go2pyParenEx)\n\tcase isb:\n\t\tbk := bt.Kind()\n\t\tswitch {\n\t\tcase types.Int <= bk && bk <= types.Int64:\n\t\t\tbstr += fmt.Sprintf(\"%s(C.PyLong_AsLongLong(%s))\", sy.goname, objnm)\n\t\tcase types.Uint <= bk && bk <= types.Uintptr:\n\t\t\tbstr += fmt.Sprintf(\"%s(C.PyLong_AsUnsignedLongLong(%s))\", sy.goname, objnm)\n\t\tcase types.Float32 <= bk && bk <= types.Float64:\n\t\t\tbstr += fmt.Sprintf(\"%s(C.PyFloat_AsDouble(%s))\", sy.goname, objnm)\n\t\tcase bk == types.String:\n\t\t\tbstr += fmt.Sprintf(\"C.GoString(C.PyBytes_AsString(%s))\", objnm)\n\t\tcase bk == types.Bool:\n\t\t\tbstr += fmt.Sprintf(\"boolPyToGo(C.char(C.PyLong_AsLongLong(%s)))\", objnm)\n\t\t}\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"pyObjectToGo: type not handled: %s\", typ.String())\n\t}\n\treturn bstr, nil\n}", "func (g *Graph) GobEncode() ([]byte, error) {\n\tgGob := graphGob{[]string{}, map[string]map[string]int{}}\n\n\t// add vertexes and edges to gGob\n\tfor key, v := range g.vertexes {\n\t\tgGob.Vertexes = append(gGob.Vertexes, key)\n\n\t\tgGob.Edges[key] = map[string]int{}\n\n\t\t// for each neighbor...\n\t\tfor neighbor, weight := range v.neighbors {\n\t\t\t// save the edge connection to the neighbor into the edges map\n\t\t\tgGob.Edges[key][neighbor.key] = weight\n\t\t}\n\t}\n\n\t// encode gGob\n\tbuf := &bytes.Buffer{}\n\tenc := gob.NewEncoder(buf)\n\terr := enc.Encode(gGob)\n\n\treturn buf.Bytes(), err\n}", "func Encode(b *bytes.Buffer, v *Value) (int, error) {\n\tswitch v.Type {\n\tcase DOUBLE:\n\t\treturn EncodeDouble(b, v.value.(float64))\n\n\tcase BOOLEAN:\n\t\treturn EncodeBoolean(b, v.value.(bool))\n\n\tcase STRING:\n\t\tfallthrough\n\tcase LONG_STRING:\n\t\treturn EncodeString(b, v.value.(string))\n\n\tcase OBJECT:\n\t\treturn EncodeObject(b, v)\n\n\tcase ECMA_ARRAY:\n\t\treturn EncodeECMAArray(b, v)\n\n\tcase STRICT_ARRAY:\n\t\treturn EncodeStrictArray(b, v)\n\n\tcase DATE:\n\t\treturn EncodeDate(b, v.value.(float64), v.offset)\n\n\tcase NULL:\n\t\treturn EncodeNull(b)\n\n\tcase UNDEFINED:\n\t\treturn EncodeUndefined(b)\n\n\tdefault:\n\t\tpanic(fmt.Errorf(\"unrecognized AMF type 0x%02X\", v.Type))\n\t}\n}", "func dumptype(t *_type) {\n\tif t == nil {\n\t\treturn\n\t}\n\n\t// If we've definitely serialized the type before,\n\t// no need to do it again.\n\tb := &typecache[t.hash&(typeCacheBuckets-1)]\n\tif t == b.t[0] {\n\t\treturn\n\t}\n\tfor i := 1; i < typeCacheAssoc; i++ {\n\t\tif t == b.t[i] {\n\t\t\t// Move-to-front\n\t\t\tfor j := i; j > 0; j-- {\n\t\t\t\tb.t[j] = b.t[j-1]\n\t\t\t}\n\t\t\tb.t[0] = t\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Might not have been dumped yet. 
Dump it and\n\t// remember we did so.\n\tfor j := typeCacheAssoc - 1; j > 0; j-- {\n\t\tb.t[j] = b.t[j-1]\n\t}\n\tb.t[0] = t\n\n\t// dump the type\n\tdumpint(tagType)\n\tdumpint(uint64(uintptr(unsafe.Pointer(t))))\n\tdumpint(uint64(t.size))\n\tif x := t.uncommon(); x == nil || t.nameOff(x.pkgpath).name() == \"\" {\n\t\tdumpstr(t.string())\n\t} else {\n\t\tpkgpathstr := t.nameOff(x.pkgpath).name()\n\t\tpkgpath := stringStructOf(&pkgpathstr)\n\t\tnamestr := t.name()\n\t\tname := stringStructOf(&namestr)\n\t\tdumpint(uint64(uintptr(pkgpath.len) + 1 + uintptr(name.len)))\n\t\tdwrite(pkgpath.str, uintptr(pkgpath.len))\n\t\tdwritebyte('.')\n\t\tdwrite(name.str, uintptr(name.len))\n\t}\n\tdumpbool(t.kind&kindDirectIface == 0 || t.ptrdata != 0)\n}", "func (JSONMap) GormDBDataType(db *gorm.DB, field *schema.Field) string {\n\tswitch db.Dialector.Name() {\n\tcase \"sqlite\":\n\t\treturn \"JSON\"\n\tcase \"mysql\":\n\t\treturn \"JSON\"\n\tcase \"postgres\":\n\t\treturn \"JSONB\"\n\tcase \"sqlserver\":\n\t\treturn \"NVARCHAR(MAX)\"\n\t}\n\treturn \"\"\n}", "func (enc *DictByteArrayEncoder) Type() parquet.Type {\n\treturn parquet.Types.ByteArray\n}", "func (w *shardWorker) encodeData(val interface{}) ([]byte, error) {\n\t// Reusing encoders gave issues\n\tencoder := gob.NewEncoder(w.buffer)\n\n\terr := encoder.Encode(val)\n\tif err != nil {\n\t\tw.buffer.Reset()\n\t\treturn nil, err\n\t}\n\n\tencoded := make([]byte, w.buffer.Len())\n\t_, err = w.buffer.Read(encoded)\n\n\tw.buffer.Reset()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn encoded, nil\n}", "func (p valuePointer) Encode(b []byte) []byte {\n\tbinary.BigEndian.PutUint32(b[:4], p.Fid)\n\tbinary.BigEndian.PutUint32(b[4:8], p.Len)\n\tbinary.BigEndian.PutUint32(b[8:valuePointerEncodedSize], p.Offset)\n\treturn b[:valuePointerEncodedSize]\n}", "func MarshalWithParams(val interface{}, params string) ([]byte, error) {}", "func encode(ins interface{}) ([]byte, error) {\n\treturn json.Marshal(ins)\n}", "func toByteArray(data interface{}) ([]byte, error) {\n\tvar buf bytes.Buffer\n\tenc := gob.NewEncoder(&buf)\n\terr := enc.Encode(data)\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\treturn buf.Bytes(), nil\n\t}\n}", "func RegisterAccountTypeCodec(o interface{}, name string) {\n\tModuleCdc.RegisterConcrete(o, name, nil)\n}", "func (b *NetworkTypeBox) Encode(buf *bin.Buffer) error {\n\tif b == nil || b.NetworkType == nil {\n\t\treturn fmt.Errorf(\"unable to encode NetworkTypeClass as nil\")\n\t}\n\treturn b.NetworkType.Encode(buf)\n}" ]
[ "0.6781096", "0.6659338", "0.64719117", "0.63125616", "0.61463565", "0.6085887", "0.6061027", "0.59845275", "0.5943138", "0.58399534", "0.57951427", "0.57240325", "0.5702898", "0.5651715", "0.5639708", "0.5628176", "0.5625311", "0.56052095", "0.5578742", "0.55714303", "0.556946", "0.5485936", "0.5484025", "0.54814816", "0.54797286", "0.5478267", "0.54677045", "0.54677045", "0.5467383", "0.54484415", "0.54426163", "0.54390085", "0.5394681", "0.5378419", "0.5372703", "0.5362393", "0.53505796", "0.53316194", "0.53302085", "0.5313285", "0.5293369", "0.5293276", "0.52906203", "0.5266892", "0.5255373", "0.5254919", "0.52087843", "0.52078074", "0.51737326", "0.51712227", "0.5164921", "0.51644915", "0.5138519", "0.5137778", "0.5124942", "0.5103389", "0.5083521", "0.507943", "0.5072149", "0.5069651", "0.50424886", "0.5036023", "0.5021329", "0.50148684", "0.5006895", "0.5004233", "0.49930343", "0.49851078", "0.49828896", "0.49651402", "0.4949848", "0.4941891", "0.4934002", "0.49151143", "0.49147913", "0.49027157", "0.4890578", "0.48893583", "0.48844376", "0.48839986", "0.48823524", "0.48779163", "0.4875694", "0.48751795", "0.4874527", "0.48688218", "0.48647594", "0.4858508", "0.48548836", "0.4850249", "0.4838027", "0.4833569", "0.4829489", "0.48264447", "0.48177785", "0.48093793", "0.4805504", "0.48007265", "0.4799596", "0.47945866" ]
0.61903644
4
EncodeType will convert the given pointer into a gob encoded byte set, and return them
func (g *GobEncoderLight) EncodeType(t interface{}) ([]byte, error) {\n\tdefer func() {\n\t\tg.bytes.Reset()\n\t}()\n\terr := g.encoder.Encode(t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn g.bytes.Bytes(), nil\n}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (t Type) GobEncode() ([]byte, error) {\n\tbuf := &bytes.Buffer{}\n\tenc := gob.NewEncoder(buf)\n\n\tgt := gobType{\n\t\tVersion: 0,\n\t\tImpl: t.typeImpl,\n\t}\n\n\terr := enc.Encode(gt)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error encoding cty.Type: %s\", err)\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func GobEncode(value interface{}) []byte {\n buf := bytes.NewBuffer(make([]byte, 0, 1024))\n encoder := gob.NewEncoder(buf)\n // encode unknown type might cause some error\n err := encoder.Encode(value)\n if err != nil {\n gobDebug.Panicf(\"Failed to encode a value: %+v\\n%v\\n\", value, err)\n }\n return buf.Bytes()\n}", "func (t *capsuleType) GobEncode() ([]byte, error) {\n\treturn nil, fmt.Errorf(\"cannot gob-encode capsule type %q\", t.FriendlyName(friendlyTypeName))\n}", "func (g *GobTranscoder) EncodeType(t interface{}) ([]byte, error) {\n\tg.encoderMut.Lock()\n\tdefer func() {\n\t\tg.outBytes.Reset()\n\t\tg.encoderMut.Unlock()\n\t}()\n\terr := g.encoder.Encode(t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn g.outBytes.Bytes(), nil\n}", "func gobEncode(value interface{}) ([]byte, error) {\n\tvar buf bytes.Buffer\n\tenc := gob.NewEncoder(&buf)\n\terr := enc.Encode(value)\n\treturn buf.Bytes(), err\n}", "func EncodeGob(p interface{}) (data []byte, err error) {\n\tb := bytes.Buffer{}\n\tencoder := gob.NewEncoder(&b)\n\terr = encoder.Encode(p)\n\tif err != nil {\n\t\treturn\n\t}\n\tdata = b.Bytes()\n\treturn\n}", "func gobEncode(data interface{}) []byte {\n\tvar buff bytes.Buffer\n\n\tenc := gob.NewEncoder(&buff)\n\terr := enc.Encode(data)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\treturn buff.Bytes()\n}", "func GobEncode(data interface{}) []byte {\n\tvar buff bytes.Buffer\n\n\tencoder := gob.NewEncoder(&buff)\n\tif err := encoder.Encode(data); err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\treturn buff.Bytes()\n}", "func (x *Rat) GobEncode() ([]byte, error) {}", "func (t Time) GobEncode() ([]byte, error) {}", "func GobEncode(v interface{}) ([]byte, error) {\n\tvar buf bytes.Buffer\n\terr := gob.NewEncoder(&buf).Encode(&v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}", "func (fc fctuple) GobEncode() ([]byte, error) {\n\tvar buf bytes.Buffer\n\tencoder := gob.NewEncoder(&buf)\n\n\tif err := encoder.Encode(&fc.feature); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := encoder.Encode(fc.POSTag); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func (info *ImageInfoType) GobEncode() (buf []byte, err error) {\n\tfields := []interface{}{info.data, info.smask, info.n, info.w, info.h, info.cs,\n\t\tinfo.pal, info.bpc, info.f, info.dp, info.trns, info.scale, info.dpi}\n\tw := new(bytes.Buffer)\n\tencoder := gob.NewEncoder(w)\n\tfor j := 0; j < len(fields) && err == nil; j++ {\n\t\terr = encoder.Encode(fields[j])\n\t}\n\tif err == nil {\n\t\tbuf = w.Bytes()\n\t}\n\treturn\n}", "func RatGobEncode(x *big.Rat,) ([]byte, error)", "func EncodeGob(data interface{}) ([]byte, error) {\n\tb := new(bytes.Buffer)\n\terr := gob.NewEncoder(b).Encode(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn b.Bytes(), nil\n}", "func packType(v interface{}) byte {\n\tswitch v.(type) {\n\tcase nil:\n\t\treturn ptNone\n\tcase string:\n\t\treturn ptString\n\tcase int32:\n\t\treturn ptInt\n\tcase float32:\n\t\treturn ptFloat\n\tcase uint32:\n\t\treturn ptPtr\n\tcase []uint16:\n\t\treturn ptWString\n\tcase color.NRGBA:\n\t\treturn ptColor\n\tcase uint64:\n\t\treturn ptUint64\n\tdefault:\n\t\tpanic(\"invalid vdf.Node\")\n\t}\n}", 
"func IntGobEncode(x *big.Int,) ([]byte, error)", "func EncodeGobZlib(p interface{}) (data []byte, err error) {\n\tb := bytes.Buffer{}\n\tcompressor, err := zlib.NewWriterLevel(&b, zlib.BestCompression)\n\tif err != nil {\n\t\treturn\n\t}\n\tencoder := gob.NewEncoder(compressor)\n\terr = encoder.Encode(p)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = compressor.Close()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdata = b.Bytes()\n\treturn\n}", "func gobInfoEncode(gob *storage.GobInfo) ([]byte, error) {\n\tbuf := new(bytes.Buffer)\n\tgobEnc := realgob.NewEncoder(buf)\n\terr := gobEnc.Encode(gob)\n\treturn buf.Bytes(), err\n}", "func (val Value) GobEncode() ([]byte, error) {\n\tbuf := &bytes.Buffer{}\n\tenc := gob.NewEncoder(buf)\n\n\tgv := gobValue{\n\t\tVersion: 0,\n\t\tTy: val.ty,\n\t\tV: val.v,\n\t}\n\n\terr := enc.Encode(gv)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error encoding cty.Value: %s\", err)\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func (k *Key) GobEncode() ([]byte, error) {\n\treturn []byte(k.Encode()), nil\n}", "func (sf singleFeature) GobEncode() ([]byte, error) {\n\tvar buf bytes.Buffer\n\tencoder := gob.NewEncoder(&buf)\n\n\tif err := encoder.Encode(sf.featureType); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := encoder.Encode(sf.value); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func (w *Writer) getType(obj interface{}, length int) []byte {\n\t// check length\n\tw.checkLength(length)\n\tvar tmp = make([]byte, 8)\n\n\tswitch objType := obj.(type) {\n\tcase int8:\n\t\ttmp[0] = byte(objType)\n\tcase uint8:\n\t\ttmp[0] = byte(objType)\n\tcase int16:\n\t\ttmp[0] = byte(objType)\n\t\ttmp[1] = byte(objType >> 8)\n\tcase uint16:\n\t\ttmp[0] = byte(objType)\n\t\ttmp[1] = byte(objType >> 8)\n\tcase int32:\n\t\ttmp[0] = byte(objType)\n\t\ttmp[1] = byte(objType >> 8)\n\t\ttmp[2] = byte(objType >> 16)\n\t\ttmp[3] = byte(objType >> 24)\n\tcase uint32:\n\t\ttmp[0] = byte(objType)\n\t\ttmp[1] = byte(objType >> 8)\n\t\ttmp[2] = byte(objType >> 16)\n\t\ttmp[3] = byte(objType >> 24)\n\tcase int:\n\t\ttmp[0] = byte(objType)\n\t\ttmp[1] = byte(objType >> 8)\n\t\ttmp[2] = byte(objType >> 16)\n\t\ttmp[3] = byte(objType >> 24)\n\tcase int64:\n\t\ttmp[0] = byte(objType)\n\t\ttmp[1] = byte(objType >> 8)\n\t\ttmp[2] = byte(objType >> 16)\n\t\ttmp[3] = byte(objType >> 24)\n\t\ttmp[4] = byte(objType >> 32)\n\t\ttmp[5] = byte(objType >> 40)\n\t\ttmp[6] = byte(objType >> 48)\n\t\ttmp[7] = byte(objType >> 56)\n\tcase uint64:\n\t\ttmp[0] = byte(objType)\n\t\ttmp[1] = byte(objType >> 8)\n\t\ttmp[2] = byte(objType >> 16)\n\t\ttmp[3] = byte(objType >> 24)\n\t\ttmp[4] = byte(objType >> 32)\n\t\ttmp[5] = byte(objType >> 40)\n\t\ttmp[6] = byte(objType >> 48)\n\t\ttmp[7] = byte(objType >> 56)\n\tdefault:\n\t\tlog.Error(\"Unknown data type:\", reflect.TypeOf(obj))\n\t\treturn nil\n\t}\n\n\treturn tmp[:length]\n}", "func Encode(data interface{}) []byte {\n v := Value{data}\n return v.Encode()\n}", "func encodePtrArg(t reflect.Type) bool {\n\tif t.Implements(binaryMarshalerType) || t.Implements(textMarshalerType) {\n\t\treturn false\n\t}\n\treturn t.Kind() == reflect.Struct || t.Kind() == reflect.Array\n}", "func (t Tensor) GobEncode() ([]byte, error) {\n\tvar buf bytes.Buffer\n\tenc := gob.NewEncoder(&buf)\n\n\terr := enc.Encode(t.DataType())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = enc.Encode(t.Shape())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch t.DataType() {\n\tcase tf.String:\n\t\t// TensorFlow Go package currently does not support\n\t\t// 
string serialization. Let's do it ourselves.\n\t\terr = enc.Encode(t.Tensor.Value().(string))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tdefault:\n\t\t_, err = t.WriteContentsTo(&buf)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func (_type Type) MarshalGQL(w io.Writer) {\n\tio.WriteString(w, strconv.Quote(_type.String()))\n}", "func (_type Type) MarshalGQL(w io.Writer) {\n\tio.WriteString(w, strconv.Quote(_type.String()))\n}", "func FloatGobEncode(x *big.Float,) ([]byte, error)", "func (set AppleSet) GobEncode() ([]byte, error) {\n\tset.s.RLock()\n\tdefer set.s.RUnlock()\n\n\tbuf := &bytes.Buffer{}\n\terr := gob.NewEncoder(buf).Encode(set.m)\n\treturn buf.Bytes(), err\n}", "func GobMarshal(i interface{}) ([]byte, error) {\n\tbuf := bytes.NewBuffer(nil)\n\tencoder := gob.NewEncoder(buf)\n\terr := encoder.Encode(i)\n\treturn buf.Bytes(), err\n}", "func (t Timestamp) GobEncode() ([]byte, error) {\n\treturn t.Time().MarshalBinary()\n}", "func encodeNodeWithType(n Node, w *io.BinWriter) {\n\tw.WriteB(byte(n.Type()))\n\tn.EncodeBinary(w)\n}", "func (b *ProxyTypeBox) Encode(buf *bin.Buffer) error {\n\tif b == nil || b.ProxyType == nil {\n\t\treturn fmt.Errorf(\"unable to encode ProxyTypeClass as nil\")\n\t}\n\treturn b.ProxyType.Encode(buf)\n}", "func (t *Type) GobDecode(buf []byte) error {\n\tr := bytes.NewReader(buf)\n\tdec := gob.NewDecoder(r)\n\n\tvar gt gobType\n\terr := dec.Decode(&gt)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error decoding cty.Type: %s\", err)\n\t}\n\tif gt.Version != 0 {\n\t\treturn fmt.Errorf(\"unsupported cty.Type encoding version %d; only 0 is supported\", gt.Version)\n\t}\n\n\tt.typeImpl = gt.Impl\n\n\treturn nil\n}", "func (g *Gammas) GobEncode() ([]byte, error) {\n\tbuff := bytes.Buffer{}\n\tif g != nil {\n\t\tfor _, g2 := range *g {\n\t\t\tbuff.Write(g2.Marshal())\n\t\t}\n\t}\n\treturn buff.Bytes(), nil\n}", "func (p *perceptron) GobEncode() ([]byte, error) {\n\tvar buf bytes.Buffer\n\tencoder := gob.NewEncoder(&buf)\n\n\t// if err := encoder.Encode(&p.weights); err != nil {\n\t// \treturn nil, err\n\t// }\n\n\tif err := encoder.Encode(&p.weightsSF); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := encoder.Encode(&p.weightsTF); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := encoder.Encode(&p.totals); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := encoder.Encode(&p.steps); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := encoder.Encode(p.instancesSeen); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func (e *Encoder) EncodePtr(ps ...unsafe.Pointer) []byte {\n\n\tengines := e.engines\n\tfor i := 0; i < len(engines) && i < len(ps); i++ {\n\t\tengines[i](e, ps[i])\n\t\te.objPos = 0\n\t}\n\treturn e.reset()\n}", "func RegisterType(x interface{}) {\n\tgob.Register(x)\n}", "func (bs endecBytes) Type() byte {\n\treturn bs[0] >> 4\n}", "func BenchmarkEncodingGobTweetStruct(b *testing.B) {\n\tb.StartTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tvar bb bytes.Buffer\n\t\tenc := gob.NewEncoder(&bb)\n\t\t_ = enc.Encode(tw)\n\t\t_ = bb.Bytes()\n\t}\n}", "func Encode(e encoding.Type, input interface{}) ([]byte, error) {\n\treturn encoding.Format(e).Encode(input)\n}", "func Encode(val interface{}, opts Options) ([]byte, error) {\n var ret []byte\n\n buf := newBytes()\n err := encodeInto(&buf, val, opts)\n\n /* check for errors */\n if err != nil {\n freeBytes(buf)\n return nil, err\n }\n\n /* htmlescape or correct UTF-8 if opts enable */\n old := buf\n buf = encodeFinish(old, 
opts)\n pbuf := ((*rt.GoSlice)(unsafe.Pointer(&buf))).Ptr\n pold := ((*rt.GoSlice)(unsafe.Pointer(&old))).Ptr\n\n /* return when allocated a new buffer */\n if pbuf != pold {\n freeBytes(old)\n return buf, nil\n }\n\n /* make a copy of the result */\n ret = make([]byte, len(buf))\n copy(ret, buf)\n\n freeBytes(buf)\n /* return the buffer into pool */\n return ret, nil\n}", "func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {\n\tif t.Implements(marshalerType) {\n\t\treturn marshalerEncoder\n\t}\n\n\tif t.Implements(binaryMarshalerType) {\n\t\treturn binaryMarshalerEncoder\n\t}\n\n\tif t.Implements(gobEncoderType) {\n\t\treturn gobEncoder\n\t}\n\n\tif t.Kind() != reflect.Ptr && allowAddr {\n\t\tft := reflect.PtrTo(t)\n\t\tif ft.Implements(marshalerType) {\n\t\t\treturn addrEncoder(marshalerEncoder, newTypeEncoder(t, false))\n\t\t}\n\t\tif ft.Implements(binaryMarshalerType) {\n\t\t\treturn addrEncoder(binaryMarshalerEncoder, newTypeEncoder(t, false))\n\t\t}\n\t\tif ft.Implements(gobEncoderType) {\n\t\t\treturn addrEncoder(gobEncoder, newTypeEncoder(t, false))\n\t\t}\n\t}\n\n\tswitch t.Kind() {\n\tcase reflect.Bool:\n\t\treturn boolEncoder\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn intEncoder\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\treturn uintEncoder\n\tcase reflect.Float32:\n\t\treturn float32Encoder\n\tcase reflect.Float64:\n\t\treturn float64Encoder\n\tcase reflect.Complex64:\n\t\treturn complex64Encoder\n\tcase reflect.Complex128:\n\t\treturn complex128Encoder\n\tcase reflect.String:\n\t\treturn stringEncoder\n\tcase reflect.Map:\n\t\tif !isNative(t.Key().Kind(), true) { // eventually will support pointer keys and structs, but not gonna happen until it's needed\n\t\t\treturn invalidEncoder\n\t\t}\n\t\treturn newMapEncoder(t)\n\tcase reflect.Slice, reflect.Array:\n\t\treturn newSliceEncoder(t.Elem())\n\tcase reflect.Struct:\n\t\treturn newStructEncoder(t)\n\tcase reflect.Ptr:\n\t\treturn ptrEncoder(newTypeEncoder(t.Elem(), false))\n\tcase reflect.Interface:\n\t\treturn ifaceEncoder\n\t}\n\treturn invalidEncoder\n}", "func (e *encoder) marshalPointer(t reflect.Type, v reflect.Value, n nestedTypeData) error {\n\tswitch t.Elem().Kind() {\n\tcase reflect.Slice:\n\t\treturn e.marshalVector(t.Elem(), v.Elem(), n)\n\tcase reflect.String:\n\t\treturn e.marshalString(v.Elem(), n)\n\tcase reflect.Struct:\n\t\treturn e.marshalStructOrUnionPointer(t, v)\n\t}\n\treturn newValueError(ErrInvalidPointerType, t.Name())\n}", "func interfaceEncode(enc *gob.Encoder, p Pythagoras) {\n\t//the encode will fail unless the concrete type has been\n\t//registered.We registered it in the calling function.\n\n\t//Pass pointer to interface so Encode sees(and hence sends) a value of interface type.\n\t//If we passed p directly it would see the concrete typoe instead.\n\t//See the blog post,\"The laws of Reflection\"for background\n\n\terr := enc.Encode(&p)\n\tif err != nil {\n\t\tlog.Fatal(\"Encode:\", err)\n\t}\n}", "func (t *TGen) Encode(val interface{}) {\n\tvalType := reflect.TypeOf(val)\n\tt.src = \"\\nfunc Encode(buf []byte, t \" + TypeName(valType.String(), true) + \") []byte {\\n\"\n\tt.stackName.Push(\"t\")\n\tt.encode(valType)\n\tt.src += \"return buf \\n}\\n\\n\"\n\tt.stackName.Clear()\n\tt.tmpNameGen.Clear()\n\t_, e := t.f.Write([]byte(t.src))\n\terr.Panic(e)\n}", "func (g *Generator) genTypeEncoder(t reflect.Type, in string, tags fieldTags, indent int, assumeNonEmpty bool) error 
{\n\tws := strings.Repeat(\" \", indent)\n\n\tmarshalerIface := reflect.TypeOf((*easyjson.Marshaler)(nil)).Elem()\n\tif reflect.PtrTo(t).Implements(marshalerIface) {\n\t\tfmt.Fprintln(g.out, ws+\"(\"+in+\").MarshalEasyJSON(out)\")\n\t\treturn nil\n\t}\n\n\tmarshalerIface = reflect.TypeOf((*json.Marshaler)(nil)).Elem()\n\tif reflect.PtrTo(t).Implements(marshalerIface) {\n\t\tfmt.Fprintln(g.out, ws+\"out.Raw( (\"+in+\").MarshalJSON() )\")\n\t\treturn nil\n\t}\n\n\tmarshalerIface = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()\n\tif reflect.PtrTo(t).Implements(marshalerIface) {\n\t\tfmt.Fprintln(g.out, ws+\"out.RawText( (\"+in+\").MarshalText() )\")\n\t\treturn nil\n\t}\n\n\terr := g.genTypeEncoderNoCheck(t, in, tags, indent, assumeNonEmpty)\n\treturn err\n}", "func Marshal(data interface{}, typ DataFormat) []byte {\n\tswitch typ {\n\tcase GOB:\n\t\tvar buf bytes.Buffer\n\t\tgob.NewEncoder(&buf).Encode(data)\n\t\treturn buf.Bytes()\n\n\tcase JSON:\n\t\tbuf, err := json.Marshal(data)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn buf\n\n\tdefault:\n\t\tpanic(fmt.Errorf(\"Unrecognized data type\"))\n\t}\n}", "func NewGobCode(conn io.ReadWriteCloser) Codec {\n\tbuf := bufio.NewWriter(conn)\n\treturn &GobCodec{conn: conn, buf: buf, dec: gob.NewDecoder(conn), enc: gob.NewEncoder(buf)}\n}", "func Marshal(val interface{}) ([]byte, error) {}", "func ToGob(src interface{}) ([]byte, error) {\n\treturn NewGobber().To(src)\n}", "func (v valuePointer) Encode() []byte {\n\tb := make([]byte, valuePointerSize)\n\n\t// Copy over the content from p to b.\n\t*(*valuePointer)(unsafe.Pointer(&b[0])) = v\n\n\treturn b\n}", "func (v *Value) Encode() []byte {\n var output []byte\n switch d := v.data.(type) {\n case []byte:\n output = d\n case string:\n output = []byte(d)\n case bool:\n output = strconv.AppendBool(output, d)\n case float32, float64:\n f64 := reflect.ValueOf(v.data).Float()\n output = strconv.AppendFloat(output, f64, 'g', -1, 64)\n case int, int8, int16, int32, int64:\n i64 := reflect.ValueOf(v.data).Int()\n output = strconv.AppendInt(output, i64, 10)\n case uint, uint8, uint16, uint32, uint64:\n u64 := reflect.ValueOf(v.data).Uint()\n output = strconv.AppendUint(output, u64, 10)\n default:\n if j, e := json.Marshal(v.data); e == nil {\n output = j\n } else {\n panic(\"Value.Encode: \" + e.Error())\n }\n }\n return output\n}", "func (s *CountMinSketch) GobEncode() ([]byte, error) {\n\tvar buf bytes.Buffer\n\t_, err := s.WriteTo(&buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}", "func (*Store) RegisterType(i interface{}) {\n\tgob.Register(i)\n}", "func (d *DFA) GobEncode() ([]byte, error) {\n\tbuffer := new(bytes.Buffer)\n\tencoder := gob.NewEncoder(buffer)\n\tif err := encoder.Encode(d.initial); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not GOB encode initial state\")\n\t}\n\tif err := encoder.Encode(d.table); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not GOB encode sparse table\")\n\t}\n\treturn buffer.Bytes(), nil\n}", "func gobFlattenRegister(t reflect.Type) {\n\tif t.Kind() == reflect.Interface {\n\t\treturn\n\t}\n\tfor t.Kind() == reflect.Ptr {\n\t\tt = t.Elem()\n\t}\n\tpz := reflect.New(t)\n\tgob.Register(pz.Elem().Interface())\n}", "func EncodingType(value string) Option {\n\treturn addParam(\"encoding-type\", value)\n}", "func (a *Array) GobEncode() ([]byte, error) {\n\tbuf := &bytes.Buffer{}\n\tenc := gob.NewEncoder(buf)\n\n\terr := checkErr(\n\t\tenc.Encode(a.bits),\n\t\tenc.Encode(a.length),\n\t)\n\n\tif err != 
nil {\n\t\terr = fmt.Errorf(\"bit: encode failed (%v)\", err)\n\t}\n\n\treturn buf.Bytes(), err\n}", "func encode(buf *bytes.Buffer, v reflect.Value) error {\n\tswitch v.Kind() {\n\tcase reflect.Invalid: // ignore\n\n\tcase reflect.Float32, reflect.Float64:\n\t\tfmt.Fprintf(buf, \"%f\", v.Float())\n\n\tcase reflect.Complex128, reflect.Complex64:\n\t\tc := v.Complex()\n\t\tfmt.Fprintf(buf, \"#C(%f %f)\", real(c), imag(c))\n\n\tcase reflect.Bool:\n\t\tif v.Bool() {\n\t\t\tfmt.Fprintf(buf, \"t\")\n\t\t}\n\n\tcase reflect.Interface:\n\t\t// type output\n\t\tt := v.Elem().Type()\n\n\t\tleftBuffer := new(bytes.Buffer)\n\t\trightBuffer := new(bytes.Buffer)\n\n\t\tif t.Name() == \"\" { // 名前がつけられてないtypeはそのまま表示する\n\t\t\tfmt.Fprintf(leftBuffer, \"%q\", t)\n\t\t} else {\n\t\t\tfmt.Fprintf(leftBuffer, \"\\\"%s.%s\\\" \", t.PkgPath(), t.Name()) //一意ではないとはこういうことか?\n\t\t}\n\n\t\t// value output\n\t\tif err := encode(rightBuffer, v.Elem()); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(rightBuffer.Bytes()) != 0 {\n\t\t\tbuf.WriteByte('(')\n\t\t\tbuf.Write(leftBuffer.Bytes())\n\t\t\tbuf.WriteByte(' ')\n\t\t\tbuf.Write(rightBuffer.Bytes())\n\t\t\tbuf.WriteByte(')')\n\t\t}\n\n\tcase reflect.Int, reflect.Int8, reflect.Int16,\n\t\treflect.Int32, reflect.Int64:\n\t\tfmt.Fprintf(buf, \"%d\", v.Int())\n\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16,\n\t\treflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\tfmt.Fprintf(buf, \"%d\", v.Uint())\n\n\tcase reflect.String:\n\t\tfmt.Fprintf(buf, \"%q\", v.String())\n\n\tcase reflect.Ptr:\n\t\treturn encode(buf, v.Elem())\n\n\tcase reflect.Array, reflect.Slice: // (value ...)\n\n\t\tcontent := new(bytes.Buffer)\n\n\t\tisFirst := true\n\n\t\tfor i := 0; i < v.Len(); i++ {\n\t\t\tif isFirst {\n\t\t\t\tisFirst = false\n\t\t\t\tcontent.WriteByte(' ')\n\t\t\t}\n\t\t\tif err := encode(content, v.Index(i)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif len(content.Bytes()) != 0 {\n\t\t\tbuf.WriteByte('(')\n\t\t\tbuf.Write(content.Bytes())\n\t\t\tbuf.WriteByte(')')\n\t\t}\n\n\tcase reflect.Struct: // ((name value) ...)\n\n\t\tcontent := new(bytes.Buffer)\n\n\t\tisFirst := true\n\n\t\tfor i := 0; i < v.NumField(); i++ {\n\t\t\trightBuffer := new(bytes.Buffer)\n\t\t\tif err := encode(rightBuffer, v.Field(i)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif len(rightBuffer.Bytes()) != 0 {\n\t\t\t\tif isFirst {\n\t\t\t\t\tcontent.WriteByte(' ')\n\t\t\t\t\tisFirst = false\n\t\t\t\t}\n\t\t\t\tcontent.WriteByte('(')\n\t\t\t\tfmt.Fprintf(content, \"%s\", v.Type().Field(i).Name)\n\t\t\t\tcontent.WriteByte(' ')\n\t\t\t\tcontent.Write(rightBuffer.Bytes())\n\t\t\t\tcontent.WriteByte(')')\n\t\t\t}\n\t\t}\n\n\t\tif len(content.Bytes()) != 0 {\n\t\t\tbuf.WriteByte('(')\n\t\t\tbuf.Write(content.Bytes())\n\t\t\tbuf.WriteByte(')')\n\t\t}\n\n\tcase reflect.Map: // ((key value) ...)\n\t\tisFirst := true\n\t\tcontent := new(bytes.Buffer)\n\n\t\tfor _, key := range v.MapKeys() {\n\t\t\tif isFirst {\n\t\t\t\tcontent.WriteByte(' ')\n\t\t\t\tisFirst = false\n\t\t\t}\n\n\t\t\tleftBuffer := new(bytes.Buffer)\n\t\t\trightBuffer := new(bytes.Buffer)\n\n\t\t\tif err := encode(leftBuffer, key); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := encode(rightBuffer, v.MapIndex(key)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif len(rightBuffer.Bytes()) != 0 {\n\t\t\t\tcontent.WriteByte('(')\n\t\t\t\tcontent.Write(leftBuffer.Bytes())\n\t\t\t\tcontent.WriteByte(' ')\n\t\t\t\tcontent.Write(rightBuffer.Bytes())\n\t\t\t\tcontent.WriteByte(')')\n\t\t\t}\n\t\t}\n\n\t\tif 
len(content.Bytes()) != 0 {\n\t\t\tbuf.WriteByte('(')\n\t\t\tbuf.Write(content.Bytes())\n\t\t\tbuf.WriteByte(')')\n\t\t}\n\n\tdefault: // float, complex, bool, chan, func, interface\n\t\treturn fmt.Errorf(\"unsupported type: %s\", v.Type())\n\t}\n\treturn nil\n}", "func registerValueType(value interface{}) {\n\tt := reflect.TypeOf(value)\n\tv := reflect.New(t).Elem().Interface()\n\tgob.Register(v)\n}", "func (d *Person) GobEncode() ([]byte, error) {\n\tw := new(bytes.Buffer)\n\tencoder := gob.NewEncoder(w)\n\terr := encoder.Encode(d.Id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = encoder.Encode(d.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn w.Bytes(), nil\n}", "func MustGobEncode(o interface{}) []byte {\n\toEncoded, err := GobEncode(o)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn oEncoded\n}", "func (d Decimal) GobEncode() ([]byte, error) {\n\treturn d.MarshalBinary()\n}", "func EncodeGobGzip(p interface{}) (data []byte, err error) {\n\tb := bytes.Buffer{}\n\tcompressor, err := gzip.NewWriterLevel(&b, gzip.BestCompression)\n\tif err != nil {\n\t\treturn\n\t}\n\tencoder := gob.NewEncoder(compressor)\n\terr = encoder.Encode(p)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = compressor.Close()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdata = b.Bytes()\n\treturn\n}", "func TestEncodeDecodeGob(t *testing.T) {\n\ttestEncodeDecodeFunctions(t,\n\t\tencodeLeaseRequestGob, encodeLeaseReplyGob,\n\t\tdecodeLeaseRequestGob, decodeLeaseReplyGob)\n}", "func execWriteType(_ int, p *gop.Context) {\n\targs := p.GetArgs(3)\n\ttypes.WriteType(args[0].(*bytes.Buffer), args[1].(types.Type), args[2].(types.Qualifier))\n}", "func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {\n\t// Not sure if the following code is necessary\n\t/*\n\t\tif t.Implements(marshalerType) {\n\t\t\treturn marshalerEncoder\n\t\t}\n\t\tif t.Kind() != reflect.Ptr && allowAddr {\n\t\t\tif reflect.PtrTo(t).Implements(marshalerType) {\n\t\t\t\treturn newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false))\n\t\t\t}\n\t\t}\n\n\t\tif t.Implements(textMarshalerType) {\n\t\t\treturn textMarshalerEncoder\n\t\t}\n\t\tif t.Kind() != reflect.Ptr && allowAddr {\n\t\t\tif reflect.PtrTo(t).Implements(textMarshalerType) {\n\t\t\t\treturn newCondAddrEncoder(addrTextMarshalerEncoder, newTypeEncoder(t, false))\n\t\t\t}\n\t\t}\n\t*/\n\n\tswitch t.Kind() {\n\tcase reflect.Bool:\n\t\treturn boolEncoder\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn intEncoder\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\treturn uintEncoder\n\tcase reflect.String:\n\t\treturn stringEncoder\n\tcase reflect.Interface:\n\t\t// return interfaceEncoder\n\t\treturn interfaceEncoder\n\tcase reflect.Struct:\n\t\t// return newStructEncoder(t)\n\t\treturn structEncoder\n\tcase reflect.Map:\n\t\t// return newMapEncoder(t)\n\t\treturn mapEncoder\n\tcase reflect.Slice:\n\t\t// return newSliceEncoder(t)\n\t\treturn sliceEncoder\n\tcase reflect.Array:\n\t\t// return newArrayEncoder(t)\n\t\treturn arrayEncoder\n\tcase reflect.Ptr:\n\t\t// return newPtrEncoder(t)\n\t\treturn ptrEncoder\n\tdefault:\n\t\treturn unsupportedTypeEncoder\n\t}\n}", "func (e *Encoder) PutGOB(val interface{}) {\n\tgobe := gob.NewEncoder(e)\n\tif err := gobe.Encode(val); err != nil {\n\t\tlog.Panicf(\"gob: failed to encode %v: %v\", val, err)\n\t}\n}", "func encode(classNames map[int][]string, classes []symbolSet) []byte {\n\tpayload := gobClasses{\n\t\tClasses: 
make([]gobClass, 0, len(classNames)),\n\t}\n\n\t// index of unique strings\n\tstrings := make(map[string]int32)\n\tstringIndex := func(s string) int32 {\n\t\ti, ok := strings[s]\n\t\tif !ok {\n\t\t\ti = int32(len(payload.Strings))\n\t\t\tstrings[s] = i\n\t\t\tpayload.Strings = append(payload.Strings, s)\n\t\t}\n\t\treturn i\n\t}\n\n\tvar refs []symbol // recycled temporary\n\tfor class, names := range classNames {\n\t\tset := classes[class]\n\n\t\t// names, sorted\n\t\tsort.Strings(names)\n\t\tgobDecls := make([]int32, len(names))\n\t\tfor i, name := range names {\n\t\t\tgobDecls[i] = stringIndex(name)\n\t\t}\n\n\t\t// refs, sorted by ascending (PackageID, name)\n\t\tgobRefs := make([]int32, 0, 2*len(set))\n\t\tfor _, sym := range set.appendSorted(refs[:0]) {\n\t\t\tgobRefs = append(gobRefs,\n\t\t\t\tstringIndex(string(sym.pkg)),\n\t\t\t\tstringIndex(sym.name))\n\t\t}\n\t\tpayload.Classes = append(payload.Classes, gobClass{\n\t\t\tDecls: gobDecls,\n\t\t\tRefs: gobRefs,\n\t\t})\n\t}\n\n\treturn classesCodec.Encode(payload)\n}", "func (v *Type) Encode(sw stream.Writer) error {\n\tif err := sw.WriteStructBegin(); err != nil {\n\t\treturn err\n\t}\n\n\tif v.SimpleType != nil {\n\t\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 1, Type: wire.TI32}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := v.SimpleType.Encode(sw); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sw.WriteFieldEnd(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif v.SliceType != nil {\n\t\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 2, Type: wire.TStruct}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := v.SliceType.Encode(sw); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sw.WriteFieldEnd(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif v.KeyValueSliceType != nil {\n\t\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 3, Type: wire.TStruct}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := v.KeyValueSliceType.Encode(sw); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sw.WriteFieldEnd(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif v.MapType != nil {\n\t\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 4, Type: wire.TStruct}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := v.MapType.Encode(sw); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sw.WriteFieldEnd(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif v.ReferenceType != nil {\n\t\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 5, Type: wire.TStruct}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := v.ReferenceType.Encode(sw); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sw.WriteFieldEnd(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif v.PointerType != nil {\n\t\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 6, Type: wire.TStruct}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := v.PointerType.Encode(sw); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sw.WriteFieldEnd(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tcount := 0\n\tif v.SimpleType != nil {\n\t\tcount++\n\t}\n\tif v.SliceType != nil {\n\t\tcount++\n\t}\n\tif v.KeyValueSliceType != nil {\n\t\tcount++\n\t}\n\tif v.MapType != nil {\n\t\tcount++\n\t}\n\tif v.ReferenceType != nil {\n\t\tcount++\n\t}\n\tif v.PointerType != nil {\n\t\tcount++\n\t}\n\n\tif count != 1 {\n\t\treturn fmt.Errorf(\"Type should have exactly one field: got %v fields\", count)\n\t}\n\n\treturn sw.WriteStructEnd()\n}", "func (s *Store) GobEncode() ([]byte, error) {\n\ts.access.RLock()\n\tdefer 
s.access.RUnlock()\n\n\tbuf := new(bytes.Buffer)\n\n\tencoder := gob.NewEncoder(buf)\n\terr := encoder.Encode(storeVersion)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = encoder.Encode(s.data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func (vd *tValueDiffer) writeType(idx int, t reflect.Type, hl bool) {\n\tb := vd.bufi(idx)\n\tif t.PkgPath() == \"\" {\n\t\tswitch t.Kind() {\n\t\tcase reflect.Ptr:\n\t\t\tb.Write(hl, \"*\")\n\t\t\tvd.writeType(idx, t.Elem(), hl)\n\t\tcase reflect.Func:\n\t\t\tvd.writeTypeFunc(idx, t, hl)\n\t\tcase reflect.Chan:\n\t\t\tvd.writeTypeHeadChan(idx, t, hl, false)\n\t\t\tvd.writeType(idx, t.Elem(), hl)\n\t\tcase reflect.Array:\n\t\t\tb.Write(hl, \"[\", t.Len(), \"]\")\n\t\t\tvd.writeType(idx, t.Elem(), hl)\n\t\tcase reflect.Slice:\n\t\t\tb.Write(hl, \"[]\")\n\t\t\tvd.writeType(idx, t.Elem(), hl)\n\t\tcase reflect.Map:\n\t\t\tb.Write(hl, \"map[\")\n\t\t\tvd.writeType(idx, t.Key(), hl)\n\t\t\tb.Write(hl, \"]\")\n\t\t\tvd.writeType(idx, t.Elem(), hl)\n\t\tcase reflect.Struct: // must be unnamed\n\t\t\tb.Write(hl, \"struct\")\n\t\tdefault:\n\t\t\tb.Write(hl, t)\n\t\t}\n\t} else {\n\t\tb.Write(hl, t)\n\t}\n}", "func (i Type) MarshalGQL(w io.Writer) {\n\tfmt.Fprint(w, strconv.Quote(i.String()))\n}", "func EncodeInto(buf *[]byte, val interface{}, opts Options) error {\n err := encodeInto(buf, val, opts)\n if err != nil {\n return err\n }\n *buf = encodeFinish(*buf, opts)\n return err\n}", "func (e *commonFormatEncoder) Type() string {\n\treturn \"json\"\n}", "func encode(message interface{}) *bytes.Buffer {\n\tbuffer := &bytes.Buffer{}\n\t// Write struct's data as bytes\n\terr := binary.Write(buffer, binary.BigEndian, message)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn buffer\n}", "func (o BlobOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Blob) pulumi.StringOutput { return v.Type }).(pulumi.StringOutput)\n}", "func GobGenerateEncoder(w io.Writer) Encoder {\n\treturn gob.NewEncoder(w)\n}", "func phpType(smdType string) string {\n\tswitch smdType {\n\tcase smd.String:\n\t\treturn phpString\n\tcase smd.Array:\n\t\treturn phpArray\n\tcase smd.Boolean:\n\t\treturn phpBoolean\n\tcase smd.Float:\n\t\treturn phpFloat\n\tcase smd.Integer:\n\t\treturn phpInt\n\tcase smd.Object:\n\t\treturn phpObject\n\t}\n\treturn \"mixed\"\n}", "func GobDecode(buffer []byte, value interface{}) error {\n buf := bytes.NewBuffer(buffer)\n decoder := gob.NewDecoder(buf)\n err := decoder.Decode(value)\n if err != nil {\n return gobDebug.Error(err)\n }\n return nil\n}", "func (e EncoderV2) encode(iVal interface{}) error {\n\n\tvar err error\n\tswitch val := iVal.(type) {\n\tcase nil:\n\t\terr = e.encodeNil()\n\tcase bool:\n\t\terr = e.encodeBool(val)\n\tcase int:\n\t\terr = e.encodeInt(int64(val))\n\tcase int8:\n\t\terr = e.encodeInt(int64(val))\n\tcase int16:\n\t\terr = e.encodeInt(int64(val))\n\tcase int32:\n\t\terr = e.encodeInt(int64(val))\n\tcase int64:\n\t\terr = e.encodeInt(val)\n\tcase uint:\n\t\terr = e.encodeInt(int64(val))\n\tcase uint8:\n\t\terr = e.encodeInt(int64(val))\n\tcase uint16:\n\t\terr = e.encodeInt(int64(val))\n\tcase uint32:\n\t\terr = e.encodeInt(int64(val))\n\tcase uint64:\n\t\tif val > math.MaxInt64 {\n\t\t\treturn errors.New(\"Integer too big: %d. 
Max integer supported: %d\", val, int64(math.MaxInt64))\n\t\t}\n\t\terr = e.encodeInt(int64(val))\n\tcase float32:\n\t\terr = e.encodeFloat(float64(val))\n\tcase float64:\n\t\terr = e.encodeFloat(val)\n\tcase string:\n\t\terr = e.encodeString(val)\n\tcase []interface{}:\n\t\terr = e.encodeSlice(val)\n\tcase map[string]interface{}:\n\t\terr = e.encodeMap(val)\n\tcase gotime.Date:\n\t\terr = e.encodeDate(val)\n\tcase gotime.Clock:\n\t\terr = e.encodeClock(val)\n\tcase gotime.LocalClock:\n\t\terr = e.encodeLocalClock(val)\n\tcase gotime.LocalTime:\n\t\terr = e.encodeLocalTime(val)\n\tcase time.Time:\n\t\terr = e.encodeTime(val)\n\tcase time.Duration:\n\t\terr = e.encodeDuration(val)\n\tcase structures.Structure:\n\t\terr = e.encodeStructure(val)\n\tdefault:\n\t\t// arbitrary slice types\n\t\tif reflect.TypeOf(iVal).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(iVal)\n\t\t\tnewSlice := make([]interface{}, s.Len())\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tnewSlice[i] = s.Index(i).Interface()\n\t\t\t}\n\t\t\treturn e.encodeSlice(newSlice)\n\t\t}\n\n\t\treturn errors.New(\"Unrecognized type when encoding data for Bolt transport: %T %+v\", val, val)\n\t}\n\n\treturn err\n}", "func encode(sc *stmtctx.StatementContext, b []byte, vals []types.Datum, comparable bool) (_ []byte, err error) {\n\tb = preRealloc(b, vals, comparable)\n\tfor i, length := 0, len(vals); i < length; i++ {\n\t\tswitch vals[i].Kind() {\n\t\tcase types.KindInt64:\n\t\t\tb = encodeSignedInt(b, vals[i].GetInt64(), comparable)\n\t\tcase types.KindUint64:\n\t\t\tb = encodeUnsignedInt(b, vals[i].GetUint64(), comparable)\n\t\tcase types.KindFloat32, types.KindFloat64:\n\t\t\tb = append(b, floatFlag)\n\t\t\tb = EncodeFloat(b, vals[i].GetFloat64())\n\t\tcase types.KindString:\n\t\t\tb = encodeString(b, vals[i], comparable)\n\t\tcase types.KindBytes:\n\t\t\tb = encodeBytes(b, vals[i].GetBytes(), comparable)\n\t\tcase types.KindMysqlTime:\n\t\t\tb = append(b, uintFlag)\n\t\t\tb, err = EncodeMySQLTime(sc, vals[i].GetMysqlTime(), mysql.TypeUnspecified, b)\n\t\t\tif err != nil {\n\t\t\t\treturn b, err\n\t\t\t}\n\t\tcase types.KindMysqlDuration:\n\t\t\t// duration may have negative value, so we cannot use String to encode directly.\n\t\t\tb = append(b, durationFlag)\n\t\t\tb = EncodeInt(b, int64(vals[i].GetMysqlDuration().Duration))\n\t\tcase types.KindMysqlDecimal:\n\t\t\tb = append(b, decimalFlag)\n\t\t\tb, err = EncodeDecimal(b, vals[i].GetMysqlDecimal(), vals[i].Length(), vals[i].Frac())\n\t\t\tif terror.ErrorEqual(err, types.ErrTruncated) {\n\t\t\t\terr = sc.HandleTruncate(err)\n\t\t\t} else if terror.ErrorEqual(err, types.ErrOverflow) {\n\t\t\t\terr = sc.HandleOverflow(err, err)\n\t\t\t}\n\t\tcase types.KindMysqlEnum:\n\t\t\tb = encodeUnsignedInt(b, uint64(vals[i].GetMysqlEnum().ToNumber()), comparable)\n\t\tcase types.KindMysqlSet:\n\t\t\tb = encodeUnsignedInt(b, uint64(vals[i].GetMysqlSet().ToNumber()), comparable)\n\t\tcase types.KindMysqlBit, types.KindBinaryLiteral:\n\t\t\t// We don't need to handle errors here since the literal is ensured to be able to store in uint64 in convertToMysqlBit.\n\t\t\tvar val uint64\n\t\t\tval, err = vals[i].GetBinaryLiteral().ToInt(sc)\n\t\t\tterror.Log(errors.Trace(err))\n\t\t\tb = encodeUnsignedInt(b, val, comparable)\n\t\tcase types.KindMysqlJSON:\n\t\t\tb = append(b, jsonFlag)\n\t\t\tj := vals[i].GetMysqlJSON()\n\t\t\tb = append(b, j.TypeCode)\n\t\t\tb = append(b, j.Value...)\n\t\tcase types.KindNull:\n\t\t\tb = append(b, NilFlag)\n\t\tcase types.KindMinNotNull:\n\t\t\tb = append(b, 
bytesFlag)\n\t\tcase types.KindMaxValue:\n\t\t\tb = append(b, maxFlag)\n\t\tdefault:\n\t\t\treturn b, errors.Errorf(\"unsupport encode type %d\", vals[i].Kind())\n\t\t}\n\t}\n\n\treturn b, errors.Trace(err)\n}", "func (t *LegacyTranscoder) Encode(value interface{}) ([]byte, uint32, error) {\n\tvar bytes []byte\n\tvar flags uint32\n\tvar err error\n\n\tswitch typeValue := value.(type) {\n\tcase []byte:\n\t\tbytes = typeValue\n\t\tflags = gocbcore.EncodeCommonFlags(gocbcore.BinaryType, gocbcore.NoCompression)\n\tcase *[]byte:\n\t\tbytes = *typeValue\n\t\tflags = gocbcore.EncodeCommonFlags(gocbcore.BinaryType, gocbcore.NoCompression)\n\tcase string:\n\t\tbytes = []byte(typeValue)\n\t\tflags = gocbcore.EncodeCommonFlags(gocbcore.StringType, gocbcore.NoCompression)\n\tcase *string:\n\t\tbytes = []byte(*typeValue)\n\t\tflags = gocbcore.EncodeCommonFlags(gocbcore.StringType, gocbcore.NoCompression)\n\tcase json.RawMessage:\n\t\tbytes = typeValue\n\t\tflags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression)\n\tcase *json.RawMessage:\n\t\tbytes = *typeValue\n\t\tflags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression)\n\tcase *interface{}:\n\t\treturn t.Encode(*typeValue)\n\tdefault:\n\t\tbytes, err = json.Marshal(value)\n\t\tif err != nil {\n\t\t\treturn nil, 0, err\n\t\t}\n\t\tflags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression)\n\t}\n\n\t// No compression supported currently\n\n\treturn bytes, flags, nil\n}", "func (c *coder) encoderForType(keyOrValue, typ string) (func([]byte) (json.RawMessage, error), error) {\n\tvar enc func([]byte) string\n\tswitch typ {\n\tcase \"json\":\n\t\treturn func(data []byte) (json.RawMessage, error) {\n\t\t\tif err := json.Unmarshal(data, new(json.RawMessage)); err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid JSON value %q: %v\", data, err)\n\t\t\t}\n\t\t\treturn json.RawMessage(data), nil\n\t\t}, nil\n\tcase \"hex\":\n\t\tenc = hex.EncodeToString\n\tcase \"base64\":\n\t\tenc = base64.StdEncoding.EncodeToString\n\tcase \"string\":\n\t\tenc = func(data []byte) string {\n\t\t\treturn string(data)\n\t\t}\n\tcase \"avro\":\n\t\treturn c.encodeAvro, nil\n\tcase \"none\":\n\t\treturn func([]byte) (json.RawMessage, error) {\n\t\t\treturn nil, nil\n\t\t}, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(`unsupported decoder %#v, only json, string, hex, base64 and avro are supported`, typ)\n\t}\n\treturn func(data []byte) (json.RawMessage, error) {\n\t\tif data == nil {\n\t\t\treturn nullJSON, nil\n\t\t}\n\t\tdata1, err := json.Marshal(enc(data))\n\t\tif err != nil {\n\t\t\t// marshaling a string cannot fail but be defensive.\n\t\t\treturn nil, err\n\t\t}\n\t\treturn json.RawMessage(data1), nil\n\t}, nil\n}", "func Encode(out *bytes.Buffer, byts []byte) (*EncodeInfo, error) {\n\tt := createTree(byts)\n\tconst codeSize = 32 // not optimize\n\tc := code{\n\t\tbits: make([]bool, 0, codeSize),\n\t}\n\te := newEncoder()\n\tif err := e.encode(t, c); err != nil {\n\t\treturn nil, err\n\t}\n\tsb, err := e.write(out, byts)\n\tei := &EncodeInfo{\n\t\tbytCodeMap: e.bytCodeMap,\n\t\tSize: sb,\n\t}\n\treturn ei, err\n}", "func (sym *symtab) pyObjectToGo(typ types.Type, sy *symbol, objnm string) (string, error) {\n\tbstr := \"\"\n\tbt, isb := typ.Underlying().(*types.Basic)\n\tswitch {\n\t// case vsym.goname == \"interface{}\":\n\t// \tbstr += fmt.Sprintf(\"C.PyTuple_SetItem(%s, %d, C.gopy_build_string(%s(%s)%s))\\n\", varnm, i, vsym.go2py, anm, vsym.go2pyParenEx)\n\t// case vsym.hasHandle(): // note: assuming int64 
handles\n\t// \tbstr += fmt.Sprintf(\"C.PyTuple_SetItem(%s, %d, C.gopy_build_int64(C.int64_t(%s(%s)%s)))\\n\", varnm, i, vsym.go2py, anm, vsym.go2pyParenEx)\n\tcase isb:\n\t\tbk := bt.Kind()\n\t\tswitch {\n\t\tcase types.Int <= bk && bk <= types.Int64:\n\t\t\tbstr += fmt.Sprintf(\"%s(C.PyLong_AsLongLong(%s))\", sy.goname, objnm)\n\t\tcase types.Uint <= bk && bk <= types.Uintptr:\n\t\t\tbstr += fmt.Sprintf(\"%s(C.PyLong_AsUnsignedLongLong(%s))\", sy.goname, objnm)\n\t\tcase types.Float32 <= bk && bk <= types.Float64:\n\t\t\tbstr += fmt.Sprintf(\"%s(C.PyFloat_AsDouble(%s))\", sy.goname, objnm)\n\t\tcase bk == types.String:\n\t\t\tbstr += fmt.Sprintf(\"C.GoString(C.PyBytes_AsString(%s))\", objnm)\n\t\tcase bk == types.Bool:\n\t\t\tbstr += fmt.Sprintf(\"boolPyToGo(C.char(C.PyLong_AsLongLong(%s)))\", objnm)\n\t\t}\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"pyObjectToGo: type not handled: %s\", typ.String())\n\t}\n\treturn bstr, nil\n}", "func (g *Graph) GobEncode() ([]byte, error) {\n\tgGob := graphGob{[]string{}, map[string]map[string]int{}}\n\n\t// add vertexes and edges to gGob\n\tfor key, v := range g.vertexes {\n\t\tgGob.Vertexes = append(gGob.Vertexes, key)\n\n\t\tgGob.Edges[key] = map[string]int{}\n\n\t\t// for each neighbor...\n\t\tfor neighbor, weight := range v.neighbors {\n\t\t\t// save the edge connection to the neighbor into the edges map\n\t\t\tgGob.Edges[key][neighbor.key] = weight\n\t\t}\n\t}\n\n\t// encode gGob\n\tbuf := &bytes.Buffer{}\n\tenc := gob.NewEncoder(buf)\n\terr := enc.Encode(gGob)\n\n\treturn buf.Bytes(), err\n}", "func Encode(b *bytes.Buffer, v *Value) (int, error) {\n\tswitch v.Type {\n\tcase DOUBLE:\n\t\treturn EncodeDouble(b, v.value.(float64))\n\n\tcase BOOLEAN:\n\t\treturn EncodeBoolean(b, v.value.(bool))\n\n\tcase STRING:\n\t\tfallthrough\n\tcase LONG_STRING:\n\t\treturn EncodeString(b, v.value.(string))\n\n\tcase OBJECT:\n\t\treturn EncodeObject(b, v)\n\n\tcase ECMA_ARRAY:\n\t\treturn EncodeECMAArray(b, v)\n\n\tcase STRICT_ARRAY:\n\t\treturn EncodeStrictArray(b, v)\n\n\tcase DATE:\n\t\treturn EncodeDate(b, v.value.(float64), v.offset)\n\n\tcase NULL:\n\t\treturn EncodeNull(b)\n\n\tcase UNDEFINED:\n\t\treturn EncodeUndefined(b)\n\n\tdefault:\n\t\tpanic(fmt.Errorf(\"unrecognized AMF type 0x%02X\", v.Type))\n\t}\n}", "func dumptype(t *_type) {\n\tif t == nil {\n\t\treturn\n\t}\n\n\t// If we've definitely serialized the type before,\n\t// no need to do it again.\n\tb := &typecache[t.hash&(typeCacheBuckets-1)]\n\tif t == b.t[0] {\n\t\treturn\n\t}\n\tfor i := 1; i < typeCacheAssoc; i++ {\n\t\tif t == b.t[i] {\n\t\t\t// Move-to-front\n\t\t\tfor j := i; j > 0; j-- {\n\t\t\t\tb.t[j] = b.t[j-1]\n\t\t\t}\n\t\t\tb.t[0] = t\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Might not have been dumped yet. 
Dump it and\n\t// remember we did so.\n\tfor j := typeCacheAssoc - 1; j > 0; j-- {\n\t\tb.t[j] = b.t[j-1]\n\t}\n\tb.t[0] = t\n\n\t// dump the type\n\tdumpint(tagType)\n\tdumpint(uint64(uintptr(unsafe.Pointer(t))))\n\tdumpint(uint64(t.size))\n\tif x := t.uncommon(); x == nil || t.nameOff(x.pkgpath).name() == \"\" {\n\t\tdumpstr(t.string())\n\t} else {\n\t\tpkgpathstr := t.nameOff(x.pkgpath).name()\n\t\tpkgpath := stringStructOf(&pkgpathstr)\n\t\tnamestr := t.name()\n\t\tname := stringStructOf(&namestr)\n\t\tdumpint(uint64(uintptr(pkgpath.len) + 1 + uintptr(name.len)))\n\t\tdwrite(pkgpath.str, uintptr(pkgpath.len))\n\t\tdwritebyte('.')\n\t\tdwrite(name.str, uintptr(name.len))\n\t}\n\tdumpbool(t.kind&kindDirectIface == 0 || t.ptrdata != 0)\n}", "func (JSONMap) GormDBDataType(db *gorm.DB, field *schema.Field) string {\n\tswitch db.Dialector.Name() {\n\tcase \"sqlite\":\n\t\treturn \"JSON\"\n\tcase \"mysql\":\n\t\treturn \"JSON\"\n\tcase \"postgres\":\n\t\treturn \"JSONB\"\n\tcase \"sqlserver\":\n\t\treturn \"NVARCHAR(MAX)\"\n\t}\n\treturn \"\"\n}", "func (enc *DictByteArrayEncoder) Type() parquet.Type {\n\treturn parquet.Types.ByteArray\n}", "func (w *shardWorker) encodeData(val interface{}) ([]byte, error) {\n\t// Reusing encoders gave issues\n\tencoder := gob.NewEncoder(w.buffer)\n\n\terr := encoder.Encode(val)\n\tif err != nil {\n\t\tw.buffer.Reset()\n\t\treturn nil, err\n\t}\n\n\tencoded := make([]byte, w.buffer.Len())\n\t_, err = w.buffer.Read(encoded)\n\n\tw.buffer.Reset()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn encoded, nil\n}", "func (p valuePointer) Encode(b []byte) []byte {\n\tbinary.BigEndian.PutUint32(b[:4], p.Fid)\n\tbinary.BigEndian.PutUint32(b[4:8], p.Len)\n\tbinary.BigEndian.PutUint32(b[8:valuePointerEncodedSize], p.Offset)\n\treturn b[:valuePointerEncodedSize]\n}", "func MarshalWithParams(val interface{}, params string) ([]byte, error) {}", "func encode(ins interface{}) ([]byte, error) {\n\treturn json.Marshal(ins)\n}", "func toByteArray(data interface{}) ([]byte, error) {\n\tvar buf bytes.Buffer\n\tenc := gob.NewEncoder(&buf)\n\terr := enc.Encode(data)\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\treturn buf.Bytes(), nil\n\t}\n}", "func RegisterAccountTypeCodec(o interface{}, name string) {\n\tModuleCdc.RegisterConcrete(o, name, nil)\n}", "func typeEncoder(t reflect.Type) (encoderFunc, error) {\n\tencoderCache.RLock()\n\tf := encoderCache.m[t]\n\tencoderCache.RUnlock()\n\n\tif f != nil {\n\t\treturn f, nil\n\t}\n\n\tf, err := newTypeEncoder(t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// lock the cache and initialize it\n\tencoderCache.Lock()\n\tif encoderCache.m == nil {\n\t\tencoderCache.m = make(map[reflect.Type]encoderFunc)\n\t}\n\n\tencoderCache.m[t] = f\n\tencoderCache.Unlock()\n\n\treturn f, nil\n}" ]
[ "0.6781423", "0.6659542", "0.6471993", "0.61916727", "0.6146297", "0.6085416", "0.60611904", "0.59847766", "0.5941497", "0.5839333", "0.5795199", "0.5722494", "0.57033974", "0.5649842", "0.5639476", "0.5628993", "0.5625364", "0.56061196", "0.55789036", "0.557128", "0.5569147", "0.5484018", "0.54834986", "0.54832345", "0.54807806", "0.5477892", "0.54686564", "0.54686564", "0.5465917", "0.54478055", "0.54432774", "0.54384685", "0.5394483", "0.5378532", "0.53728235", "0.5362562", "0.53503335", "0.53331614", "0.53322977", "0.5314934", "0.5293202", "0.52919924", "0.5290265", "0.52672046", "0.52581215", "0.5255393", "0.52074283", "0.52073336", "0.5174418", "0.5171447", "0.5165648", "0.51641524", "0.5140295", "0.51364267", "0.51242214", "0.5106228", "0.50817233", "0.5080924", "0.50717974", "0.5069707", "0.50415397", "0.5037497", "0.50207454", "0.5014264", "0.5006385", "0.500387", "0.49935606", "0.49848676", "0.4983296", "0.49646", "0.49483415", "0.49418488", "0.49341816", "0.4916391", "0.4915911", "0.49014533", "0.48914644", "0.4888074", "0.4885203", "0.48849255", "0.48817694", "0.48779163", "0.4874454", "0.48730826", "0.4872193", "0.48680475", "0.48610044", "0.48591715", "0.48544237", "0.48474807", "0.48403195", "0.4833944", "0.48317918", "0.48261204", "0.48184648", "0.48101562", "0.48043153", "0.48007935", "0.479993", "0.4794504" ]
0.6314191
3
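For orientation between rows: the EncodeType record above and the DecodeType record that follows are two halves of one gob round trip. Below is a minimal, self-contained sketch of how such a pair might be exercised. The GobTranscoder type, its NewGobTranscoder constructor, and the buffer field names are illustrative assumptions modeled on the document fields, not part of the dataset.

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"io"
)

// GobTranscoder pairs a persistent gob encoder and decoder over two buffers.
// The type and its fields are assumptions for illustration only.
type GobTranscoder struct {
	outBytes *bytes.Buffer
	inBytes  *bytes.Buffer
	encoder  *gob.Encoder
	decoder  *gob.Decoder
}

func NewGobTranscoder() *GobTranscoder {
	g := &GobTranscoder{outBytes: &bytes.Buffer{}, inBytes: &bytes.Buffer{}}
	g.encoder = gob.NewEncoder(g.outBytes)
	g.decoder = gob.NewDecoder(g.inBytes)
	return g
}

// EncodeType gob-encodes t and returns a copy of the bytes. The copy matters:
// bytes.Buffer.Bytes() aliases the buffer, which is reset for the next call.
func (g *GobTranscoder) EncodeType(t interface{}) ([]byte, error) {
	defer g.outBytes.Reset()
	if err := g.encoder.Encode(t); err != nil {
		return nil, err
	}
	out := make([]byte, g.outBytes.Len())
	copy(out, g.outBytes.Bytes())
	return out, nil
}

// DecodeType feeds buf to the persistent decoder and fills outT, which must
// be a pointer to the value being decoded.
func (g *GobTranscoder) DecodeType(buf []byte, outT interface{}) error {
	defer g.inBytes.Reset()
	if _, err := io.Copy(g.inBytes, bytes.NewReader(buf)); err != nil {
		return err
	}
	return g.decoder.Decode(outT)
}

func main() {
	type point struct{ X, Y int }
	tr := NewGobTranscoder()
	raw, err := tr.EncodeType(point{X: 3, Y: 4})
	if err != nil {
		panic(err)
	}
	var p point
	if err := tr.DecodeType(raw, &p); err != nil {
		panic(err)
	}
	fmt.Println(p) // prints {3 4}
}

Design note: because the encoder/decoder pair is persistent, gob transmits each type descriptor only once, so every message produced by EncodeType must be fed to DecodeType on the same transcoder, in order. That is also why the mutex-guarded variants in the document fields lock around the shared buffers.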
DecodeType will attempt to decode the buffer into the pointer outT
func (g *GobTranscoder) DecodeType(buf []byte, outT interface{}) error {\n\tg.decoderMut.Lock()\n\tdefer func() {\n\t\tg.inBytes.Reset()\n\t\tg.decoderMut.Unlock()\n\t}()\n\treader := bytes.NewReader(buf)\n\tif _, err := io.Copy(g.inBytes, reader); err != nil {\n\t\treturn err\n\t}\n\treturn g.decoder.Decode(outT)\n}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (g *GobDecoderLight) DecodeType(buf []byte, outT interface{}) error {\n\tdefer func() {\n\t\tg.bytes.Reset()\n\t}()\n\treader := bytes.NewReader(buf)\n\tif _, err := io.Copy(g.bytes, reader); err != nil {\n\t\treturn err\n\t}\n\treturn g.decoder.Decode(outT)\n}", "func (dec *Decoder) decodeType(isInterface bool) Code {\n\treturn 0\n}", "func (d *Decoder) Type() (Type, error) {\n\n\t// start with 1 byte and append to it until we get a clean varint\n\tvar (\n\t\ttag uint64\n\t\ttagBytes []byte\n\t)\n\nreadTagByte:\n\tfor {\n\t\tvar singleByte = make([]byte, 1)\n\t\t_, err := io.ReadFull(d.input, singleByte)\n\t\tif err != nil {\n\t\t\treturn typeUninited, err\n\t\t}\n\t\ttagBytes = append(tagBytes, singleByte[0])\n\n\t\tvar byteCount int\n\t\ttag, byteCount = varint.ConsumeVarint(tagBytes)\n\t\tswitch {\n\t\tcase byteCount == varint.ErrCodeTruncated:\n\t\t\tcontinue readTagByte\n\t\tcase byteCount > 0:\n\t\t\tfmt.Fprintln(dbg, \"\\tvarint byteCount:\", byteCount)\n\t\t\tbreak readTagByte // we got a varint!\n\t\tdefault:\n\t\t\treturn typeUninited, fmt.Errorf(\"bipf: broken varint tag field\")\n\t\t}\n\t}\n\n\tfmt.Fprintf(dbg, \"\\tdecoded %x to tag: %d\\n\", tagBytes, tag)\n\n\t// apply mask to get type\n\td.currentType = Type(tag & tagMask)\n\tif d.currentType >= TypeReserved {\n\t\treturn 0, fmt.Errorf(\"bipf: invalid type: %s\", d.currentType)\n\t}\n\n\t// shift right to get length\n\td.currentLen = uint64(tag >> tagSize)\n\n\t// drop some debugging info\n\tfmt.Fprintln(dbg, \"\\tvalue type:\", d.currentType)\n\tfmt.Fprintln(dbg, \"\\tvalue length:\", d.currentLen)\n\tfmt.Fprintln(dbg)\n\tdbg.Sync()\n\n\treturn d.currentType, nil\n}", "func (g *Generator) genTypeDecoder(t reflect.Type, out string, tags fieldTags, indent int) error {\n\tws := strings.Repeat(\" \", indent)\n\n\tunmarshalerIface := reflect.TypeOf((*easyjson.Unmarshaler)(nil)).Elem()\n\tif reflect.PtrTo(t).Implements(unmarshalerIface) {\n\t\tfmt.Fprintln(g.out, ws+\"(\"+out+\").UnmarshalEasyJSON(in)\")\n\t\treturn nil\n\t}\n\n\tunmarshalerIface = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()\n\tif reflect.PtrTo(t).Implements(unmarshalerIface) {\n\t\tfmt.Fprintln(g.out, ws+\"if data := in.Raw(); in.Ok() {\")\n\t\tfmt.Fprintln(g.out, ws+\" in.AddError( (\"+out+\").UnmarshalJSON(data) )\")\n\t\tfmt.Fprintln(g.out, ws+\"}\")\n\t\treturn nil\n\t}\n\n\tunmarshalerIface = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()\n\tif reflect.PtrTo(t).Implements(unmarshalerIface) {\n\t\tfmt.Fprintln(g.out, ws+\"if data := in.UnsafeBytes(); in.Ok() {\")\n\t\tfmt.Fprintln(g.out, ws+\" in.AddError( (\"+out+\").UnmarshalText(data) )\")\n\t\tfmt.Fprintln(g.out, ws+\"}\")\n\t\treturn nil\n\t}\n\n\terr := g.genTypeDecoderNoCheck(t, out, tags, indent)\n\treturn err\n}", "func Decode(buf []byte, out interface{}) error {\n\treturn codec.NewDecoder(bytes.NewReader(buf), msgpackHandle).Decode(out)\n}", "func Decode(buf []byte, out interface{}) error {\n\treturn codec.NewDecoder(bytes.NewReader(buf), msgpackHandle).Decode(out)\n}", "func (decoder *Decoder) ReadType() (string, error) {\n s, err := decoder.ReadString()\n if err != nil {\n decoder.Recover()\n refId, err := decoder.ReadInt()\n if err != nil {\n return \"\", errors.New(\"readType: unexpected code\")\n }\n ref, ok := decoder.refMap[refId]\n if !ok {\n return \"\", errors.New(\"readType: unknown type\")\n }\n stringType, ok := ref.(string) // assertion\n if !ok {\n return \"\", errors.New(\"readType: unknown type\")\n }\n decoder.success()\n return stringType, nil\n }\n 
decoder.addRef(s)\n decoder.success()\n return s, nil\n}", "func (t *LegacyTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {\n\tvalueType, compression := gocbcore.DecodeCommonFlags(flags)\n\n\t// Make sure compression is disabled\n\tif compression != gocbcore.NoCompression {\n\t\treturn errors.New(\"unexpected value compression\")\n\t}\n\n\t// Normal types of decoding\n\tif valueType == gocbcore.BinaryType {\n\t\tswitch typedOut := out.(type) {\n\t\tcase *[]byte:\n\t\t\t*typedOut = bytes\n\t\t\treturn nil\n\t\tcase *interface{}:\n\t\t\t*typedOut = bytes\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn errors.New(\"you must encode binary in a byte array or interface\")\n\t\t}\n\t} else if valueType == gocbcore.StringType {\n\t\tswitch typedOut := out.(type) {\n\t\tcase *string:\n\t\t\t*typedOut = string(bytes)\n\t\t\treturn nil\n\t\tcase *interface{}:\n\t\t\t*typedOut = string(bytes)\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn errors.New(\"you must encode a string in a string or interface\")\n\t\t}\n\t} else if valueType == gocbcore.JSONType {\n\t\terr := json.Unmarshal(bytes, &out)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"unexpected expectedFlags value\")\n}", "func (c byteCodec) Decode(data []byte, out interface{}) error {\n\t// #1 reflect method but is slow\n\t//reflect.Indirect(reflect.ValueOf(i)).SetBytes(data)\n\n\t// #2\n\tswitch v := out.(type) {\n\tcase *[]byte:\n\t\t*v = *&data\n\t\treturn nil\n\tcase *string:\n\t\t*v = string(data)\n\t\treturn nil\n\n\tdefault:\n\t\treturn fmt.Errorf(\"%T is not a []byte\", out)\n\t}\n}", "func (bs endecBytes) Type() byte {\n\treturn bs[0] >> 4\n}", "func (t SGTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {\n\n\tswitch typedOut := out.(type) {\n\tcase *[]byte:\n\t\t*typedOut = bytes\n\t\treturn nil\n\tdefault:\n\t\tdefaultTranscoder := gocb.DefaultTranscoder{}\n\t\treturn defaultTranscoder.Decode(bytes, flags, out)\n\n\t}\n\n}", "func decodeMsgPack(buf []byte, out interface{}) error {\n\treturn codec.NewDecoder(bytes.NewReader(buf), msgpackHandle).Decode(out)\n}", "func decodeMsgPack(buf []byte, out interface{}) error {\n\treturn codec.NewDecoder(bytes.NewReader(buf), msgpackHandle).Decode(out)\n}", "func (d Decoder) Decode(out interface{}) (err error) {\n\treturn d.Provider.Decode(out)\n}", "func (d *Decoder) Peek() Type {\n\tdefer func() { d.lastCall = peekCall }()\n\tif d.lastCall == readCall {\n\t\td.value, d.err = d.Read()\n\t}\n\treturn d.value.typ\n}", "func (d *Decoder) DecodeValue(rv reflect.Value) error {\n\tif rv.Kind() != reflect.Ptr {\n\t\treturn fmt.Errorf(\"msgpack: not a pointer type %q\", rv.Type())\n\t}\n\n\tcustomType, err := d.readCustomType()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn d.decodeValue(rv, customType)\n}", "func interfaceDecode(dec *gob.Decoder) Pythagoras {\n\t//The decode will fail unless the concrete type on the wire has been registered.\n\t//we registered it in the calling function\n\tvar p Pythagoras\n\terr := dec.Decode(&p)\n\tif err != nil {\n\t\tlog.Fatal(\"Decode:\", err)\n\t}\n\treturn p\n}", "func (t *JSONTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {\n\tvalueType, compression := gocbcore.DecodeCommonFlags(flags)\n\n\t// Make sure compression is disabled\n\tif compression != gocbcore.NoCompression {\n\t\treturn errors.New(\"unexpected value compression\")\n\t}\n\n\t// Normal types of decoding\n\tif valueType == gocbcore.BinaryType {\n\t\treturn errors.New(\"binary datatype is 
not supported by JSONTranscoder\")\n\t} else if valueType == gocbcore.StringType {\n\t\treturn errors.New(\"string datatype is not supported by JSONTranscoder\")\n\t} else if valueType == gocbcore.JSONType {\n\t\terr := json.Unmarshal(bytes, &out)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"unexpected expectedFlags value\")\n}", "func (d *Decoder) Decode(r io.Reader, t *dials.Type) (reflect.Value, error) {\n\ttomlBytes, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn reflect.Value{}, fmt.Errorf(\"error reading TOML: %s\", err)\n\t}\n\n\t// Use the TagCopyingMangler to copy over TOML tags from dials tags if TOML\n\t// tags aren't specified.\n\ttfmr := transform.NewTransformer(t.Type(),\n\t\t&tagformat.TagCopyingMangler{\n\t\t\tSrcTag: common.DialsTagName, NewTag: TOMLTagName})\n\tval, tfmErr := tfmr.Translate()\n\tif tfmErr != nil {\n\t\treturn reflect.Value{}, fmt.Errorf(\"failed to convert tags: %s\", tfmErr)\n\t}\n\n\t// Get a pointer to our value, so we can pass that.\n\tinstance := val.Addr().Interface()\n\terr = tomlparser.Unmarshal(tomlBytes, instance)\n\tif err != nil {\n\t\treturn reflect.Value{}, err\n\t}\n\n\tunmangledVal, unmangleErr := tfmr.ReverseTranslate(val)\n\tif unmangleErr != nil {\n\t\treturn reflect.Value{}, unmangleErr\n\t}\n\n\treturn unmangledVal, nil\n}", "func (dec *Decoder) Decode(e interface{}) error {\n\tif e == nil {\n\t\treturn dec.DecodeValue(reflect.Value{})\n\t}\n\tvalue := reflect.ValueOf(e)\n\t// If e represents a value as opposed to a pointer, the answer won't\n\t// get back to the caller. Make sure it's a pointer.\n\tif value.Type().Kind() != reflect.Ptr {\n\t\tdec.err = errors.New(\"binpack: attempt to decode into a non-pointer\")\n\t\treturn dec.err\n\t}\n\treturn dec.DecodeValue(value)\n}", "func Decode(data []byte) any {\n\tvar buffer = new(protocol.ByteBuffer)\n\tbuffer.WriteUBytes(data)\n\tvar packet = protocol.Read(buffer)\n\treturn packet\n}", "func readType(r io.ByteReader) *Type {\n\tt := &Type{}\n\tt.Address = readUvarint(r)\n\tt.Size = readUvarint(r)\n\tt.Name = readString(r)\n\tt.IsPtr = readUvarint(r) == 1\n\tt.FieldList = readFieldList(r)\n\treturn t\n}", "func (d *Decoder) Decode(ctx context.Context, b []byte) (interface{}, error) {\n\tnv := reflect.New(d.typ).Interface()\n\tif err := d.fn(ctx, b, nv); err != nil {\n\t\treturn nil, err\n\t}\n\tptr := reflect.ValueOf(nv)\n\treturn ptr.Elem().Interface(), nil\n}", "func (t *RawBinaryTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {\n\tvalueType, compression := gocbcore.DecodeCommonFlags(flags)\n\n\t// Make sure compression is disabled\n\tif compression != gocbcore.NoCompression {\n\t\treturn errors.New(\"unexpected value compression\")\n\t}\n\n\t// Normal types of decoding\n\tif valueType == gocbcore.BinaryType {\n\t\tswitch typedOut := out.(type) {\n\t\tcase *[]byte:\n\t\t\t*typedOut = bytes\n\t\t\treturn nil\n\t\tcase *interface{}:\n\t\t\t*typedOut = bytes\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn errors.New(\"you must encode binary in a byte array or interface\")\n\t\t}\n\t} else if valueType == gocbcore.StringType {\n\t\treturn errors.New(\"only binary datatype is supported by RawBinaryTranscoder\")\n\t} else if valueType == gocbcore.JSONType {\n\t\treturn errors.New(\"only binary datatype is supported by RawBinaryTranscoder\")\n\t}\n\n\treturn errors.New(\"unexpected expectedFlags value\")\n}", "func Decode(data interface{}, ptr interface{}) {\n v := Value{data}\n v.Decode(ptr)\n}", "func 
DecodeRaw(scope *Scope, bytes tf.Output, out_type tf.DataType, optional ...DecodeRawAttr) (output tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\tattrs := map[string]interface{}{\"out_type\": out_type}\n\tfor _, a := range optional {\n\t\ta(attrs)\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"DecodeRaw\",\n\t\tInput: []tf.Input{\n\t\t\tbytes,\n\t\t},\n\t\tAttrs: attrs,\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}", "func decodeMsgPack(buf []byte, out interface{}) error {\n\tr := bytes.NewBuffer(buf)\n\thd := MsgpackHandle{}\n\tdec := NewDecoder(r, &hd)\n\treturn dec.Decode(out)\n}", "func (g *Generator) genTypeDecoderNoCheck(t reflect.Type, out string, tags fieldTags, indent int) error {\n\tws := strings.Repeat(\" \", indent)\n\t// Check whether type is primitive, needs to be done after interface check.\n\tif dec := customDecoders[t.String()]; dec != \"\" {\n\t\tfmt.Fprintln(g.out, ws+out+\" = \"+dec)\n\t\treturn nil\n\t} else if dec := primitiveStringDecoders[t.Kind()]; dec != \"\" && tags.asString {\n\t\tif tags.intern && t.Kind() == reflect.String {\n\t\t\tdec = \"in.StringIntern()\"\n\t\t}\n\t\tfmt.Fprintln(g.out, ws+out+\" = \"+g.getType(t)+\"(\"+dec+\")\")\n\t\treturn nil\n\t} else if dec := primitiveDecoders[t.Kind()]; dec != \"\" {\n\t\tif tags.intern && t.Kind() == reflect.String {\n\t\t\tdec = \"in.StringIntern()\"\n\t\t}\n\t\tif tags.noCopy && t.Kind() == reflect.String {\n\t\t\tdec = \"in.UnsafeString()\"\n\t\t}\n\t\tfmt.Fprintln(g.out, ws+out+\" = \"+g.getType(t)+\"(\"+dec+\")\")\n\t\treturn nil\n\t}\n\n\tswitch t.Kind() {\n\tcase reflect.Slice:\n\t\ttmpVar := g.uniqueVarName()\n\t\telem := t.Elem()\n\n\t\tif elem.Kind() == reflect.Uint8 && elem.Name() == \"uint8\" {\n\t\t\tfmt.Fprintln(g.out, ws+\"if in.IsNull() {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" in.Skip()\")\n\t\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = nil\")\n\t\t\tfmt.Fprintln(g.out, ws+\"} else {\")\n\t\t\tif g.simpleBytes {\n\t\t\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = []byte(in.String())\")\n\t\t\t} else {\n\t\t\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = in.Bytes()\")\n\t\t\t}\n\n\t\t\tfmt.Fprintln(g.out, ws+\"}\")\n\n\t\t} else {\n\n\t\t\tcapacity := 1\n\t\t\tif elem.Size() > 0 {\n\t\t\t\tcapacity = minSliceBytes / int(elem.Size())\n\t\t\t}\n\n\t\t\tfmt.Fprintln(g.out, ws+\"if in.IsNull() {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" in.Skip()\")\n\t\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = nil\")\n\t\t\tfmt.Fprintln(g.out, ws+\"} else {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" in.Delim('[')\")\n\t\t\tfmt.Fprintln(g.out, ws+\" if \"+out+\" == nil {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" if !in.IsDelim(']') {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = make(\"+g.getType(t)+\", 0, \"+fmt.Sprint(capacity)+\")\")\n\t\t\tfmt.Fprintln(g.out, ws+\" } else {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = \"+g.getType(t)+\"{}\")\n\t\t\tfmt.Fprintln(g.out, ws+\" }\")\n\t\t\tfmt.Fprintln(g.out, ws+\" } else { \")\n\t\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = (\"+out+\")[:0]\")\n\t\t\tfmt.Fprintln(g.out, ws+\" }\")\n\t\t\tfmt.Fprintln(g.out, ws+\" for !in.IsDelim(']') {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" var \"+tmpVar+\" \"+g.getType(elem))\n\n\t\t\tif err := g.genTypeDecoder(elem, tmpVar, tags, indent+2); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = append(\"+out+\", \"+tmpVar+\")\")\n\t\t\tfmt.Fprintln(g.out, ws+\" in.WantComma()\")\n\t\t\tfmt.Fprintln(g.out, ws+\" }\")\n\t\t\tfmt.Fprintln(g.out, ws+\" in.Delim(']')\")\n\t\t\tfmt.Fprintln(g.out, 
ws+\"}\")\n\t\t}\n\n\tcase reflect.Array:\n\t\titerVar := g.uniqueVarName()\n\t\telem := t.Elem()\n\n\t\tif elem.Kind() == reflect.Uint8 && elem.Name() == \"uint8\" {\n\t\t\tfmt.Fprintln(g.out, ws+\"if in.IsNull() {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" in.Skip()\")\n\t\t\tfmt.Fprintln(g.out, ws+\"} else {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" copy(\"+out+\"[:], in.Bytes())\")\n\t\t\tfmt.Fprintln(g.out, ws+\"}\")\n\n\t\t} else {\n\n\t\t\tlength := t.Len()\n\n\t\t\tfmt.Fprintln(g.out, ws+\"if in.IsNull() {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" in.Skip()\")\n\t\t\tfmt.Fprintln(g.out, ws+\"} else {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" in.Delim('[')\")\n\t\t\tfmt.Fprintln(g.out, ws+\" \"+iterVar+\" := 0\")\n\t\t\tfmt.Fprintln(g.out, ws+\" for !in.IsDelim(']') {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" if \"+iterVar+\" < \"+fmt.Sprint(length)+\" {\")\n\n\t\t\tif err := g.genTypeDecoder(elem, \"(\"+out+\")[\"+iterVar+\"]\", tags, indent+3); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfmt.Fprintln(g.out, ws+\" \"+iterVar+\"++\")\n\t\t\tfmt.Fprintln(g.out, ws+\" } else {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" in.SkipRecursive()\")\n\t\t\tfmt.Fprintln(g.out, ws+\" }\")\n\t\t\tfmt.Fprintln(g.out, ws+\" in.WantComma()\")\n\t\t\tfmt.Fprintln(g.out, ws+\" }\")\n\t\t\tfmt.Fprintln(g.out, ws+\" in.Delim(']')\")\n\t\t\tfmt.Fprintln(g.out, ws+\"}\")\n\t\t}\n\n\tcase reflect.Struct:\n\t\tdec := g.getDecoderName(t)\n\t\tg.addType(t)\n\n\t\tif len(out) > 0 && out[0] == '*' {\n\t\t\t// NOTE: In order to remove an extra reference to a pointer\n\t\t\tfmt.Fprintln(g.out, ws+dec+\"(in, \"+out[1:]+\")\")\n\t\t} else {\n\t\t\tfmt.Fprintln(g.out, ws+dec+\"(in, &\"+out+\")\")\n\t\t}\n\n\tcase reflect.Ptr:\n\t\tfmt.Fprintln(g.out, ws+\"if in.IsNull() {\")\n\t\tfmt.Fprintln(g.out, ws+\" in.Skip()\")\n\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = nil\")\n\t\tfmt.Fprintln(g.out, ws+\"} else {\")\n\t\tfmt.Fprintln(g.out, ws+\" if \"+out+\" == nil {\")\n\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = new(\"+g.getType(t.Elem())+\")\")\n\t\tfmt.Fprintln(g.out, ws+\" }\")\n\n\t\tif err := g.genTypeDecoder(t.Elem(), \"*\"+out, tags, indent+1); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Fprintln(g.out, ws+\"}\")\n\n\tcase reflect.Map:\n\t\tkey := t.Key()\n\t\tkeyDec, ok := primitiveStringDecoders[key.Kind()]\n\t\tif !ok && !hasCustomUnmarshaler(key) {\n\t\t\treturn fmt.Errorf(\"map type %v not supported: only string and integer keys and types implementing json.Unmarshaler are allowed\", key)\n\t\t} // else assume the caller knows what they are doing and that the custom unmarshaler performs the translation from string or integer keys to the key type\n\t\telem := t.Elem()\n\t\ttmpVar := g.uniqueVarName()\n\t\tkeepEmpty := tags.required || tags.noOmitEmpty || (!g.omitEmpty && !tags.omitEmpty)\n\n\t\tfmt.Fprintln(g.out, ws+\"if in.IsNull() {\")\n\t\tfmt.Fprintln(g.out, ws+\" in.Skip()\")\n\t\tfmt.Fprintln(g.out, ws+\"} else {\")\n\t\tfmt.Fprintln(g.out, ws+\" in.Delim('{')\")\n\t\tif !keepEmpty {\n\t\t\tfmt.Fprintln(g.out, ws+\" if !in.IsDelim('}') {\")\n\t\t}\n\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = make(\"+g.getType(t)+\")\")\n\t\tif !keepEmpty {\n\t\t\tfmt.Fprintln(g.out, ws+\" } else {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = nil\")\n\t\t\tfmt.Fprintln(g.out, ws+\" }\")\n\t\t}\n\n\t\tfmt.Fprintln(g.out, ws+\" for !in.IsDelim('}') {\")\n\t\t// NOTE: extra check for TextUnmarshaler. 
It overrides default methods.\n\t\tif reflect.PtrTo(key).Implements(reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()) {\n\t\t\tfmt.Fprintln(g.out, ws+\" var key \"+g.getType(key))\n\t\t\tfmt.Fprintln(g.out, ws+\"if data := in.UnsafeBytes(); in.Ok() {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" in.AddError(key.UnmarshalText(data) )\")\n\t\t\tfmt.Fprintln(g.out, ws+\"}\")\n\t\t} else if keyDec != \"\" {\n\t\t\tfmt.Fprintln(g.out, ws+\" key := \"+g.getType(key)+\"(\"+keyDec+\")\")\n\t\t} else {\n\t\t\tfmt.Fprintln(g.out, ws+\" var key \"+g.getType(key))\n\t\t\tif err := g.genTypeDecoder(key, \"key\", tags, indent+2); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tfmt.Fprintln(g.out, ws+\" in.WantColon()\")\n\t\tfmt.Fprintln(g.out, ws+\" var \"+tmpVar+\" \"+g.getType(elem))\n\n\t\tif err := g.genTypeDecoder(elem, tmpVar, tags, indent+2); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Fprintln(g.out, ws+\" (\"+out+\")[key] = \"+tmpVar)\n\t\tfmt.Fprintln(g.out, ws+\" in.WantComma()\")\n\t\tfmt.Fprintln(g.out, ws+\" }\")\n\t\tfmt.Fprintln(g.out, ws+\" in.Delim('}')\")\n\t\tfmt.Fprintln(g.out, ws+\"}\")\n\n\tcase reflect.Interface:\n\t\tif t.NumMethod() != 0 {\n\t\t\tif g.interfaceIsEasyjsonUnmarshaller(t) {\n\t\t\t\tfmt.Fprintln(g.out, ws+out+\".UnmarshalEasyJSON(in)\")\n\t\t\t} else if g.interfaceIsJsonUnmarshaller(t) {\n\t\t\t\tfmt.Fprintln(g.out, ws+out+\".UnmarshalJSON(in.Raw())\")\n\t\t\t} else {\n\t\t\t\treturn fmt.Errorf(\"interface type %v not supported: only interface{} and easyjson/json Unmarshaler are allowed\", t)\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Fprintln(g.out, ws+\"if m, ok := \"+out+\".(easyjson.Unmarshaler); ok {\")\n\t\t\tfmt.Fprintln(g.out, ws+\"m.UnmarshalEasyJSON(in)\")\n\t\t\tfmt.Fprintln(g.out, ws+\"} else if m, ok := \"+out+\".(json.Unmarshaler); ok {\")\n\t\t\tfmt.Fprintln(g.out, ws+\"_ = m.UnmarshalJSON(in.Raw())\")\n\t\t\tfmt.Fprintln(g.out, ws+\"} else {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = in.Interface()\")\n\t\t\tfmt.Fprintln(g.out, ws+\"}\")\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(\"don't know how to decode %v\", t)\n\t}\n\treturn nil\n\n}", "func (decode *decoder) ensureOutType(outType reflect.Type) error {\n\tswitch outType.Kind() {\n\tcase reflect.Slice:\n\t\tfallthrough\n\tcase reflect.Array:\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"cannot use \" + outType.String() + \", only slice or array supported\")\n}", "func (t *Type) GobDecode(buf []byte) error {\n\tr := bytes.NewReader(buf)\n\tdec := gob.NewDecoder(r)\n\n\tvar gt gobType\n\terr := dec.Decode(&gt)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error decoding cty.Type: %s\", err)\n\t}\n\tif gt.Version != 0 {\n\t\treturn fmt.Errorf(\"unsupported cty.Type encoding version %d; only 0 is supported\", gt.Version)\n\t}\n\n\tt.typeImpl = gt.Impl\n\n\treturn nil\n}", "func (decode *decoder) ensureOutInnerType(outInnerType reflect.Type) error {\n\tswitch outInnerType.Kind() {\n\tcase reflect.Struct:\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"cannot use \" + outInnerType.String() + \", only struct supported\")\n}", "func decodeDecoder(s *Stream, val reflect.Value) error {\n\tif val.Kind() == reflect.Ptr && val.IsNil() {\n\t\t// set the value to the pointer pointed to the 0 represented by the data type\n\t\tval.Set(reflect.New(val.Type().Elem()))\n\t}\n\t// transfer the reflect type back to Decoder interface type, and call DecodeRLP method\n\treturn val.Interface().(Decoder).DecodeRLP(s)\n}", "func UnmarshalType(b []byte, outputType reflect.Type) (interface{}, error) {\n\n\tif len(b) == 0 
{\n\t\treturn nil, ErrEmptyInput\n\t}\n\n\tswitch string(b) {\n\tcase \"true\":\n\t\tif outputType.Kind() != reflect.Bool {\n\t\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.Bool}}\n\t\t}\n\t\treturn true, nil\n\tcase \"false\":\n\t\tif outputType.Kind() != reflect.Bool {\n\t\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.Bool}}\n\t\t}\n\t\treturn false, nil\n\tcase \"null\":\n\t\treturn nil, nil\n\t}\n\n\tfirst, _ := utf8.DecodeRune(b)\n\tif first == utf8.RuneError {\n\t\treturn nil, ErrInvalidRune\n\t}\n\n\tswitch first {\n\tcase '[', '-':\n\t\tif outputType.Kind() != reflect.Slice {\n\t\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.Slice}}\n\t\t}\n\t\tptr := reflect.New(outputType)\n\t\tptr.Elem().Set(reflect.MakeSlice(outputType, 0, 0))\n\t\terr := goyaml.Unmarshal(b, ptr.Interface())\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling JSON %q\", string(b)))\n\t\t}\n\t\treturn ptr.Elem().Interface(), nil\n\tcase '{':\n\t\tif k := outputType.Kind(); k != reflect.Map {\n\t\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.Map}}\n\t\t}\n\t\tptr := reflect.New(outputType)\n\t\tptr.Elem().Set(reflect.MakeMap(outputType))\n\t\terr := goyaml.Unmarshal(b, ptr.Interface())\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling JSON %q\", string(b)))\n\t\t}\n\t\treturn ptr.Elem().Interface(), nil\n\tcase '\"':\n\t\tif k := outputType.Kind(); k != reflect.String {\n\t\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.String}}\n\t\t}\n\t\tobj := \"\"\n\t\terr := goyaml.Unmarshal(b, &obj)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling JSON %q\", string(b)))\n\t\t}\n\t\treturn obj, nil\n\t}\n\n\tif strings.Contains(string(b), \"\\n\") {\n\t\tif k := outputType.Kind(); k != reflect.Map {\n\t\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.Map}}\n\t\t}\n\t\tptr := reflect.New(outputType)\n\t\tptr.Elem().Set(reflect.MakeMap(outputType))\n\t\terr := goyaml.Unmarshal(b, ptr.Interface())\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling YAML %q\", string(b)))\n\t\t}\n\t\treturn ptr.Elem().Interface(), nil\n\t}\n\n\tswitch outputType.Kind() {\n\tcase reflect.Int:\n\t\ti, err := strconv.Atoi(string(b))\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling YAML %q\", string(b)))\n\t\t}\n\t\treturn i, nil\n\tcase reflect.Float64:\n\t\tf, err := strconv.ParseFloat(string(b), 64)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling YAML %q\", string(b)))\n\t\t}\n\t\treturn f, nil\n\t}\n\treturn string(b), nil\n}", "func DecodeTyped(data []byte) (*Typed, error) {\n\tvar typed Typed\n\tif err := proto.Unmarshal(data, &typed); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &typed, nil\n}", "func (v *Value) Decode(ptr interface{}) {\n switch p := ptr.(type) {\n case *[]byte:\n *p = v.Bytes()\n case *string:\n *p = v.String()\n case *bool:\n *p = Util.ToBool(v.data)\n case *float32, *float64:\n fv := Util.ToFloat(v.data)\n rv := reflect.ValueOf(ptr).Elem()\n rv.Set(reflect.ValueOf(fv).Convert(rv.Type()))\n case *int, *int8, *int16, *int32, *int64:\n iv := Util.ToInt(v.data)\n rv := reflect.ValueOf(ptr).Elem()\n rv.Set(reflect.ValueOf(iv).Convert(rv.Type()))\n case *uint, *uint8, 
*uint16, *uint32, *uint64:\n iv := Util.ToInt(v.data)\n rv := reflect.ValueOf(ptr).Elem()\n rv.Set(reflect.ValueOf(iv).Convert(rv.Type()))\n default:\n if e := json.Unmarshal(v.Bytes(), ptr); e != nil {\n rv := reflect.ValueOf(ptr)\n if rv.Kind() != reflect.Ptr || rv.IsNil() {\n panic(\"Value.Decode: require a valid pointer\")\n }\n\n if rv = rv.Elem(); rv.Kind() == reflect.Interface {\n rv.Set(reflect.ValueOf(v.data))\n } else {\n panic(\"Value.Decode: \" + e.Error())\n }\n }\n }\n}", "func UnmarshalType(b []byte, outputType reflect.Type) (interface{}, error) {\n\n\tif len(b) == 0 {\n\t\treturn nil, ErrEmptyInput\n\t}\n\n\t// If the kind of the output type is interface{}, then simply use Unmarshal.\n\tif outputType.Kind() == reflect.Interface {\n\t\treturn Unmarshal(b)\n\t}\n\n\tif bytes.Equal(b, Y) || bytes.Equal(b, True) {\n\t\tif outputType.Kind() != reflect.Bool {\n\t\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.Bool}}\n\t\t}\n\t\treturn true, nil\n\t}\n\tif bytes.Equal(b, False) {\n\t\tif outputType.Kind() != reflect.Bool {\n\t\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.Bool}}\n\t\t}\n\t\treturn false, nil\n\t}\n\tif bytes.Equal(b, Null) {\n\t\treturn nil, nil\n\t}\n\n\tif bytes.HasPrefix(b, BoundaryMarker) {\n\t\tif outputType.Kind() != reflect.Slice {\n\t\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.Slice}}\n\t\t}\n\t\ts := NewDocumentScanner(bytes.NewReader(b), true)\n\t\tout := reflect.MakeSlice(outputType, 0, 0)\n\t\ti := 0\n\t\tfor s.Scan() {\n\t\t\tif d := s.Bytes(); len(d) > 0 {\n\t\t\t\tobj, err := UnmarshalType(d, outputType.Elem())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn out.Interface(), errors.Wrapf(err, \"error scanning document %d\", i)\n\t\t\t\t}\n\t\t\t\tout = reflect.Append(out, reflect.ValueOf(obj))\n\t\t\t\ti++\n\t\t\t}\n\t\t}\n\t\tif err := s.Err(); err != nil {\n\t\t\treturn out.Interface(), errors.Wrap(err, fmt.Sprintf(\"error scanning YAML %q\", string(b)))\n\t\t}\n\t\treturn out.Interface(), nil\n\t}\n\n\tfirst, _ := utf8.DecodeRune(b)\n\tif first == utf8.RuneError {\n\t\treturn nil, ErrInvalidRune\n\t}\n\n\tswitch first {\n\tcase '[', '-':\n\t\tif outputType.Kind() != reflect.Slice {\n\t\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.Slice}}\n\t\t}\n\t\tptr := reflect.New(outputType)\n\t\tptr.Elem().Set(reflect.MakeSlice(outputType, 0, 0))\n\t\terr := goyaml.Unmarshal(b, ptr.Interface())\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling YAML %q\", string(b)))\n\t\t}\n\t\treturn ptr.Elem().Interface(), nil\n\tcase '{':\n\t\tif k := outputType.Kind(); k != reflect.Map {\n\t\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.Map}}\n\t\t}\n\t\tptr := reflect.New(outputType)\n\t\tptr.Elem().Set(reflect.MakeMap(outputType))\n\t\terr := goyaml.Unmarshal(b, ptr.Interface())\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling YAML %q\", string(b)))\n\t\t}\n\t\treturn ptr.Elem().Interface(), nil\n\tcase '\"':\n\t\tif k := outputType.Kind(); k != reflect.String {\n\t\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.String}}\n\t\t}\n\t\tobj := \"\"\n\t\terr := goyaml.Unmarshal(b, &obj)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling YAML %q\", string(b)))\n\t\t}\n\t\treturn obj, nil\n\t}\n\n\tif _, _, ok := ParseKeyValue(b); ok {\n\t\tk 
:= outputType.Kind()\n\n\t\tif k == reflect.Map {\n\t\t\tptr := reflect.New(outputType)\n\t\t\tptr.Elem().Set(reflect.MakeMap(outputType))\n\t\t\terr := goyaml.Unmarshal(b, ptr.Interface())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling YAML %q into map\", string(b)))\n\t\t\t}\n\t\t\treturn ptr.Elem().Interface(), nil\n\t\t}\n\n\t\tif k == reflect.Struct {\n\t\t\tptr := reflect.New(outputType)\n\t\t\tptr.Elem().Set(reflect.Zero(outputType))\n\t\t\terr := goyaml.Unmarshal(b, ptr.Interface())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling YAML %q into struct\", string(b)))\n\t\t\t}\n\t\t\treturn ptr.Elem().Interface(), nil\n\t\t}\n\n\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.Map, reflect.Struct}}\n\t}\n\n\tswitch outputType.Kind() {\n\tcase reflect.Int:\n\t\ti, err := strconv.Atoi(string(b))\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling YAML %q\", string(b)))\n\t\t}\n\t\treturn i, nil\n\tcase reflect.Float64:\n\t\tf, err := strconv.ParseFloat(string(b), 64)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling YAML %q\", string(b)))\n\t\t}\n\t\treturn f, nil\n\tcase reflect.String:\n\t\tstr := strings.TrimSpace(string(b))\n\t\tif len(str) > 0 {\n\t\t\treturn str, nil\n\t\t}\n\t\t// if empty string, then return nil\n\t\treturn nil, nil\n\t}\n\n\treturn nil, errors.Errorf(\"could not unmarshal YAML %q into type %v\", string(b), outputType)\n}", "func (d *Decoder) DecodeInterface() (interface{}, error) {\n\tc, err := d.readCode()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif msgpcode.IsFixedNum(c) {\n\t\treturn int8(c), nil\n\t}\n\tif msgpcode.IsFixedMap(c) {\n\t\terr = d.s.UnreadByte()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn d.decodeMapDefault()\n\t}\n\tif msgpcode.IsFixedArray(c) {\n\t\treturn d.decodeSlice(c)\n\t}\n\tif msgpcode.IsFixedString(c) {\n\t\treturn d.string(c)\n\t}\n\n\tswitch c {\n\tcase msgpcode.Nil:\n\t\treturn nil, nil\n\tcase msgpcode.False, msgpcode.True:\n\t\treturn d.bool(c)\n\tcase msgpcode.Float:\n\t\treturn d.float32(c)\n\tcase msgpcode.Double:\n\t\treturn d.float64(c)\n\tcase msgpcode.Uint8:\n\t\treturn d.uint8()\n\tcase msgpcode.Uint16:\n\t\treturn d.uint16()\n\tcase msgpcode.Uint32:\n\t\treturn d.uint32()\n\tcase msgpcode.Uint64:\n\t\treturn d.uint64()\n\tcase msgpcode.Int8:\n\t\treturn d.int8()\n\tcase msgpcode.Int16:\n\t\treturn d.int16()\n\tcase msgpcode.Int32:\n\t\treturn d.int32()\n\tcase msgpcode.Int64:\n\t\treturn d.int64()\n\tcase msgpcode.Bin8, msgpcode.Bin16, msgpcode.Bin32:\n\t\treturn d.bytes(c, nil)\n\tcase msgpcode.Str8, msgpcode.Str16, msgpcode.Str32:\n\t\treturn d.string(c)\n\tcase msgpcode.Array16, msgpcode.Array32:\n\t\treturn d.decodeSlice(c)\n\tcase msgpcode.Map16, msgpcode.Map32:\n\t\terr = d.s.UnreadByte()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn d.decodeMapDefault()\n\tcase msgpcode.FixExt1, msgpcode.FixExt2, msgpcode.FixExt4, msgpcode.FixExt8, msgpcode.FixExt16,\n\t\tmsgpcode.Ext8, msgpcode.Ext16, msgpcode.Ext32:\n\t\treturn d.decodeInterfaceExt(c)\n\t}\n\n\treturn 0, fmt.Errorf(\"msgpack: unknown code %x decoding interface{}\", c)\n}", "func (d *Decoder) Decode(ctx context.Context, ref blob.Ref, obj interface{}) error {\n\tif u, ok := obj.(Unmarshaler); ok {\n\t\treturn u.PkUnmarshal(ctx, d.src, ref)\n\t}\n\n\tv := reflect.ValueOf(obj)\n\tt := v.Type()\n\tif t.Kind() != 
reflect.Ptr {\n\t\treturn ErrNotPointer\n\t}\n\tif v.IsNil() {\n\t\treturn ErrNilPointer\n\t}\n\n\tr, size, err := d.src.Fetch(ctx, ref)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"fetching %s from src\", ref)\n\t}\n\tdefer r.Close()\n\n\ts, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"reading body of %s\", ref)\n\t}\n\n\telTyp := t.Elem()\n\n\tswitch elTyp.Kind() {\n\tcase reflect.Bool:\n\t\tp := obj.(*bool)\n\t\t*p = (size > 0)\n\t\treturn nil\n\n\tcase reflect.Int:\n\t\tn, err := strconv.ParseInt(string(s), 10, 0)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing int from %s\", string(s))\n\t\t}\n\t\tp := obj.(*int)\n\t\t*p = int(n)\n\t\treturn nil\n\n\tcase reflect.Int8:\n\t\tn, err := strconv.ParseInt(string(s), 10, 8)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing int8 from %s\", string(s))\n\t\t}\n\t\tp := obj.(*int8)\n\t\t*p = int8(n)\n\t\treturn nil\n\n\tcase reflect.Int16:\n\t\tn, err := strconv.ParseInt(string(s), 10, 16)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing int16 from %s\", string(s))\n\t\t}\n\t\tp := obj.(*int16)\n\t\t*p = int16(n)\n\t\treturn nil\n\n\tcase reflect.Int32:\n\t\tn, err := strconv.ParseInt(string(s), 10, 32)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing int32 from %s\", string(s))\n\t\t}\n\t\tp := obj.(*int32)\n\t\t*p = int32(n)\n\t\treturn nil\n\n\tcase reflect.Int64:\n\t\tn, err := strconv.ParseInt(string(s), 10, 64)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing int64 from %s\", string(s))\n\t\t}\n\t\tp := obj.(*int64)\n\t\t*p = n\n\t\treturn nil\n\n\tcase reflect.Uint:\n\t\tn, err := strconv.ParseUint(string(s), 10, 0)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing uint from %s\", string(s))\n\t\t}\n\t\tp := obj.(*uint)\n\t\t*p = uint(n)\n\t\treturn nil\n\n\tcase reflect.Uint8:\n\t\tn, err := strconv.ParseUint(string(s), 10, 8)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing uint8 from %s\", string(s))\n\t\t}\n\t\tp := obj.(*uint8)\n\t\t*p = uint8(n)\n\t\treturn nil\n\n\tcase reflect.Uint16:\n\t\tn, err := strconv.ParseUint(string(s), 10, 16)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing uint16 from %s\", string(s))\n\t\t}\n\t\tp := obj.(*uint16)\n\t\t*p = uint16(n)\n\t\treturn nil\n\n\tcase reflect.Uint32:\n\t\tn, err := strconv.ParseUint(string(s), 10, 32)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing uint32 from %s\", string(s))\n\t\t}\n\t\tp := obj.(*uint32)\n\t\t*p = uint32(n)\n\t\treturn nil\n\n\tcase reflect.Uint64:\n\t\tn, err := strconv.ParseUint(string(s), 10, 64)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing uint64 from %s\", string(s))\n\t\t}\n\t\tp := obj.(*uint64)\n\t\t*p = n\n\t\treturn nil\n\n\tcase reflect.Float32:\n\t\tf, err := strconv.ParseFloat(string(s), 32)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing float32 from %s\", string(s))\n\t\t}\n\t\tp := obj.(*float32)\n\t\t*p = float32(f)\n\t\treturn nil\n\n\tcase reflect.Float64:\n\t\tf, err := strconv.ParseFloat(string(s), 64)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing float64 from %s\", string(s))\n\t\t}\n\t\tp := obj.(*float64)\n\t\t*p = f\n\t\treturn nil\n\n\tcase reflect.Array:\n\t\tvar refs []blob.Ref\n\t\tdec := d.newJSONDecoder(bytes.NewReader(s))\n\t\terr := dec.Decode(&refs)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"JSON-decoding blobref array\")\n\t\t}\n\t\tarr := v.Elem()\n\t\treturn d.buildArray(ctx, arr, refs)\n\n\tcase 
reflect.Slice:\n\t\tvar refs []blob.Ref\n\t\tdec := d.newJSONDecoder(bytes.NewReader(s))\n\t\terr := dec.Decode(&refs)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"JSON-decoding blobref slice\")\n\t\t}\n\t\tslice := v.Elem()\n\t\tslice, err = d.buildSlice(ctx, slice, refs)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tv.Elem().Set(slice)\n\t\treturn nil\n\n\tcase reflect.Map:\n\t\tkt := elTyp.Key()\n\t\tmt := reflect.MapOf(kt, reftype)\n\t\tmm := reflect.New(mt)\n\t\tdec := d.newJSONDecoder(bytes.NewReader(s))\n\t\terr := dec.Decode(mm.Interface())\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"JSON-decoding map[K]blob.Ref\")\n\t\t}\n\t\treturn d.buildMap(ctx, v.Elem(), mm.Elem())\n\n\tcase reflect.String:\n\t\tp := obj.(*string)\n\t\t*p = string(s)\n\t\treturn nil\n\n\tcase reflect.Struct:\n\t\t// Construct an intermediate struct type for JSON-unmarshaling into.\n\n\t\tvar ftypes []reflect.StructField\n\t\tfor i := 0; i < elTyp.NumField(); i++ {\n\t\t\ttf := elTyp.Field(i)\n\t\t\tname, o := parseTag(tf)\n\t\t\ttf.Tag = reflect.StructTag(fmt.Sprintf(`%s json:\"%s\"`, tf.Tag, name))\n\t\t\tif o.omit || o.inline {\n\t\t\t\tftypes = append(ftypes, tf)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !o.external {\n\t\t\t\tswitch tf.Type.Kind() {\n\t\t\t\tcase reflect.Slice:\n\t\t\t\t\ttf.Type = reflect.SliceOf(reftype)\n\t\t\t\t\tftypes = append(ftypes, tf)\n\t\t\t\t\tcontinue\n\n\t\t\t\tcase reflect.Array:\n\t\t\t\t\ttf.Type = reflect.SliceOf(reftype) // sic, not ArrayOf\n\t\t\t\t\tftypes = append(ftypes, tf)\n\t\t\t\t\tcontinue\n\n\t\t\t\tcase reflect.Map:\n\t\t\t\t\ttf.Type = reflect.MapOf(tf.Type.Key(), reftype)\n\t\t\t\t\tftypes = append(ftypes, tf)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\ttf.Type = reftype\n\t\t\tftypes = append(ftypes, tf)\n\t\t}\n\t\tintermediateTyp := reflect.StructOf(ftypes)\n\t\tintermediateStruct := reflect.New(intermediateTyp)\n\t\tdec := d.newJSONDecoder(bytes.NewReader(s))\n\t\terr := dec.Decode(intermediateStruct.Interface())\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"JSON-decoding into intermediate struct\")\n\t\t}\n\n\t\tstructVal := v.Elem()\n\t\tfor i := 0; i < elTyp.NumField(); i++ {\n\t\t\ttf := elTyp.Field(i)\n\t\t\tname, o := parseTag(tf)\n\t\t\tif o.omit {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfield := structVal.Field(i)\n\t\t\tifield := intermediateStruct.Elem().Field(i)\n\t\t\tif o.inline {\n\t\t\t\tfield.Set(ifield)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !o.external {\n\t\t\t\tswitch tf.Type.Kind() {\n\t\t\t\tcase reflect.Slice:\n\t\t\t\t\trefs := ifield.Interface().([]blob.Ref)\n\t\t\t\t\tslice, err := d.buildSlice(ctx, field, refs)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn errors.Wrapf(err, \"building slice for field %s\", name)\n\t\t\t\t\t}\n\t\t\t\t\tfield.Set(slice)\n\t\t\t\t\tcontinue\n\n\t\t\t\tcase reflect.Array:\n\t\t\t\t\trefs := ifield.Interface().([]blob.Ref)\n\t\t\t\t\terr = d.buildArray(ctx, field, refs)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn errors.Wrapf(err, \"building array for field %s\", name)\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\n\t\t\t\tcase reflect.Map:\n\t\t\t\t\terr = d.buildMap(ctx, field, ifield)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn errors.Wrapf(err, \"building map for field %s\", name)\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ifield.IsZero() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfieldRef := ifield.Interface().(blob.Ref)\n\t\t\tnewFieldVal := reflect.New(tf.Type)\n\t\t\terr = d.Decode(ctx, fieldRef, newFieldVal.Interface())\n\t\t\tif err != nil {\n\t\t\t\treturn 
errors.Wrapf(err, \"decoding ref %s for field %s\", fieldRef, name)\n\t\t\t}\n\t\t\tfield.Set(newFieldVal.Elem())\n\t\t}\n\t\treturn nil\n\n\tcase reflect.Ptr:\n\t\tptr := v.Elem()\n\t\tif ptr.IsNil() {\n\t\t\tnewItem := reflect.New(elTyp.Elem())\n\t\t\tv.Elem().Set(newItem)\n\t\t}\n\t\t// Recursively unmarshal into the thing ptr points to.\n\t\treturn d.Decode(ctx, ref, ptr.Interface())\n\n\tdefault:\n\t\treturn ErrUnsupportedType{Name: t.Name()}\n\t}\n}", "func (b *ProxyTypeBox) Decode(buf *bin.Buffer) error {\n\tif b == nil {\n\t\treturn fmt.Errorf(\"unable to decode ProxyTypeBox to nil\")\n\t}\n\tv, err := DecodeProxyType(buf)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to decode boxed value: %w\", err)\n\t}\n\tb.ProxyType = v\n\treturn nil\n}", "func (v *Type) Decode(sr stream.Reader) error {\n\n\tif err := sr.ReadStructBegin(); err != nil {\n\t\treturn err\n\t}\n\n\tfh, ok, err := sr.ReadFieldBegin()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor ok {\n\t\tswitch {\n\t\tcase fh.ID == 1 && fh.Type == wire.TI32:\n\t\t\tvar x SimpleType\n\t\t\tx, err = _SimpleType_Decode(sr)\n\t\t\tv.SimpleType = &x\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase fh.ID == 2 && fh.Type == wire.TStruct:\n\t\t\tv.SliceType, err = _Type_Decode(sr)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase fh.ID == 3 && fh.Type == wire.TStruct:\n\t\t\tv.KeyValueSliceType, err = _TypePair_Decode(sr)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase fh.ID == 4 && fh.Type == wire.TStruct:\n\t\t\tv.MapType, err = _TypePair_Decode(sr)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase fh.ID == 5 && fh.Type == wire.TStruct:\n\t\t\tv.ReferenceType, err = _TypeReference_Decode(sr)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase fh.ID == 6 && fh.Type == wire.TStruct:\n\t\t\tv.PointerType, err = _Type_Decode(sr)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tdefault:\n\t\t\tif err := sr.Skip(fh.Type); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif err := sr.ReadFieldEnd(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif fh, ok, err = sr.ReadFieldBegin(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := sr.ReadStructEnd(); err != nil {\n\t\treturn err\n\t}\n\n\tcount := 0\n\tif v.SimpleType != nil {\n\t\tcount++\n\t}\n\tif v.SliceType != nil {\n\t\tcount++\n\t}\n\tif v.KeyValueSliceType != nil {\n\t\tcount++\n\t}\n\tif v.MapType != nil {\n\t\tcount++\n\t}\n\tif v.ReferenceType != nil {\n\t\tcount++\n\t}\n\tif v.PointerType != nil {\n\t\tcount++\n\t}\n\tif count != 1 {\n\t\treturn fmt.Errorf(\"Type should have exactly one field: got %v fields\", count)\n\t}\n\n\treturn nil\n}", "func (d *Decoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {\n\td.log.Debugf(\"decoding type %s\", d.valType)\n\t// if we are dealing with an empty interface, skip it.\n\tif d.isEmptyInterface(ptr) {\n\t\td.log.Warn(\"cannot encode to empty interface\")\n\t\titer.Skip()\n\t\treturn\n\t}\n\t// we really shouldn't be here with an invalid token, if for\n\t// some reason we are, call the default decoder and bail.\n\tif d.token == InvalidToken {\n\t\td.log.Warn(\"invalid token\")\n\t\td.decoder.Decode(ptr, iter)\n\t\treturn\n\t}\n\t// get the from type\n\tfromType := iter.WhatIsNext()\n\t// secure tokens will be type string. 
if this is not\n\t// a string, call the default decoder and bail.\n\tif fromType != jsoniter.StringValue {\n\t\td.log.Debug(\"skipping non-string value\")\n\t\td.decoder.Decode(ptr, iter)\n\t\treturn\n\t}\n\t// read the string & format a key\n\tkey := Key(iter.ReadString())\n\t// check to see if it is one of ours\n\tif !key.IsTokenKey(d.token) {\n\t\t// we use an Iterator to avoid setting the ptr directly since it might be a string\n\t\t// or an interface or who knows what. this way the codecs handle it for us.\n\t\tsubIter := iter.Pool().BorrowIterator([]byte(fmt.Sprintf(`\"%s\"`, key)))\n\t\tdefer iter.Pool().ReturnIterator(subIter)\n\t\td.log.Debugf(\"decode string: %s\", key)\n\t\t// decode the string\n\t\td.decoder.Decode(ptr, subIter)\n\t\treturn\n\t}\n\t// we have a valid lookup key. look it up in our table\n\tval, err := d.lookupKey(key)\n\t// did we find something in the lookup table?\n\tif err != nil || val == nil {\n\t\td.log.Debugf(\"lookup entry not found: %s\", key)\n\t\t// this is expected when sparse decoding a struct.\n\t\tif d.valType.Kind() == reflect.Interface {\n\t\t\td.log.Debugf(\"decode empty %s for interface\", key.Kind())\n\t\t\t// if we have a map then set an explicitly typed empty value\n\t\t\t*(*interface{})(ptr) = emptyValueOfKind(key.Kind())\n\t\t}\n\t\treturn\n\t}\n\t// clear the buffer\n\td.stream.Reset(nil)\n\tval.WriteTo(d.stream)\n\tsubIter := iter.Pool().BorrowIterator(d.stream.Buffer())\n\tdefer iter.Pool().ReturnIterator(subIter)\n\t// decode the string\n\td.decoder.Decode(ptr, subIter)\n\td.log.Debugf(\"decoded lookup entry for %s: %s\", key, string(d.stream.Buffer()))\n}", "func decodeWireType(r io.ByteReader) *wireType {\n\twt := new(wireType)\n\tf := -1\n\tfor {\n\t\tdf, err := decodeUint(r)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif df == 0 {\n\t\t\tbreak\n\t\t}\n\t\tf += int(df)\n\t\tswitch f {\n\t\tcase 0:\n\t\t\twt.ArrayT = decodeArrayType(r)\n\t\tcase 1:\n\t\t\twt.SliceT = decodeSliceType(r)\n\t\tcase 2:\n\t\t\twt.StructT = decodeStructType(r)\n\t\tcase 3:\n\t\t\twt.MapT = decodeMapType(r)\n\t\t}\n\t}\n\treturn wt\n}", "func Decode(reader io.Reader, boundary string) (Type, error) {\n\tr := bufio.NewReader(reader)\n\tif len(boundary) > 0 {\n\t\treturn decodeform(r, boundary)\n\t}\n\tpeek, err := r.Peek(2)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tswitch string(peek) {\n\tcase `--`:\n\t\treturn decodeform(r, boundary)\n\t}\n\treturn decodevars(r)\n}", "func (d *Decoder) Decode(v interface{}) (err error) {\n\t// v must be a pointer\n\tpval := reflect.ValueOf(v)\n\tif pval.Kind() != reflect.Ptr || pval.IsNil() {\n\t\treturn errBadPointer\n\t}\n\n\t// catch decoding panics and convert them to errors\n\t// note that this allows us to skip boundary checks during decoding\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr = fmt.Errorf(\"could not decode type %s: %v\", pval.Elem().Type().String(), r)\n\t\t}\n\t}()\n\n\t// reset the read count\n\td.n = 0\n\n\td.decode(pval.Elem())\n\treturn\n}", "func (s *impl) Decode(val interface{}, tType model.Type) (ret model.Value, err error) {\n\tif !tType.IsBasic() {\n\t\terr = fmt.Errorf(\"illegal value type, type:%s\", tType.GetName())\n\t\treturn\n\t}\n\n\tswitch tType.GetValue() {\n\tcase util.TypeBooleanField:\n\t\tret, err = s.decodeBool(val, tType)\n\tcase util.TypeDateTimeField:\n\t\tret, err = s.decodeDateTime(val, tType)\n\tcase util.TypeFloatField, util.TypeDoubleField:\n\t\tret, err = s.decodeFloat(val, tType)\n\tcase util.TypeBitField, util.TypeSmallIntegerField, 
util.TypeInteger32Field, util.TypeIntegerField, util.TypeBigIntegerField:\n\t\tret, err = s.decodeInt(val, tType)\n\tcase util.TypePositiveBitField, util.TypePositiveSmallIntegerField, util.TypePositiveInteger32Field, util.TypePositiveIntegerField, util.TypePositiveBigIntegerField:\n\t\tret, err = s.decodeUint(val, tType)\n\tcase util.TypeSliceField:\n\t\tret, err = s.decodeSlice(val, tType)\n\tcase util.TypeStringField:\n\t\tret, err = s.decodeString(val, tType)\n\tdefault:\n\t\terr = fmt.Errorf(\"illegal type, type:%s\", tType.GetName())\n\t}\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif tType.IsPtrType() && !ret.IsNil() {\n\t\tret = ret.Addr()\n\t}\n\n\treturn\n}", "func nakedDecode(src []byte, t Type) (int, error) {\n\t// decode header\n\thl, _, rl, err := headerDecode(src, t)\n\n\t// check remaining length\n\tif rl != 0 {\n\t\treturn hl, makeError(t, \"expected zero remaining length\")\n\t}\n\n\treturn hl, err\n}", "func makeDecoder(typ reflect.Type, tags tags) (dec decoder, err error) {\n\tkind := typ.Kind()\n\tswitch {\n\tcase typ == rawValueType:\n\t\treturn decodeRawValue, nil\n\t// for data that implemented DecodeRLP method (pointer receiver)\n\tcase typ.Implements(decoderInterface):\n\t\treturn decodeDecoder, nil\n\t// pointer type of the variable implements the decoder interface (pointer receiver)\n\tcase kind != reflect.Ptr && reflect.PtrTo(typ).Implements(decoderInterface):\n\t\treturn decodeDecoderNoPtr, nil\n\t// if the type is *bigInt\n\tcase typ.AssignableTo(reflect.PtrTo(bigInt)):\n\t\treturn decodeBigInt, nil\n\tcase typ.AssignableTo(bigInt):\n\t\treturn decodeBigIntNoPtr, nil\n\tcase isUint(kind):\n\t\treturn decodeUint, nil\n\tcase kind == reflect.Bool:\n\t\treturn decodeBool, nil\n\tcase kind == reflect.String:\n\t\treturn decodeString, nil\n\tcase kind == reflect.Slice || kind == reflect.Array:\n\t\treturn makeListDecoder(typ, tags)\n\tcase kind == reflect.Struct:\n\t\treturn makeStructDecoder(typ)\n\tcase kind == reflect.Ptr:\n\t\tif tags.nilOK {\n\t\t\treturn makeOptionalPtrDecoder(typ)\n\t\t}\n\t\treturn makePtrDecoder(typ)\n\tcase kind == reflect.Interface:\n\t\treturn decodeInterface, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"rlp: type %v is not RLP-serializable\", typ)\n\t}\n}", "func DecodeFrom(d encoding.Decoder, x interface{}, typ reflect.Type) error {\n\tfrom := reflect.New(typ)\n\tif err := d.Decode(from.Interface()); err != nil {\n\t\treturn err\n\t}\n\treturn convertFrom(reflect.ValueOf(x), from)\n}", "func (t *RawJSONTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {\n\tvalueType, compression := gocbcore.DecodeCommonFlags(flags)\n\n\t// Make sure compression is disabled\n\tif compression != gocbcore.NoCompression {\n\t\treturn errors.New(\"unexpected value compression\")\n\t}\n\n\t// Normal types of decoding\n\tif valueType == gocbcore.BinaryType {\n\t\treturn errors.New(\"binary datatype is not supported by RawJSONTranscoder\")\n\t} else if valueType == gocbcore.StringType {\n\t\treturn errors.New(\"string datatype is not supported by RawJSONTranscoder\")\n\t} else if valueType == gocbcore.JSONType {\n\t\tswitch typedOut := out.(type) {\n\t\tcase *[]byte:\n\t\t\t*typedOut = bytes\n\t\t\treturn nil\n\t\tcase *string:\n\t\t\t*typedOut = string(bytes)\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn errors.New(\"you must encode raw JSON data in a byte array or string\")\n\t\t}\n\t}\n\n\treturn errors.New(\"unexpected expectedFlags value\")\n}", "func makeOptionalPtrDecoder(typ reflect.Type) (decoder, error) {\n\t// get the 
underlying element type and the corresponded decoder\n\tetype := typ.Elem()\n\tetypeinfo, err := cachedTypeInfo1(etype, tags{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdec := func(s *Stream, val reflect.Value) (err error) {\n\t\tkind, size, err := s.Kind()\n\t\t// criteria on checking if the value is empty\n\t\tif err != nil || size == 0 && kind != Byte {\n\t\t\ts.kind = -1 // rearm the kind\n\t\t\tval.Set(reflect.Zero(typ)) // set the value to be 0 with the type pointed by the pointer\n\t\t\treturn err\n\t\t}\n\t\tnewval := val\n\n\t\t// if the val pointed to nil, allocates space (allocate space in storage)\n\t\tif val.IsNil() {\n\t\t\tnewval = reflect.New(etype)\n\t\t}\n\t\t// decode data and set val\n\t\tif err = etypeinfo.decoder(s, newval.Elem()); err == nil {\n\t\t\tval.Set(newval)\n\t\t}\n\t\treturn err\n\t}\n\treturn dec, nil\n}", "func protoDec(t reflect.Type, in []byte) (T, error) {\n\tvar p protoreflect.ProtoMessage\n\tswitch it := reflect.New(t.Elem()).Interface().(type) {\n\tcase protoreflect.ProtoMessage:\n\t\tp = it\n\tcase protov1.Message:\n\t\tp = protov1.MessageV2(it)\n\t}\n\terr := protov2.UnmarshalOptions{}.Unmarshal(in, p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn p, nil\n}", "func Decode(dec Decoder, v interface{}) error {\n\tvar err error\n\tswitch vt := v.(type) {\n\tcase *string:\n\t\terr = dec.String(vt)\n\tcase **string:\n\t\tif vt == nil {\n\t\t\t*vt = new(string)\n\t\t}\n\t\terr = dec.String(*vt)\n\tcase *int:\n\t\terr = dec.Int(vt)\n\tcase **int:\n\t\tif vt == nil {\n\t\t\t*vt = new(int)\n\t\t}\n\t\terr = dec.Int(*vt)\n\tcase *int8:\n\t\terr = dec.Int8(vt)\n\tcase **int8:\n\t\tif vt == nil {\n\t\t\t*vt = new(int8)\n\t\t}\n\t\terr = dec.Int8(*vt)\n\tcase *int16:\n\t\terr = dec.Int16(vt)\n\tcase **int16:\n\t\tif vt == nil {\n\t\t\t*vt = new(int16)\n\t\t}\n\t\terr = dec.Int16(*vt)\n\tcase *int32:\n\t\terr = dec.Int32(vt)\n\tcase **int32:\n\t\tif vt == nil {\n\t\t\t*vt = new(int32)\n\t\t}\n\t\terr = dec.Int32(*vt)\n\tcase *int64:\n\t\terr = dec.Int64(vt)\n\tcase **int64:\n\t\tif vt == nil {\n\t\t\t*vt = new(int64)\n\t\t}\n\t\terr = dec.Int64(*vt)\n\tcase *uint8:\n\t\terr = dec.UInt8(vt)\n\tcase **uint8:\n\t\tif vt == nil {\n\t\t\t*vt = new(uint8)\n\t\t}\n\t\terr = dec.UInt8(*vt)\n\tcase *uint16:\n\t\terr = dec.UInt16(vt)\n\tcase **uint16:\n\t\tif vt == nil {\n\t\t\t*vt = new(uint16)\n\t\t}\n\t\terr = dec.UInt16(*vt)\n\tcase *uint32:\n\t\terr = dec.UInt32(vt)\n\tcase **uint32:\n\t\tif vt == nil {\n\t\t\t*vt = new(uint32)\n\t\t}\n\t\terr = dec.UInt32(*vt)\n\tcase *uint64:\n\t\terr = dec.UInt64(vt)\n\tcase **uint64:\n\t\tif vt == nil {\n\t\t\t*vt = new(uint64)\n\t\t}\n\t\terr = dec.UInt64(*vt)\n\tcase *float64:\n\t\terr = dec.Float64(vt)\n\tcase **float64:\n\t\tif vt == nil {\n\t\t\t*vt = new(float64)\n\t\t}\n\t\terr = dec.Float64(*vt)\n\tcase *float32:\n\t\terr = dec.Float32(vt)\n\tcase **float32:\n\t\tif vt == nil {\n\t\t\t*vt = new(float32)\n\t\t}\n\t\terr = dec.Float32(*vt)\n\tcase *bool:\n\t\terr = dec.Bool(vt)\n\tcase **bool:\n\t\tif vt == nil {\n\t\t\t*vt = new(bool)\n\t\t}\n\t\terr = dec.Bool(*vt)\n\tcase DecodableObject:\n\t\terr = dec.Object(vt)\n\tcase DecodableList:\n\t\terr = dec.List(vt)\n\tdefault:\n\t\terr = ErrUndecodable\n\t}\n\treturn err\n}", "func (msg *Message) Decode(out interface{}) error {\n\tif msg.reader == nil {\n\t\tmsg.reader = bytes.NewReader(msg.Data)\n\t}\n\tdefer msg.c.decoderState.PushReader(msg.reader)()\n\treturn pvdata.Decode(msg.c.decoderState, out)\n}", "func (f *Frame) Read(out interface{}) error 
{\n\tswitch x := out.(type) {\n\tcase *uint8:\n\t\tif f.BytesRemaining() < 1 {\n\t\t\treturn io.EOF\n\t\t}\n\t\t*x = f.Payload[f.payloadPos]\n\t\tf.payloadPos++\n\tcase *uint16:\n\t\tif f.BytesRemaining() < 2 {\n\t\t\treturn io.EOF\n\t\t}\n\t\t*x = binary.LittleEndian.Uint16(f.Payload[f.payloadPos:])\n\t\tf.payloadPos += 2\n\tcase *uint32:\n\t\tif f.BytesRemaining() < 4 {\n\t\t\treturn io.EOF\n\t\t}\n\t\t*x = binary.LittleEndian.Uint32(f.Payload[f.payloadPos:])\n\t\tf.payloadPos += 4\n\tdefault:\n\t\tv := reflect.ValueOf(out)\n\t\tif v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct {\n\t\t\telem := v.Elem()\n\t\t\tfor i := 0; i < elem.NumField(); i++ {\n\t\t\t\tif err := f.Read(elem.Field(i).Addr().Interface()); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\tif v.Kind() == reflect.Slice {\n\t\t\tfor i := 0; i < v.Len(); i++ {\n\t\t\t\tif err := f.Read(v.Index(i).Addr().Interface()); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\tpanic(fmt.Errorf(\"can't decode MSP payload into type %v\", out))\n\t}\n\treturn nil\n}", "func TypeFromByte(b byte) Type {\n\tswitch b {\n\tcase 0x0f:\n\t\treturn ResetMsg\n\tcase 0x10:\n\t\treturn RunMsg\n\tcase 0x2f:\n\t\treturn DiscardMsg\n\tcase 0x3f:\n\t\treturn PullMsg\n\tcase 0x71:\n\t\treturn RecordMsg\n\tcase 0x70:\n\t\treturn SuccessMsg\n\tcase 0x7e:\n\t\treturn IgnoreMsg\n\tcase 0x7f:\n\t\treturn FailureMsg\n\tcase 0x01:\n\t\treturn HelloMsg\n\tcase 0x02:\n\t\treturn GoodbyeMsg\n\tcase 0x11:\n\t\treturn BeginMsg\n\tcase 0x12:\n\t\treturn CommitMsg\n\tcase 0x13:\n\t\treturn RollbackMsg\n\tdefault:\n\t\treturn UnknownMsg\n\t}\n}", "func (d *Decoder) Decode(v interface{}) (err error) {\n\trv := reflect.Indirect(reflect.ValueOf(v))\n\tif !rv.CanAddr() {\n\t\treturn errors.New(\"binary: can only Decode to pointer type\")\n\t}\n\n\t// Scan the type (this will load from cache)\n\tvar c Codec\n\tif c, err = scan(rv.Type()); err == nil {\n\t\terr = c.DecodeTo(d, rv)\n\t}\n\n\treturn\n}", "func readResponse(p packetType) (response responseType, err error) {\n\t// The calls to bencode.Unmarshal() can be fragile.\n\tdefer func() {\n\t\tif x := recover(); x != nil {\n\t\t\tlogger.Infof(\"DHT: !!! Recovering from panic() after bencode.Unmarshal %q, %v\", string(p.b), x)\n\t\t}\n\t}()\n\tif e2 := bencode.Unmarshal(bytes.NewBuffer(p.b), &response); e2 == nil {\n\t\terr = nil\n\t\treturn\n\t} else {\n\t\tlogger.Infof(\"DHT: unmarshal error, odd or partial data during UDP read? 
%v, err=%s\", string(p.b), e2)\n\t\treturn response, e2\n\t}\n\treturn\n}", "func jsonDec(t reflect.Type, in []byte) (T, error) {\n\tval := reflect.New(t)\n\tif err := jsonx.Unmarshal(val.Interface(), in); err != nil {\n\t\treturn nil, err\n\t}\n\treturn val.Elem().Interface(), nil\n}", "func (s *Stream) Decode(val interface{}) error {\n\t// if val does not pointed to any address, there is no place to store the decoded data\n\tif val == nil {\n\t\treturn errDecodeIntoNil\n\t}\n\n\t// getting the value and the type of val\n\trval := reflect.ValueOf(val)\n\trtyp := rval.Type()\n\n\t// the passed in val must be a pointer\n\tif rtyp.Kind() != reflect.Ptr {\n\t\treturn errNoPointer\n\t}\n\n\t// checked again if passed in val is pointed to nil\n\tif rval.IsNil() {\n\t\treturn errDecodeIntoNil\n\t}\n\n\t// get the decoder based on the data type pointed by the val\n\tinfo, err := cachedTypeInfo(rtyp.Elem(), tags{})\n\tif err != nil {\n\t\treturn err\n\t}\n\t// passed in stream as well as the value of data pointed by val that will be used to store the decoded data\n\terr = info.decoder(s, rval.Elem())\n\n\t// check if the err is type *decodeError and the length of ctx is greater than 0\n\tif decErr, ok := err.(*decodeError); ok && len(decErr.ctx) > 0 {\n\t\tdecErr.ctx = append(decErr.ctx, fmt.Sprint(\"(\", rtyp.Elem(), \")\"))\n\t}\n\treturn err\n}", "func (t *RawStringTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {\n\tvalueType, compression := gocbcore.DecodeCommonFlags(flags)\n\n\t// Make sure compression is disabled\n\tif compression != gocbcore.NoCompression {\n\t\treturn errors.New(\"unexpected value compression\")\n\t}\n\n\t// Normal types of decoding\n\tif valueType == gocbcore.BinaryType {\n\t\treturn errors.New(\"only string datatype is supported by RawStringTranscoder\")\n\t} else if valueType == gocbcore.StringType {\n\t\tswitch typedOut := out.(type) {\n\t\tcase *string:\n\t\t\t*typedOut = string(bytes)\n\t\t\treturn nil\n\t\tcase *interface{}:\n\t\t\t*typedOut = string(bytes)\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn errors.New(\"you must encode a string in a string or interface\")\n\t\t}\n\t} else if valueType == gocbcore.JSONType {\n\t\treturn errors.New(\"only string datatype is supported by RawStringTranscoder\")\n\t}\n\n\treturn errors.New(\"unexpected expectedFlags value\")\n}", "func DecodeResponse(response []byte, responseType interface{}) (interface{}, error) {\n\tif err := json.Unmarshal([]byte(response), &responseType); err != nil {\n\t\tlog.Printf(\"error detected unmarshalling response: %v\", err)\n\t\treturn nil, err\n\t}\n\treturn responseType, nil\n}", "func Decode(v interface{}) error {\n\tval := reflect.ValueOf(v)\n\tt := reflect.TypeOf(v)\n\n\tif val.Kind() != reflect.Ptr {\n\t\treturn newDecodeError(\"must decode to pointer\", \"\", nil)\n\t}\n\n\tfor t.Kind() == reflect.Ptr {\n\t\tt = t.Elem()\n\n\t\tif val.IsNil() {\n\t\t\tval.Set(reflect.New(t))\n\t\t}\n\n\t\tval = val.Elem()\n\t}\n\n\tif t.Kind() != reflect.Struct {\n\t\treturn newDecodeError(fmt.Sprintf(\"cannot decode into value of type: %s\", t.String()), \"\", nil)\n\t}\n\n\tnewVal := reflect.New(t)\n\n\terr := decodeFields(val, newVal.Elem())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tval.Set(newVal.Elem())\n\n\treturn nil\n}", "func (d *Decoder) Decode(v interface{}) error {\n\tval := reflect.ValueOf(v)\n\ttyp := reflect.TypeOf(v)\n\n\tif typ.Kind() != reflect.Ptr {\n\t\treturn &TypeError{typ}\n\t}\n\n\tif typ.Elem().Kind() == reflect.Slice {\n\t\treturn 
d.decodeSlice(val)\n\t}\n\tp, err := d.getPairs()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch typ.Elem().Kind() {\n\tdefault:\n\t\treturn &TypeError{val.Type()}\n\tcase reflect.Map:\n\t\tif val.Elem().IsNil() {\n\t\t\tval.Elem().Set(reflect.MakeMap(typ.Elem()))\n\t\t}\n\t\treturn d.saveMap(p, val.Elem())\n\tcase reflect.Struct:\n\t\tif val.IsNil() {\n\t\t\treturn &TypeError{nil}\n\t\t}\n\t\treturn d.saveStruct(p, val.Elem())\n\t}\n\treturn nil\n}", "func (f DecoderFunc) Decode(v interface{}) error { return f(v) }", "func unpack(value nlgo.Binary, out interface{}) error {\n\treturn binary.Read(bytes.NewReader(([]byte)(value)), binary.BigEndian, out)\n}", "func (p Typed) Decode() (fx.Message, error) {\n\tmsgType, ok := MessageTypes[p.TypeId]\n\tif !ok {\n\t\treturn nil, &ErrUnknownType{TypeID: p.TypeId}\n\t}\n\tmsg := msgType.NewMessage()\n\tserializable := msg.(SerializableMessage).Serializable()\n\tif err := proto.Unmarshal(p.Message, serializable); err != nil {\n\t\treturn nil, err\n\t}\n\treturn msg, nil\n}", "func decodeConfigMapT(t *testing.T, data []byte, typMap map[string]reflect.Type) (cfg configMapT, err error) {\n\tif data == nil || len(data) == 0 {\n\t\treturn nil, errors.New(\"nil byte\")\n\t}\n\tres, err := ToObject(data, typMap)\n\tif err != nil {\n\t\tt.Errorf(\"failed decode config map bytes: %v, %v\\n\", base64.StdEncoding.EncodeToString(data), err)\n\t\treturn nil, err\n\t}\n\n\tif sn, ok := res.(map[interface{}]interface{}); ok && len(sn) == 0 {\n\t\treturn configMapT{}, nil\n\t}\n\n\tt.Log(\"decoded: \", res)\n\tif sn, ok := res.(configMapT); ok {\n\t\tcfg = sn\n\t\treturn\n\t}\n\tt.Errorf(\"unexpect decode config map result: %v, type:%v, base64:%v\\n\", res, reflect.TypeOf(res), base64.StdEncoding.EncodeToString(data))\n\terr = errors.New(\"failed to decode config map\")\n\treturn\n}", "func schemaDec(t reflect.Type, in []byte) (T, error) {\n\tswitch t.Kind() {\n\tcase reflect.Slice, reflect.Array:\n\t\tt = t.Elem()\n\t}\n\tdecMu.Lock()\n\tdec, ok := schemaDecs[t]\n\tif !ok {\n\t\tvar err error\n\t\tdec, err = coder.RowDecoderForStruct(t)\n\t\tif err != nil {\n\t\t\tdecMu.Unlock()\n\t\t\treturn nil, err\n\t\t}\n\t\tschemaDecs[t] = dec\n\t}\n\tdecMu.Unlock()\n\tbuf := bytes.NewBuffer(in)\n\tval, err := dec(buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn val, nil\n}", "func (dec *Decoder) Decode(v interface{}) error {\n\tif dec.isPooled == 1 {\n\t\tpanic(InvalidUsagePooledDecoderError(\"Invalid usage of pooled decoder\"))\n\t}\n\tvar err error\n\tswitch vt := v.(type) {\n\tcase *string:\n\t\terr = dec.decodeString(vt)\n\tcase **string:\n\t\terr = dec.decodeStringNull(vt)\n\tcase *int:\n\t\terr = dec.decodeInt(vt)\n\tcase **int:\n\t\terr = dec.decodeIntNull(vt)\n\tcase *int8:\n\t\terr = dec.decodeInt8(vt)\n\tcase **int8:\n\t\terr = dec.decodeInt8Null(vt)\n\tcase *int16:\n\t\terr = dec.decodeInt16(vt)\n\tcase **int16:\n\t\terr = dec.decodeInt16Null(vt)\n\tcase *int32:\n\t\terr = dec.decodeInt32(vt)\n\tcase **int32:\n\t\terr = dec.decodeInt32Null(vt)\n\tcase *int64:\n\t\terr = dec.decodeInt64(vt)\n\tcase **int64:\n\t\terr = dec.decodeInt64Null(vt)\n\tcase *uint8:\n\t\terr = dec.decodeUint8(vt)\n\tcase **uint8:\n\t\terr = dec.decodeUint8Null(vt)\n\tcase *uint16:\n\t\terr = dec.decodeUint16(vt)\n\tcase **uint16:\n\t\terr = dec.decodeUint16Null(vt)\n\tcase *uint32:\n\t\terr = dec.decodeUint32(vt)\n\tcase **uint32:\n\t\terr = dec.decodeUint32Null(vt)\n\tcase *uint64:\n\t\terr = dec.decodeUint64(vt)\n\tcase **uint64:\n\t\terr = dec.decodeUint64Null(vt)\n\tcase 
*float64:\n\t\terr = dec.decodeFloat64(vt)\n\tcase **float64:\n\t\terr = dec.decodeFloat64Null(vt)\n\tcase *float32:\n\t\terr = dec.decodeFloat32(vt)\n\tcase **float32:\n\t\terr = dec.decodeFloat32Null(vt)\n\tcase *bool:\n\t\terr = dec.decodeBool(vt)\n\tcase **bool:\n\t\terr = dec.decodeBoolNull(vt)\n\tcase UnmarshalerJSONObject:\n\t\t_, err = dec.decodeObject(vt)\n\tcase UnmarshalerJSONArray:\n\t\t_, err = dec.decodeArray(vt)\n\tcase *EmbeddedJSON:\n\t\terr = dec.decodeEmbeddedJSON(vt)\n\tcase *interface{}:\n\t\terr = dec.decodeInterface(vt)\n\tdefault:\n\t\treturn InvalidUnmarshalError(fmt.Sprintf(invalidUnmarshalErrorMsg, reflect.TypeOf(vt).String()))\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn dec.err\n}", "func (dec *Decoder) Decode(v interface{}) error {\n\tif dec.isPooled == 1 {\n\t\tpanic(InvalidUsagePooledDecoderError(\"Invalid usage of pooled decoder\"))\n\t}\n\tvar err error\n\tswitch vt := v.(type) {\n\tcase *string:\n\t\terr = dec.decodeString(vt)\n\tcase **string:\n\t\terr = dec.decodeStringNull(vt)\n\tcase *int:\n\t\terr = dec.decodeInt(vt)\n\tcase **int:\n\t\terr = dec.decodeIntNull(vt)\n\tcase *int8:\n\t\terr = dec.decodeInt8(vt)\n\tcase **int8:\n\t\terr = dec.decodeInt8Null(vt)\n\tcase *int16:\n\t\terr = dec.decodeInt16(vt)\n\tcase **int16:\n\t\terr = dec.decodeInt16Null(vt)\n\tcase *int32:\n\t\terr = dec.decodeInt32(vt)\n\tcase **int32:\n\t\terr = dec.decodeInt32Null(vt)\n\tcase *int64:\n\t\terr = dec.decodeInt64(vt)\n\tcase **int64:\n\t\terr = dec.decodeInt64Null(vt)\n\tcase *uint8:\n\t\terr = dec.decodeUint8(vt)\n\tcase **uint8:\n\t\terr = dec.decodeUint8Null(vt)\n\tcase *uint16:\n\t\terr = dec.decodeUint16(vt)\n\tcase **uint16:\n\t\terr = dec.decodeUint16Null(vt)\n\tcase *uint32:\n\t\terr = dec.decodeUint32(vt)\n\tcase **uint32:\n\t\terr = dec.decodeUint32Null(vt)\n\tcase *uint64:\n\t\terr = dec.decodeUint64(vt)\n\tcase **uint64:\n\t\terr = dec.decodeUint64Null(vt)\n\tcase *float64:\n\t\terr = dec.decodeFloat64(vt)\n\tcase **float64:\n\t\terr = dec.decodeFloat64Null(vt)\n\tcase *float32:\n\t\terr = dec.decodeFloat32(vt)\n\tcase **float32:\n\t\terr = dec.decodeFloat32Null(vt)\n\tcase *bool:\n\t\terr = dec.decodeBool(vt)\n\tcase **bool:\n\t\terr = dec.decodeBoolNull(vt)\n\tcase UnmarshalerJSONObject:\n\t\t_, err = dec.decodeObject(vt)\n\tcase UnmarshalerJSONArray:\n\t\t_, err = dec.decodeArray(vt)\n\tcase *EmbeddedJSON:\n\t\terr = dec.decodeEmbeddedJSON(vt)\n\tcase *interface{}:\n\t\terr = dec.decodeInterface(vt)\n\tdefault:\n\t\treturn InvalidUnmarshalError(fmt.Sprintf(invalidUnmarshalErrorMsg, vt))\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn dec.err\n}", "func decodeBody(req *http.Request, out interface{}, cb func(interface{}) error) error {\n\t// This generally only happens in tests since real HTTP requests set\n\t// a non-nil body with no content. We guard against it anyways to prevent\n\t// a panic. 
The EOF response is the same behavior as an empty reader.\n\tif req.Body == nil {\n\t\treturn io.EOF\n\t}\n\n\tvar raw interface{}\n\tdec := json.NewDecoder(req.Body)\n\tif err := dec.Decode(&raw); err != nil {\n\t\treturn err\n\t}\n\n\t// Invoke the callback prior to decode\n\tif cb != nil {\n\t\tif err := cb(raw); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tdecodeConf := &mapstructure.DecoderConfig{\n\t\tDecodeHook: mapstructure.ComposeDecodeHookFunc(\n\t\t\tmapstructure.StringToTimeDurationHookFunc(),\n\t\t\tstringToReadableDurationFunc(),\n\t\t),\n\t\tResult: &out,\n\t}\n\n\tdecoder, err := mapstructure.NewDecoder(decodeConf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn decoder.Decode(raw)\n}", "func DecodeField(fieldName string, t reflect.Type, v reflect.Value, tag reflect.StructTag, buf *bits.BitSetBuffer, sizeMap map[string]int, options ...EncDecOption) error {\n\tprocessed, err := decUnmarshaler(v, buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif processed {\n\t\treturn nil\n\t}\n\n\tendianness, err := getEndianness(tag)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t}\n\n\tswitch t.Kind() {\n\tcase reflect.Ptr:\n\t\tval := reflect.New(t.Elem())\n\t\terr := DecodeField(fieldName, t.Elem(), val.Elem(), tag, buf, sizeMap, options...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tv.Set(val)\n\tcase reflect.Interface:\n\t\tfor _, enc := range options {\n\t\t\tif enc.Type() == v.Type() {\n\t\t\t\terr := enc.DecoderFunc()(fieldName, t, v, tag, buf, sizeMap, options...)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\treturn fmt.Errorf(\"interface:%v was not found: interface not supported\", t.Name())\n\tcase reflect.Struct:\n\t\tm := make(map[string]int)\n\t\tfor k, v := range sizeMap {\n\t\t\tm[k] = v\n\t\t}\n\t\tprocessed, err := decStructSpecial(fieldName, t, v, tag, buf, m, options...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif processed {\n\t\t\treturn nil\n\t\t}\n\n\t\tfor i := 0; i < v.NumField(); i++ {\n\t\t\tsf := t.Field(i)\n\t\t\tvf := v.Field(i)\n\t\t\tif _, has := sf.Tag.Lookup(\"omit\"); has {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr := DecodeField(sf.Name, sf.Type, vf, sf.Tag, buf, m, options...)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\tcase reflect.Array:\n\t\tfor i := 0; i < v.Len(); i++ {\n\t\t\titem := v.Index(i)\n\t\t\tif err := DecodeField(\"\", item.Type(), item, tag, buf, sizeMap, options...); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\tcase reflect.Slice:\n\t\tall := true\n\t\tsuint := 0\n\t\tif s, ok := tag.Lookup(\"size\"); ok {\n\t\t\ttmp, err := strconv.ParseUint(s, 10, 64)\n\t\t\tsuint = int(tmp)\n\t\t\tif err != nil {\n\t\t\t\ti, has := sizeMap[s]\n\t\t\t\tswitch {\n\t\t\t\tcase !has:\n\t\t\t\t\treturn fmt.Errorf(\"size must either be a positive number or a field found prior to this field :%v\", err)\n\t\t\t\tcase i < 0:\n\t\t\t\t\treturn fmt.Errorf(\"value of %v is %v, to be used for size it must be nonnegative\", s, i)\n\t\t\t\t}\n\n\t\t\t\tsuint = i\n\t\t\t}\n\t\t\tall = false\n\t\t}\n\n\t\treflectionValue := reflect.New(t)\n\t\treflectionValue.Elem().Set(reflect.MakeSlice(t, 0, 10))\n\t\tsliceValuePtr := reflect.ValueOf(reflectionValue.Interface()).Elem()\n\t\tfor i := 0; i < suint || (all && !buf.PosAtEnd()); i++ {\n\t\t\titem := reflect.New(t.Elem())\n\t\t\tif err := DecodeField(\"\", item.Elem().Type(), item.Elem(), tag, buf, sizeMap, options...); err != nil {\n\t\t\t\treturn 
err\n\t\t\t}\n\n\t\t\tsliceValuePtr.Set(reflect.Append(sliceValuePtr, item.Elem()))\n\t\t}\n\n\t\tv.Set(sliceValuePtr)\n\tcase reflect.String:\n\t\tall := true\n\t\tsuint := uint64(0)\n\t\tif s, ok := tag.Lookup(\"strlen\"); ok {\n\t\t\tvar err error\n\t\t\tsuint, err = strconv.ParseUint(s, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\ti, has := sizeMap[s]\n\t\t\t\tswitch {\n\t\t\t\tcase !has:\n\t\t\t\t\treturn fmt.Errorf(\"strlen must either be a positive number or a field found prior to this field :%v\", err)\n\t\t\t\tcase i < 0:\n\t\t\t\t\treturn fmt.Errorf(\"value of %v is %v, to be used for strlen it must be nonnegative\", s, i)\n\t\t\t\t}\n\n\t\t\t\tsuint = uint64(i)\n\n\t\t\t}\n\t\t\tall = false\n\t\t}\n\n\t\tif all {\n\t\t\tsb := strings.Builder{}\n\n\t\t\tfor {\n\t\t\t\tbs := make([]byte, suint)\n\t\t\t\tn, err := buf.Read(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t\t\t}\n\t\t\t\tif n != len(bs) {\n\t\t\t\t\tsb.Write(bs[:n])\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tsb.Write(bs)\n\t\t\t}\n\n\t\t\tv.SetString(sb.String())\n\t\t} else {\n\t\t\tbs := make([]byte, suint)\n\t\t\terr := binary.Read(buf, endianness, bs)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t\t}\n\t\t\tv.SetString(string(bs))\n\t\t}\n\tcase reflect.Bool:\n\t\tnumOfBits, hasBits, err := getBits(tag, sizeMap, 8, 0)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tvar x bool\n\t\tif hasBits {\n\t\t\ttmp, err := bits.ReadUint(buf, numOfBits, endianness)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t\t}\n\t\t\tx = tmp > 0\n\t\t} else {\n\t\t\tif err := binary.Read(buf, endianness, &x); err != nil {\n\t\t\t\treturn fmt.Errorf(\"expected to read bool from %v: %v\", fieldName, err)\n\t\t\t}\n\t\t}\n\n\t\tv.SetBool(x)\n\tcase reflect.Uint8:\n\t\tnumOfBits, hasBits, err := getBits(tag, sizeMap, 8, 0)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tvar x uint8\n\t\tif hasBits {\n\t\t\tvar tmp uint64\n\t\t\ttmp, err = bits.ReadUint(buf, numOfBits, endianness)\n\t\t\tx = uint8(tmp)\n\t\t} else {\n\t\t\terr = binary.Read(buf, endianness, &x)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tsizeMap[fieldName] = int(x)\n\t\tv.SetUint(uint64(x))\n\tcase reflect.Uint16:\n\t\tnumOfBits, hasBits, err := getBits(tag, sizeMap, 16, 0)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tvar x uint16\n\t\tif hasBits {\n\t\t\tvar tmp uint64\n\t\t\ttmp, err = bits.ReadUint(buf, numOfBits, endianness)\n\t\t\tx = uint16(tmp)\n\t\t} else {\n\t\t\terr = binary.Read(buf, endianness, &x)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tsizeMap[fieldName] = int(x)\n\t\tv.SetUint(uint64(x))\n\tcase reflect.Uint32:\n\t\tnumOfBits, hasBits, err := getBits(tag, sizeMap, 32, 0)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tvar x uint32\n\t\tif hasBits {\n\t\t\tvar tmp uint64\n\t\t\ttmp, err = bits.ReadUint(buf, numOfBits, endianness)\n\t\t\tx = uint32(tmp)\n\t\t} else {\n\t\t\terr = binary.Read(buf, endianness, &x)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tsizeMap[fieldName] = int(x)\n\t\tv.SetUint(uint64(x))\n\tcase reflect.Uint64:\n\t\tnumOfBits, hasBits, err := getBits(tag, sizeMap, 64, 0)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: 
%v\", fieldName, err)\n\t\t}\n\n\t\tvar x uint64\n\t\tif hasBits {\n\t\t\tx, err = bits.ReadUint(buf, numOfBits, endianness)\n\t\t} else {\n\t\t\terr = binary.Read(buf, endianness, &x)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tsizeMap[fieldName] = int(x)\n\t\tv.SetUint(x)\n\tcase reflect.Int8:\n\t\tnumOfBits, hasBits, err := getBits(tag, sizeMap, 8, 2)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tvar x int8\n\t\tif hasBits {\n\t\t\tvar tmp int64\n\t\t\ttmp, err = bits.ReadInt(buf, numOfBits, endianness)\n\t\t\tx = int8(tmp)\n\t\t} else {\n\t\t\terr = binary.Read(buf, endianness, &x)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tsizeMap[fieldName] = int(x)\n\t\tv.SetInt(int64(x))\n\tcase reflect.Int16:\n\t\tnumOfBits, hasBits, err := getBits(tag, sizeMap, 16, 2)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tvar x int16\n\t\tif hasBits {\n\t\t\tvar tmp int64\n\t\t\ttmp, err = bits.ReadInt(buf, numOfBits, endianness)\n\t\t\tx = int16(tmp)\n\t\t} else {\n\t\t\terr = binary.Read(buf, endianness, &x)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tsizeMap[fieldName] = int(x)\n\t\tv.SetInt(int64(x))\n\tcase reflect.Int32:\n\t\tnumOfBits, hasBits, err := getBits(tag, sizeMap, 32, 2)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tvar x int32\n\t\tif hasBits {\n\t\t\tvar tmp int64\n\t\t\ttmp, err = bits.ReadInt(buf, numOfBits, endianness)\n\t\t\tx = int32(tmp)\n\t\t} else {\n\t\t\terr = binary.Read(buf, endianness, &x)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tsizeMap[fieldName] = int(x)\n\t\tv.SetInt(int64(x))\n\tcase reflect.Int64:\n\t\tnumOfBits, hasBits, err := getBits(tag, sizeMap, 64, 2)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tvar x int64\n\t\tif hasBits {\n\t\t\tx, err = bits.ReadInt(buf, numOfBits, endianness)\n\t\t} else {\n\t\t\terr = binary.Read(buf, endianness, &x)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tsizeMap[fieldName] = int(x)\n\t\tv.SetInt(x)\n\tcase reflect.Float32:\n\t\t_, hasBits, _ := getBits(tag, map[string]int{}, 32, 0)\n\t\tif hasBits {\n\t\t\treturn fmt.Errorf(\"bits not supported with float32: %v\", fieldName)\n\t\t}\n\n\t\tvar x float32\n\t\tif err := binary.Read(buf, endianness, &x); err != nil {\n\t\t\treturn fmt.Errorf(\"expected to read float32 from %v: %v\", fieldName, err)\n\t\t}\n\n\t\tv.SetFloat(float64(x))\n\tcase reflect.Float64:\n\t\t_, hasBits, _ := getBits(tag, map[string]int{}, 64, 0)\n\t\tif hasBits {\n\t\t\treturn fmt.Errorf(\"bits not supported with float64: %v\", fieldName)\n\t\t}\n\n\t\tvar x float64\n\t\tif err := binary.Read(buf, endianness, &x); err != nil {\n\t\t\treturn fmt.Errorf(\"expected to read float64 from %v: %v\", fieldName, err)\n\t\t}\n\n\t\tv.SetFloat(x)\n\tdefault:\n\t\treturn fmt.Errorf(\"%v not supported\", t)\n\t}\n\n\treturn nil\n}", "func (dec *Decoder) decode(_ Code, _ reflect.Value) {\n\tdefer catchError(&dec.err)\n}", "func (d *decoder) Decode(s *bufio.Scanner) (obj interface{}, err error) {\n\tb, err := ReadBytes(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(b) == 0 {\n\t\tlog.Err(\"empty or malformed payload: %q\", b)\n\t\treturn nil, ErrBadMsg\n\t}\n\n\tswitch b[0] {\n\tcase 
STRING:\n\t\treturn decodeString(b)\n\tcase INT:\n\t\treturn decodeInt(b)\n\tcase NIL:\n\t\treturn nil, decodeNil(s)\n\tcase SLICE:\n\t\treturn d.decodeSlice(b, s)\n\tcase MAP:\n\t\treturn d.decodeMap(b, s)\n\tcase ERROR:\n\t\treturn decodeErr(b)\n\t}\n\n\tlog.Err(\"unsupported payload type: %q\", b)\n\treturn nil, ErrUnsupportedType\n}", "func decodeInternal(kind int, data []byte, v interface{}) (err error) {\n\tdefer handleAbort(&err)\n\tvalue, ok := v.(reflect.Value)\n\tif !ok {\n\t\tvalue = reflect.ValueOf(v)\n\t\tswitch value.Kind() {\n\t\tcase reflect.Map:\n\t\t\tif value.IsNil() {\n\t\t\t\treturn errors.New(\"bson: Decode map arg must not be nil.\")\n\t\t\t}\n\t\tcase reflect.Ptr:\n\t\t\tif value.IsNil() {\n\t\t\t\treturn errors.New(\"bson: Decode pointer arg must not be nil.\")\n\t\t\t}\n\t\t\tvalue = value.Elem()\n\t\tdefault:\n\t\t\treturn errors.New(\"bson: Decode arg must be pointer or map.\")\n\t\t}\n\t}\n\n\td := decodeState{data: data}\n\td.decodeValue(kind, value)\n\treturn d.savedError\n}", "func (b *NetworkTypeBox) Decode(buf *bin.Buffer) error {\n\tif b == nil {\n\t\treturn fmt.Errorf(\"unable to decode NetworkTypeBox to nil\")\n\t}\n\tv, err := DecodeNetworkType(buf)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to decode boxed value: %w\", err)\n\t}\n\tb.NetworkType = v\n\treturn nil\n}", "func (d *Decoder) Decode(b []byte) (interface{}, error) {\n\tnv := reflect.New(d.Type).Interface()\n\tif err := d.Func(b, nv); err != nil {\n\t\treturn nil, err\n\t}\n\tptr := reflect.ValueOf(nv)\n\treturn ptr.Elem().Interface(), nil\n}", "func decodeTuple(a *DatumAlloc, tupTyp *types.T, b []byte) (tree.Datum, []byte, error) {\n\tb, _, _, err := encoding.DecodeNonsortingUvarint(b)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresult := tree.DTuple{\n\t\tD: a.NewDatums(len(tupTyp.TupleContents())),\n\t}\n\n\tvar datum tree.Datum\n\tfor i := range tupTyp.TupleContents() {\n\t\tdatum, b, err = DecodeTableValue(a, tupTyp.TupleContents()[i], b)\n\t\tif err != nil {\n\t\t\treturn nil, b, err\n\t\t}\n\t\tresult.D[i] = datum\n\t}\n\treturn a.NewDTuple(result), b, nil\n}", "func (codec TypeLengthValueCodec) Decode(raw net.Conn) (Message, error) {\n\tbyteChan := make(chan []byte)\n\terrorChan := make(chan error)\n\n\tgo func(bc chan []byte, ec chan error) {\n\t\ttypeData := make([]byte, MessageTypeBytes)\n\t\t_, err := io.ReadFull(raw, typeData)\n\t\tif err != nil {\n\t\t\tec <- err\n\t\t\tclose(bc)\n\t\t\tclose(ec)\n\t\t\tlogger.Debugln(\"go-routine read message type exited\", err)\n\t\t\treturn\n\t\t}\n\t\tbc <- typeData\n\t}(byteChan, errorChan)\n\n\tvar typeBytes []byte\n\n\tselect {\n\tcase err := <-errorChan:\n\t\treturn nil, err\n\n\tcase typeBytes = <-byteChan:\n\t\tif typeBytes == nil {\n\t\t\treturn nil, ErrBadData\n\t\t}\n\t\ttypeBuf := bytes.NewReader(typeBytes)\n\t\tvar msgType uint16\n\t\tif err := binary.Read(typeBuf, binary.BigEndian, &msgType); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\ttypehandler := GetTypeHandlerFunc(msgType)\n\t\theadhandler := GetHeadHandlerFunc(msgType)\n\t\tunmarshaler := GetUnmarshalFunc(msgType)\n\t\tif typehandler == nil || headhandler == nil || unmarshaler == nil {\n\t\t\treturn nil, ErrUndefined(msgType)\n\t\t}\n\n\t\tmsgHeadLen, errtype := typehandler(msgType)\n\t\tif errtype != nil {\n\t\t\treturn nil, errtype\n\t\t}\n\n\t\tlengthBytes := make([]byte, msgHeadLen)\n\t\t_, err := io.ReadFull(raw, lengthBytes)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\theadBytes := append(typeBytes, lengthBytes...)\n\t\tvar msgLen 
uint\n\t\tmsgLen, err = headhandler(headBytes)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif msgLen > MessageMaxBytes {\n\t\t\tlogger.Errorf(\"message(type %d) has bytes(%d) beyond max %d\\n\", msgType, msgLen, MessageMaxBytes)\n\t\t\treturn nil, ErrBadData\n\t\t}\n\n\t\t// read application data\n\t\tmsgBytes := make([]byte, msgLen)\n\t\t_, err = io.ReadFull(raw, msgBytes)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// deserialize message from bytes\n\t\treturn unmarshaler(append(headBytes, msgBytes...))\n\t}\n}", "func Unpack(b []byte) (Tuple, error) {\n\tvar t Tuple\n\n\tvar i int\n\n\tfor i < len(b) {\n\t\tvar el interface{}\n\t\tvar off int\n\n\t\tswitch {\n\t\tcase b[i] == 0x00:\n\t\t\tel = nil\n\t\t\toff = 1\n\t\tcase b[i] == 0x01:\n\t\t\tel, off = decodeBytes(b[i:])\n\t\tcase b[i] == 0x02:\n\t\t\tel, off = decodeString(b[i:])\n\t\tcase 0x0c <= b[i] && b[i] <= 0x1c:\n\t\t\tel, off = decodeInt(b[i:])\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unable to decode tuple element with unknown typecode %02x\", b[i])\n\t\t}\n\n\t\tt = append(t, el)\n\t\ti += off\n\t}\n\n\treturn t, nil\n}", "func gobDecode(buf []byte, into interface{}) error {\n\tif buf == nil {\n\t\treturn nil\n\t}\n\tdec := gob.NewDecoder(bytes.NewReader(buf))\n\treturn dec.Decode(into)\n}", "func (float64DecoderTraits) Decoder(e parquet.Encoding, descr *schema.Column, useDict bool, mem memory.Allocator) TypedDecoder {\n\tif useDict {\n\t\treturn &DictFloat64Decoder{dictDecoder{decoder: newDecoderBase(format.Encoding_RLE_DICTIONARY, descr), mem: mem}}\n\t}\n\n\tswitch e {\n\tcase parquet.Encodings.Plain:\n\t\treturn &PlainFloat64Decoder{decoder: newDecoderBase(format.Encoding(e), descr)}\n\tdefault:\n\t\tpanic(\"unimplemented encoding type\")\n\t}\n}", "func Decode(m interface{}, val interface{}) error {\n\tif err := check(val); err != nil {\n\t\treturn err\n\t}\n\n\treturn decode(m, reflect.ValueOf(val).Elem())\n}", "func (o FunctionInputResponseOutput) DataType() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FunctionInputResponse) *string { return v.DataType }).(pulumi.StringPtrOutput)\n}", "func (c Converter) Decode(r io.ReadSeeker) (interface{}, error) {\n\treturn nil, errors.New(\"decoding dhall files is not implemented yet\")\n}", "func (byteArrayDecoderTraits) Decoder(e parquet.Encoding, descr *schema.Column, useDict bool, mem memory.Allocator) TypedDecoder {\n\tif useDict {\n\t\treturn &DictByteArrayDecoder{dictDecoder{decoder: newDecoderBase(format.Encoding_RLE_DICTIONARY, descr), mem: mem}}\n\t}\n\n\tswitch e {\n\tcase parquet.Encodings.Plain:\n\t\treturn &PlainByteArrayDecoder{decoder: newDecoderBase(format.Encoding(e), descr)}\n\tcase parquet.Encodings.DeltaLengthByteArray:\n\t\tif mem == nil {\n\t\t\tmem = memory.DefaultAllocator\n\t\t}\n\t\treturn &DeltaLengthByteArrayDecoder{\n\t\t\tdecoder: newDecoderBase(format.Encoding(e), descr),\n\t\t\tmem: mem,\n\t\t}\n\tcase parquet.Encodings.DeltaByteArray:\n\t\tif mem == nil {\n\t\t\tmem = memory.DefaultAllocator\n\t\t}\n\t\treturn &DeltaByteArrayDecoder{\n\t\t\tDeltaLengthByteArrayDecoder: &DeltaLengthByteArrayDecoder{\n\t\t\t\tdecoder: newDecoderBase(format.Encoding(e), descr),\n\t\t\t\tmem: mem,\n\t\t\t}}\n\tdefault:\n\t\tpanic(\"unimplemented encoding type\")\n\t}\n}", "func (o FunctionOutputResponsePtrOutput) DataType() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FunctionOutputResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.DataType\n\t}).(pulumi.StringPtrOutput)\n}", "func packType(v 
interface{}) byte {\n\tswitch v.(type) {\n\tcase nil:\n\t\treturn ptNone\n\tcase string:\n\t\treturn ptString\n\tcase int32:\n\t\treturn ptInt\n\tcase float32:\n\t\treturn ptFloat\n\tcase uint32:\n\t\treturn ptPtr\n\tcase []uint16:\n\t\treturn ptWString\n\tcase color.NRGBA:\n\t\treturn ptColor\n\tcase uint64:\n\t\treturn ptUint64\n\tdefault:\n\t\tpanic(\"invalid vdf.Node\")\n\t}\n}", "func (d *Decoder) Decode(v interface{}) error {\n\treturn nil\n}", "func (int64DecoderTraits) Decoder(e parquet.Encoding, descr *schema.Column, useDict bool, mem memory.Allocator) TypedDecoder {\n\tif useDict {\n\t\treturn &DictInt64Decoder{dictDecoder{decoder: newDecoderBase(format.Encoding_RLE_DICTIONARY, descr), mem: mem}}\n\t}\n\n\tswitch e {\n\tcase parquet.Encodings.Plain:\n\t\treturn &PlainInt64Decoder{decoder: newDecoderBase(format.Encoding(e), descr)}\n\tcase parquet.Encodings.DeltaBinaryPacked:\n\t\tif mem == nil {\n\t\t\tmem = memory.DefaultAllocator\n\t\t}\n\t\treturn &DeltaBitPackInt64Decoder{\n\t\t\tdeltaBitPackDecoder: &deltaBitPackDecoder{\n\t\t\t\tdecoder: newDecoderBase(format.Encoding(e), descr),\n\t\t\t\tmem: mem,\n\t\t\t}}\n\tdefault:\n\t\tpanic(\"unimplemented encoding type\")\n\t}\n}", "func (DictByteArrayDecoder) Type() parquet.Type {\n\treturn parquet.Types.ByteArray\n}", "func GobDecode(buffer []byte, value interface{}) error {\n buf := bytes.NewBuffer(buffer)\n decoder := gob.NewDecoder(buf)\n err := decoder.Decode(value)\n if err != nil {\n return gobDebug.Error(err)\n }\n return nil\n}", "func (dec *Decoder) DecodeValue(v reflect.Value) error {\n\tif v.IsValid() {\n\t\tif v.Kind() == reflect.Ptr && !v.IsNil() {\n\t\t\t// That's okay, we'll store through the pointer.\n\t\t} else if !v.CanSet() {\n\t\t\treturn errors.New(\"binpack: DecodeValue of unassignable value\")\n\t\t}\n\t}\n\n\tdec.buf.Reset() // In case data lingers from previous invocation.\n\tdec.err = nil\n\tid := dec.decodeType(false)\n\tif dec.err == nil {\n\t\tdec.decode(id, v)\n\t}\n\treturn dec.err\n}", "func decodeDecoderNoPtr(s *Stream, val reflect.Value) error {\n\treturn val.Addr().Interface().(Decoder).DecodeRLP(s)\n}", "func (c *coder) decoderForType(keyOrValue, typ string) (func(m json.RawMessage) ([]byte, error), error) {\n\tvar dec func(s string) ([]byte, error)\n\tswitch typ {\n\tcase \"json\":\n\t\t// Easy case - we already have the JSON-marshaled data.\n\t\treturn func(m json.RawMessage) ([]byte, error) {\n\t\t\treturn m, nil\n\t\t}, nil\n\tcase \"hex\":\n\t\tdec = hex.DecodeString\n\tcase \"base64\":\n\t\tdec = base64.StdEncoding.DecodeString\n\tcase \"string\":\n\t\tdec = func(s string) ([]byte, error) {\n\t\t\treturn []byte(s), nil\n\t\t}\n\tcase \"avro\":\n\t\treturn c.makeAvroDecoder(keyOrValue), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(`unsupported decoder %#v, only json, string, hex and base64 are supported`, typ)\n\t}\n\treturn func(m json.RawMessage) ([]byte, error) {\n\t\tvar s string\n\t\tif err := json.Unmarshal(m, &s); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn dec(s)\n\t}, nil\n}", "func (c Coder) Type() FullType {\n\tif !c.IsValid() {\n\t\tpanic(\"Invalid Coder\")\n\t}\n\treturn c.coder.T\n}", "func (fixedLenByteArrayDecoderTraits) Decoder(e parquet.Encoding, descr *schema.Column, useDict bool, mem memory.Allocator) TypedDecoder {\n\tif useDict {\n\t\treturn &DictFixedLenByteArrayDecoder{dictDecoder{decoder: newDecoderBase(format.Encoding_RLE_DICTIONARY, descr), mem: mem}}\n\t}\n\n\tswitch e {\n\tcase parquet.Encodings.Plain:\n\t\treturn 
&PlainFixedLenByteArrayDecoder{decoder: newDecoderBase(format.Encoding(e), descr)}\n\tdefault:\n\t\tpanic(\"unimplemented encoding type\")\n\t}\n}", "func (tv *TypedBytes) ValueType() ValueType {\n\treturn tv.Type\n}", "func (p Packet) TypeData() []byte {\n\tl := p.Len()\n\tif l < EapMsgData {\n\t\treturn nil\n\t}\n\treturn p[EapMsgData:l]\n}", "func textUnmarshalerDecode(\n\tinputType reflect.Type, outputType reflect.Type, data interface{},\n) (interface{}, error) {\n\tif !reflect.PtrTo(outputType).Implements(stringUnmarshalerType) {\n\t\treturn data, nil\n\t}\n\tvalue, ok := data.(string)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"invalid type %v\", inputType)\n\t}\n\tparsedValue, ok := reflect.New(outputType).Interface().(stringUnmarshaler)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"invalid output type %v\", outputType)\n\t}\n\terr := parsedValue.Decode(value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn parsedValue, nil\n}" ]
[ "0.68913203", "0.66737515", "0.6513736", "0.6163966", "0.5928105", "0.5928105", "0.5900145", "0.58706284", "0.5865244", "0.57236636", "0.5708626", "0.5692009", "0.5692009", "0.5660723", "0.56320024", "0.56186193", "0.5612395", "0.5568661", "0.5550775", "0.5543029", "0.55170673", "0.5500252", "0.5488188", "0.5476487", "0.54477143", "0.54462093", "0.5443971", "0.542918", "0.54078287", "0.5400475", "0.5397801", "0.5383795", "0.5365684", "0.5358495", "0.5350722", "0.5350191", "0.5342746", "0.5341991", "0.5338556", "0.53272724", "0.53252184", "0.5325048", "0.531087", "0.5302521", "0.5278624", "0.52376777", "0.523178", "0.52202624", "0.52197915", "0.52182084", "0.52146935", "0.5208036", "0.5207227", "0.5197151", "0.5187175", "0.51738673", "0.51700354", "0.51652277", "0.51501054", "0.5146649", "0.51399165", "0.51220083", "0.5114587", "0.5112553", "0.50898755", "0.50832075", "0.5075352", "0.5064728", "0.50571865", "0.50571865", "0.50570554", "0.5056346", "0.5054455", "0.5054399", "0.505269", "0.5051861", "0.502939", "0.5025253", "0.5014807", "0.5002359", "0.49943674", "0.4966509", "0.49617448", "0.49494275", "0.49452", "0.49434263", "0.49402672", "0.49382088", "0.4936799", "0.49356726", "0.49349394", "0.49280345", "0.4923981", "0.49208045", "0.49186447", "0.4912746", "0.49040446", "0.4903141", "0.4902807", "0.48959854" ]
0.6807437
1
DecodeType will attempt to decode the buffer into the pointer outT
func (g *GobDecoderLight) DecodeType(buf []byte, outT interface{}) error {
	defer func() {
		g.bytes.Reset()
	}()
	reader := bytes.NewReader(buf)
	if _, err := io.Copy(g.bytes, reader); err != nil {
		return err
	}
	return g.decoder.Decode(outT)
}
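For context, a minimal runnable sketch of how the DecodeType document above might be exercised end to end. The record shows only the method, so the GobDecoderLight fields and the newGobDecoderLight constructor below are assumptions for illustration: a scratch bytes.Buffer wired to a gob.Decoder, so that the method's io.Copy into g.bytes feeds g.decoder.

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"io"
)

// GobDecoderLight's fields are inferred from the method body above:
// a scratch buffer that the embedded gob.Decoder reads from.
type GobDecoderLight struct {
	bytes   *bytes.Buffer
	decoder *gob.Decoder
}

// newGobDecoderLight is hypothetical; the record does not include a
// constructor, so this wiring is an assumption.
func newGobDecoderLight() *GobDecoderLight {
	buf := new(bytes.Buffer)
	return &GobDecoderLight{bytes: buf, decoder: gob.NewDecoder(buf)}
}

// DecodeType, reproduced verbatim from the record: copy buf into the
// scratch buffer, decode into the pointer outT, reset on return.
func (g *GobDecoderLight) DecodeType(buf []byte, outT interface{}) error {
	defer func() {
		g.bytes.Reset()
	}()
	reader := bytes.NewReader(buf)
	if _, err := io.Copy(g.bytes, reader); err != nil {
		return err
	}
	return g.decoder.Decode(outT)
}

func main() {
	// Produce a gob-encoded payload with the standard library encoder.
	var wire bytes.Buffer
	if err := gob.NewEncoder(&wire).Encode("hello gob"); err != nil {
		panic(err)
	}

	// Decode the payload back into the pointer outT.
	var out string
	if err := newGobDecoderLight().DecodeType(wire.Bytes(), &out); err != nil {
		panic(err)
	}
	fmt.Println(out) // hello gob
}

Because the gob.Decoder reads lazily from the shared buffer, DecodeType only needs to copy the raw bytes in before calling Decode; the deferred Reset keeps the scratch buffer from growing across calls.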
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (g *GobTranscoder) DecodeType(buf []byte, outT interface{}) error {\n\tg.decoderMut.Lock()\n\tdefer func() {\n\t\tg.inBytes.Reset()\n\t\tg.decoderMut.Unlock()\n\t}()\n\treader := bytes.NewReader(buf)\n\tif _, err := io.Copy(g.inBytes, reader); err != nil {\n\t\treturn err\n\t}\n\treturn g.decoder.Decode(outT)\n}", "func (dec *Decoder) decodeType(isInterface bool) Code {\n\treturn 0\n}", "func (d *Decoder) Type() (Type, error) {\n\n\t// start with 1 byte and append to it until we get a clean varint\n\tvar (\n\t\ttag uint64\n\t\ttagBytes []byte\n\t)\n\nreadTagByte:\n\tfor {\n\t\tvar singleByte = make([]byte, 1)\n\t\t_, err := io.ReadFull(d.input, singleByte)\n\t\tif err != nil {\n\t\t\treturn typeUninited, err\n\t\t}\n\t\ttagBytes = append(tagBytes, singleByte[0])\n\n\t\tvar byteCount int\n\t\ttag, byteCount = varint.ConsumeVarint(tagBytes)\n\t\tswitch {\n\t\tcase byteCount == varint.ErrCodeTruncated:\n\t\t\tcontinue readTagByte\n\t\tcase byteCount > 0:\n\t\t\tfmt.Fprintln(dbg, \"\\tvarint byteCount:\", byteCount)\n\t\t\tbreak readTagByte // we got a varint!\n\t\tdefault:\n\t\t\treturn typeUninited, fmt.Errorf(\"bipf: broken varint tag field\")\n\t\t}\n\t}\n\n\tfmt.Fprintf(dbg, \"\\tdecoded %x to tag: %d\\n\", tagBytes, tag)\n\n\t// apply mask to get type\n\td.currentType = Type(tag & tagMask)\n\tif d.currentType >= TypeReserved {\n\t\treturn 0, fmt.Errorf(\"bipf: invalid type: %s\", d.currentType)\n\t}\n\n\t// shift right to get length\n\td.currentLen = uint64(tag >> tagSize)\n\n\t// drop some debugging info\n\tfmt.Fprintln(dbg, \"\\tvalue type:\", d.currentType)\n\tfmt.Fprintln(dbg, \"\\tvalue length:\", d.currentLen)\n\tfmt.Fprintln(dbg)\n\tdbg.Sync()\n\n\treturn d.currentType, nil\n}", "func (g *Generator) genTypeDecoder(t reflect.Type, out string, tags fieldTags, indent int) error {\n\tws := strings.Repeat(\" \", indent)\n\n\tunmarshalerIface := reflect.TypeOf((*easyjson.Unmarshaler)(nil)).Elem()\n\tif reflect.PtrTo(t).Implements(unmarshalerIface) {\n\t\tfmt.Fprintln(g.out, ws+\"(\"+out+\").UnmarshalEasyJSON(in)\")\n\t\treturn nil\n\t}\n\n\tunmarshalerIface = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()\n\tif reflect.PtrTo(t).Implements(unmarshalerIface) {\n\t\tfmt.Fprintln(g.out, ws+\"if data := in.Raw(); in.Ok() {\")\n\t\tfmt.Fprintln(g.out, ws+\" in.AddError( (\"+out+\").UnmarshalJSON(data) )\")\n\t\tfmt.Fprintln(g.out, ws+\"}\")\n\t\treturn nil\n\t}\n\n\tunmarshalerIface = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()\n\tif reflect.PtrTo(t).Implements(unmarshalerIface) {\n\t\tfmt.Fprintln(g.out, ws+\"if data := in.UnsafeBytes(); in.Ok() {\")\n\t\tfmt.Fprintln(g.out, ws+\" in.AddError( (\"+out+\").UnmarshalText(data) )\")\n\t\tfmt.Fprintln(g.out, ws+\"}\")\n\t\treturn nil\n\t}\n\n\terr := g.genTypeDecoderNoCheck(t, out, tags, indent)\n\treturn err\n}", "func Decode(buf []byte, out interface{}) error {\n\treturn codec.NewDecoder(bytes.NewReader(buf), msgpackHandle).Decode(out)\n}", "func Decode(buf []byte, out interface{}) error {\n\treturn codec.NewDecoder(bytes.NewReader(buf), msgpackHandle).Decode(out)\n}", "func (decoder *Decoder) ReadType() (string, error) {\n s, err := decoder.ReadString()\n if err != nil {\n decoder.Recover()\n refId, err := decoder.ReadInt()\n if err != nil {\n return \"\", errors.New(\"readType: unexpected code\")\n }\n ref, ok := decoder.refMap[refId]\n if !ok {\n return \"\", errors.New(\"readType: unknown type\")\n }\n stringType, ok := ref.(string) // assertion\n if !ok {\n return \"\", errors.New(\"readType: unknown type\")\n }\n 
decoder.success()\n return stringType, nil\n }\n decoder.addRef(s)\n decoder.success()\n return s, nil\n}", "func (t *LegacyTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {\n\tvalueType, compression := gocbcore.DecodeCommonFlags(flags)\n\n\t// Make sure compression is disabled\n\tif compression != gocbcore.NoCompression {\n\t\treturn errors.New(\"unexpected value compression\")\n\t}\n\n\t// Normal types of decoding\n\tif valueType == gocbcore.BinaryType {\n\t\tswitch typedOut := out.(type) {\n\t\tcase *[]byte:\n\t\t\t*typedOut = bytes\n\t\t\treturn nil\n\t\tcase *interface{}:\n\t\t\t*typedOut = bytes\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn errors.New(\"you must encode binary in a byte array or interface\")\n\t\t}\n\t} else if valueType == gocbcore.StringType {\n\t\tswitch typedOut := out.(type) {\n\t\tcase *string:\n\t\t\t*typedOut = string(bytes)\n\t\t\treturn nil\n\t\tcase *interface{}:\n\t\t\t*typedOut = string(bytes)\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn errors.New(\"you must encode a string in a string or interface\")\n\t\t}\n\t} else if valueType == gocbcore.JSONType {\n\t\terr := json.Unmarshal(bytes, &out)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"unexpected expectedFlags value\")\n}", "func (c byteCodec) Decode(data []byte, out interface{}) error {\n\t// #1 reflect method but is slow\n\t//reflect.Indirect(reflect.ValueOf(i)).SetBytes(data)\n\n\t// #2\n\tswitch v := out.(type) {\n\tcase *[]byte:\n\t\t*v = *&data\n\t\treturn nil\n\tcase *string:\n\t\t*v = string(data)\n\t\treturn nil\n\n\tdefault:\n\t\treturn fmt.Errorf(\"%T is not a []byte\", out)\n\t}\n}", "func (bs endecBytes) Type() byte {\n\treturn bs[0] >> 4\n}", "func (t SGTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {\n\n\tswitch typedOut := out.(type) {\n\tcase *[]byte:\n\t\t*typedOut = bytes\n\t\treturn nil\n\tdefault:\n\t\tdefaultTranscoder := gocb.DefaultTranscoder{}\n\t\treturn defaultTranscoder.Decode(bytes, flags, out)\n\n\t}\n\n}", "func decodeMsgPack(buf []byte, out interface{}) error {\n\treturn codec.NewDecoder(bytes.NewReader(buf), msgpackHandle).Decode(out)\n}", "func decodeMsgPack(buf []byte, out interface{}) error {\n\treturn codec.NewDecoder(bytes.NewReader(buf), msgpackHandle).Decode(out)\n}", "func (d Decoder) Decode(out interface{}) (err error) {\n\treturn d.Provider.Decode(out)\n}", "func (d *Decoder) Peek() Type {\n\tdefer func() { d.lastCall = peekCall }()\n\tif d.lastCall == readCall {\n\t\td.value, d.err = d.Read()\n\t}\n\treturn d.value.typ\n}", "func (d *Decoder) DecodeValue(rv reflect.Value) error {\n\tif rv.Kind() != reflect.Ptr {\n\t\treturn fmt.Errorf(\"msgpack: not a pointer type %q\", rv.Type())\n\t}\n\n\tcustomType, err := d.readCustomType()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn d.decodeValue(rv, customType)\n}", "func interfaceDecode(dec *gob.Decoder) Pythagoras {\n\t//The decode will fail unless the concrete type on the wire has been registered.\n\t//we registered it in the calling function\n\tvar p Pythagoras\n\terr := dec.Decode(&p)\n\tif err != nil {\n\t\tlog.Fatal(\"Decode:\", err)\n\t}\n\treturn p\n}", "func (t *JSONTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {\n\tvalueType, compression := gocbcore.DecodeCommonFlags(flags)\n\n\t// Make sure compression is disabled\n\tif compression != gocbcore.NoCompression {\n\t\treturn errors.New(\"unexpected value compression\")\n\t}\n\n\t// Normal types of decoding\n\tif valueType == 
gocbcore.BinaryType {\n\t\treturn errors.New(\"binary datatype is not supported by JSONTranscoder\")\n\t} else if valueType == gocbcore.StringType {\n\t\treturn errors.New(\"string datatype is not supported by JSONTranscoder\")\n\t} else if valueType == gocbcore.JSONType {\n\t\terr := json.Unmarshal(bytes, &out)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"unexpected expectedFlags value\")\n}", "func (d *Decoder) Decode(r io.Reader, t *dials.Type) (reflect.Value, error) {\n\ttomlBytes, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn reflect.Value{}, fmt.Errorf(\"error reading TOML: %s\", err)\n\t}\n\n\t// Use the TagCopyingMangler to copy over TOML tags from dials tags if TOML\n\t// tags aren't specified.\n\ttfmr := transform.NewTransformer(t.Type(),\n\t\t&tagformat.TagCopyingMangler{\n\t\t\tSrcTag: common.DialsTagName, NewTag: TOMLTagName})\n\tval, tfmErr := tfmr.Translate()\n\tif tfmErr != nil {\n\t\treturn reflect.Value{}, fmt.Errorf(\"failed to convert tags: %s\", tfmErr)\n\t}\n\n\t// Get a pointer to our value, so we can pass that.\n\tinstance := val.Addr().Interface()\n\terr = tomlparser.Unmarshal(tomlBytes, instance)\n\tif err != nil {\n\t\treturn reflect.Value{}, err\n\t}\n\n\tunmangledVal, unmangleErr := tfmr.ReverseTranslate(val)\n\tif unmangleErr != nil {\n\t\treturn reflect.Value{}, unmangleErr\n\t}\n\n\treturn unmangledVal, nil\n}", "func (dec *Decoder) Decode(e interface{}) error {\n\tif e == nil {\n\t\treturn dec.DecodeValue(reflect.Value{})\n\t}\n\tvalue := reflect.ValueOf(e)\n\t// If e represents a value as opposed to a pointer, the answer won't\n\t// get back to the caller. Make sure it's a pointer.\n\tif value.Type().Kind() != reflect.Ptr {\n\t\tdec.err = errors.New(\"binpack: attempt to decode into a non-pointer\")\n\t\treturn dec.err\n\t}\n\treturn dec.DecodeValue(value)\n}", "func Decode(data []byte) any {\n\tvar buffer = new(protocol.ByteBuffer)\n\tbuffer.WriteUBytes(data)\n\tvar packet = protocol.Read(buffer)\n\treturn packet\n}", "func readType(r io.ByteReader) *Type {\n\tt := &Type{}\n\tt.Address = readUvarint(r)\n\tt.Size = readUvarint(r)\n\tt.Name = readString(r)\n\tt.IsPtr = readUvarint(r) == 1\n\tt.FieldList = readFieldList(r)\n\treturn t\n}", "func (d *Decoder) Decode(ctx context.Context, b []byte) (interface{}, error) {\n\tnv := reflect.New(d.typ).Interface()\n\tif err := d.fn(ctx, b, nv); err != nil {\n\t\treturn nil, err\n\t}\n\tptr := reflect.ValueOf(nv)\n\treturn ptr.Elem().Interface(), nil\n}", "func (t *RawBinaryTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {\n\tvalueType, compression := gocbcore.DecodeCommonFlags(flags)\n\n\t// Make sure compression is disabled\n\tif compression != gocbcore.NoCompression {\n\t\treturn errors.New(\"unexpected value compression\")\n\t}\n\n\t// Normal types of decoding\n\tif valueType == gocbcore.BinaryType {\n\t\tswitch typedOut := out.(type) {\n\t\tcase *[]byte:\n\t\t\t*typedOut = bytes\n\t\t\treturn nil\n\t\tcase *interface{}:\n\t\t\t*typedOut = bytes\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn errors.New(\"you must encode binary in a byte array or interface\")\n\t\t}\n\t} else if valueType == gocbcore.StringType {\n\t\treturn errors.New(\"only binary datatype is supported by RawBinaryTranscoder\")\n\t} else if valueType == gocbcore.JSONType {\n\t\treturn errors.New(\"only binary datatype is supported by RawBinaryTranscoder\")\n\t}\n\n\treturn errors.New(\"unexpected expectedFlags value\")\n}", "func Decode(data interface{}, ptr 
interface{}) {\n v := Value{data}\n v.Decode(ptr)\n}", "func DecodeRaw(scope *Scope, bytes tf.Output, out_type tf.DataType, optional ...DecodeRawAttr) (output tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\tattrs := map[string]interface{}{\"out_type\": out_type}\n\tfor _, a := range optional {\n\t\ta(attrs)\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"DecodeRaw\",\n\t\tInput: []tf.Input{\n\t\t\tbytes,\n\t\t},\n\t\tAttrs: attrs,\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}", "func decodeMsgPack(buf []byte, out interface{}) error {\n\tr := bytes.NewBuffer(buf)\n\thd := MsgpackHandle{}\n\tdec := NewDecoder(r, &hd)\n\treturn dec.Decode(out)\n}", "func (g *Generator) genTypeDecoderNoCheck(t reflect.Type, out string, tags fieldTags, indent int) error {\n\tws := strings.Repeat(\" \", indent)\n\t// Check whether type is primitive, needs to be done after interface check.\n\tif dec := customDecoders[t.String()]; dec != \"\" {\n\t\tfmt.Fprintln(g.out, ws+out+\" = \"+dec)\n\t\treturn nil\n\t} else if dec := primitiveStringDecoders[t.Kind()]; dec != \"\" && tags.asString {\n\t\tif tags.intern && t.Kind() == reflect.String {\n\t\t\tdec = \"in.StringIntern()\"\n\t\t}\n\t\tfmt.Fprintln(g.out, ws+out+\" = \"+g.getType(t)+\"(\"+dec+\")\")\n\t\treturn nil\n\t} else if dec := primitiveDecoders[t.Kind()]; dec != \"\" {\n\t\tif tags.intern && t.Kind() == reflect.String {\n\t\t\tdec = \"in.StringIntern()\"\n\t\t}\n\t\tif tags.noCopy && t.Kind() == reflect.String {\n\t\t\tdec = \"in.UnsafeString()\"\n\t\t}\n\t\tfmt.Fprintln(g.out, ws+out+\" = \"+g.getType(t)+\"(\"+dec+\")\")\n\t\treturn nil\n\t}\n\n\tswitch t.Kind() {\n\tcase reflect.Slice:\n\t\ttmpVar := g.uniqueVarName()\n\t\telem := t.Elem()\n\n\t\tif elem.Kind() == reflect.Uint8 && elem.Name() == \"uint8\" {\n\t\t\tfmt.Fprintln(g.out, ws+\"if in.IsNull() {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" in.Skip()\")\n\t\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = nil\")\n\t\t\tfmt.Fprintln(g.out, ws+\"} else {\")\n\t\t\tif g.simpleBytes {\n\t\t\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = []byte(in.String())\")\n\t\t\t} else {\n\t\t\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = in.Bytes()\")\n\t\t\t}\n\n\t\t\tfmt.Fprintln(g.out, ws+\"}\")\n\n\t\t} else {\n\n\t\t\tcapacity := 1\n\t\t\tif elem.Size() > 0 {\n\t\t\t\tcapacity = minSliceBytes / int(elem.Size())\n\t\t\t}\n\n\t\t\tfmt.Fprintln(g.out, ws+\"if in.IsNull() {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" in.Skip()\")\n\t\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = nil\")\n\t\t\tfmt.Fprintln(g.out, ws+\"} else {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" in.Delim('[')\")\n\t\t\tfmt.Fprintln(g.out, ws+\" if \"+out+\" == nil {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" if !in.IsDelim(']') {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = make(\"+g.getType(t)+\", 0, \"+fmt.Sprint(capacity)+\")\")\n\t\t\tfmt.Fprintln(g.out, ws+\" } else {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = \"+g.getType(t)+\"{}\")\n\t\t\tfmt.Fprintln(g.out, ws+\" }\")\n\t\t\tfmt.Fprintln(g.out, ws+\" } else { \")\n\t\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = (\"+out+\")[:0]\")\n\t\t\tfmt.Fprintln(g.out, ws+\" }\")\n\t\t\tfmt.Fprintln(g.out, ws+\" for !in.IsDelim(']') {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" var \"+tmpVar+\" \"+g.getType(elem))\n\n\t\t\tif err := g.genTypeDecoder(elem, tmpVar, tags, indent+2); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = append(\"+out+\", \"+tmpVar+\")\")\n\t\t\tfmt.Fprintln(g.out, ws+\" in.WantComma()\")\n\t\t\tfmt.Fprintln(g.out, ws+\" }\")\n\t\t\tfmt.Fprintln(g.out, ws+\" 
in.Delim(']')\")\n\t\t\tfmt.Fprintln(g.out, ws+\"}\")\n\t\t}\n\n\tcase reflect.Array:\n\t\titerVar := g.uniqueVarName()\n\t\telem := t.Elem()\n\n\t\tif elem.Kind() == reflect.Uint8 && elem.Name() == \"uint8\" {\n\t\t\tfmt.Fprintln(g.out, ws+\"if in.IsNull() {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" in.Skip()\")\n\t\t\tfmt.Fprintln(g.out, ws+\"} else {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" copy(\"+out+\"[:], in.Bytes())\")\n\t\t\tfmt.Fprintln(g.out, ws+\"}\")\n\n\t\t} else {\n\n\t\t\tlength := t.Len()\n\n\t\t\tfmt.Fprintln(g.out, ws+\"if in.IsNull() {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" in.Skip()\")\n\t\t\tfmt.Fprintln(g.out, ws+\"} else {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" in.Delim('[')\")\n\t\t\tfmt.Fprintln(g.out, ws+\" \"+iterVar+\" := 0\")\n\t\t\tfmt.Fprintln(g.out, ws+\" for !in.IsDelim(']') {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" if \"+iterVar+\" < \"+fmt.Sprint(length)+\" {\")\n\n\t\t\tif err := g.genTypeDecoder(elem, \"(\"+out+\")[\"+iterVar+\"]\", tags, indent+3); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfmt.Fprintln(g.out, ws+\" \"+iterVar+\"++\")\n\t\t\tfmt.Fprintln(g.out, ws+\" } else {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" in.SkipRecursive()\")\n\t\t\tfmt.Fprintln(g.out, ws+\" }\")\n\t\t\tfmt.Fprintln(g.out, ws+\" in.WantComma()\")\n\t\t\tfmt.Fprintln(g.out, ws+\" }\")\n\t\t\tfmt.Fprintln(g.out, ws+\" in.Delim(']')\")\n\t\t\tfmt.Fprintln(g.out, ws+\"}\")\n\t\t}\n\n\tcase reflect.Struct:\n\t\tdec := g.getDecoderName(t)\n\t\tg.addType(t)\n\n\t\tif len(out) > 0 && out[0] == '*' {\n\t\t\t// NOTE: In order to remove an extra reference to a pointer\n\t\t\tfmt.Fprintln(g.out, ws+dec+\"(in, \"+out[1:]+\")\")\n\t\t} else {\n\t\t\tfmt.Fprintln(g.out, ws+dec+\"(in, &\"+out+\")\")\n\t\t}\n\n\tcase reflect.Ptr:\n\t\tfmt.Fprintln(g.out, ws+\"if in.IsNull() {\")\n\t\tfmt.Fprintln(g.out, ws+\" in.Skip()\")\n\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = nil\")\n\t\tfmt.Fprintln(g.out, ws+\"} else {\")\n\t\tfmt.Fprintln(g.out, ws+\" if \"+out+\" == nil {\")\n\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = new(\"+g.getType(t.Elem())+\")\")\n\t\tfmt.Fprintln(g.out, ws+\" }\")\n\n\t\tif err := g.genTypeDecoder(t.Elem(), \"*\"+out, tags, indent+1); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Fprintln(g.out, ws+\"}\")\n\n\tcase reflect.Map:\n\t\tkey := t.Key()\n\t\tkeyDec, ok := primitiveStringDecoders[key.Kind()]\n\t\tif !ok && !hasCustomUnmarshaler(key) {\n\t\t\treturn fmt.Errorf(\"map type %v not supported: only string and integer keys and types implementing json.Unmarshaler are allowed\", key)\n\t\t} // else assume the caller knows what they are doing and that the custom unmarshaler performs the translation from string or integer keys to the key type\n\t\telem := t.Elem()\n\t\ttmpVar := g.uniqueVarName()\n\t\tkeepEmpty := tags.required || tags.noOmitEmpty || (!g.omitEmpty && !tags.omitEmpty)\n\n\t\tfmt.Fprintln(g.out, ws+\"if in.IsNull() {\")\n\t\tfmt.Fprintln(g.out, ws+\" in.Skip()\")\n\t\tfmt.Fprintln(g.out, ws+\"} else {\")\n\t\tfmt.Fprintln(g.out, ws+\" in.Delim('{')\")\n\t\tif !keepEmpty {\n\t\t\tfmt.Fprintln(g.out, ws+\" if !in.IsDelim('}') {\")\n\t\t}\n\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = make(\"+g.getType(t)+\")\")\n\t\tif !keepEmpty {\n\t\t\tfmt.Fprintln(g.out, ws+\" } else {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = nil\")\n\t\t\tfmt.Fprintln(g.out, ws+\" }\")\n\t\t}\n\n\t\tfmt.Fprintln(g.out, ws+\" for !in.IsDelim('}') {\")\n\t\t// NOTE: extra check for TextUnmarshaler. 
It overrides default methods.\n\t\tif reflect.PtrTo(key).Implements(reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()) {\n\t\t\tfmt.Fprintln(g.out, ws+\" var key \"+g.getType(key))\n\t\t\tfmt.Fprintln(g.out, ws+\"if data := in.UnsafeBytes(); in.Ok() {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" in.AddError(key.UnmarshalText(data) )\")\n\t\t\tfmt.Fprintln(g.out, ws+\"}\")\n\t\t} else if keyDec != \"\" {\n\t\t\tfmt.Fprintln(g.out, ws+\" key := \"+g.getType(key)+\"(\"+keyDec+\")\")\n\t\t} else {\n\t\t\tfmt.Fprintln(g.out, ws+\" var key \"+g.getType(key))\n\t\t\tif err := g.genTypeDecoder(key, \"key\", tags, indent+2); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tfmt.Fprintln(g.out, ws+\" in.WantColon()\")\n\t\tfmt.Fprintln(g.out, ws+\" var \"+tmpVar+\" \"+g.getType(elem))\n\n\t\tif err := g.genTypeDecoder(elem, tmpVar, tags, indent+2); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Fprintln(g.out, ws+\" (\"+out+\")[key] = \"+tmpVar)\n\t\tfmt.Fprintln(g.out, ws+\" in.WantComma()\")\n\t\tfmt.Fprintln(g.out, ws+\" }\")\n\t\tfmt.Fprintln(g.out, ws+\" in.Delim('}')\")\n\t\tfmt.Fprintln(g.out, ws+\"}\")\n\n\tcase reflect.Interface:\n\t\tif t.NumMethod() != 0 {\n\t\t\tif g.interfaceIsEasyjsonUnmarshaller(t) {\n\t\t\t\tfmt.Fprintln(g.out, ws+out+\".UnmarshalEasyJSON(in)\")\n\t\t\t} else if g.interfaceIsJsonUnmarshaller(t) {\n\t\t\t\tfmt.Fprintln(g.out, ws+out+\".UnmarshalJSON(in.Raw())\")\n\t\t\t} else {\n\t\t\t\treturn fmt.Errorf(\"interface type %v not supported: only interface{} and easyjson/json Unmarshaler are allowed\", t)\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Fprintln(g.out, ws+\"if m, ok := \"+out+\".(easyjson.Unmarshaler); ok {\")\n\t\t\tfmt.Fprintln(g.out, ws+\"m.UnmarshalEasyJSON(in)\")\n\t\t\tfmt.Fprintln(g.out, ws+\"} else if m, ok := \"+out+\".(json.Unmarshaler); ok {\")\n\t\t\tfmt.Fprintln(g.out, ws+\"_ = m.UnmarshalJSON(in.Raw())\")\n\t\t\tfmt.Fprintln(g.out, ws+\"} else {\")\n\t\t\tfmt.Fprintln(g.out, ws+\" \"+out+\" = in.Interface()\")\n\t\t\tfmt.Fprintln(g.out, ws+\"}\")\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(\"don't know how to decode %v\", t)\n\t}\n\treturn nil\n\n}", "func (decode *decoder) ensureOutType(outType reflect.Type) error {\n\tswitch outType.Kind() {\n\tcase reflect.Slice:\n\t\tfallthrough\n\tcase reflect.Array:\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"cannot use \" + outType.String() + \", only slice or array supported\")\n}", "func (t *Type) GobDecode(buf []byte) error {\n\tr := bytes.NewReader(buf)\n\tdec := gob.NewDecoder(r)\n\n\tvar gt gobType\n\terr := dec.Decode(&gt)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error decoding cty.Type: %s\", err)\n\t}\n\tif gt.Version != 0 {\n\t\treturn fmt.Errorf(\"unsupported cty.Type encoding version %d; only 0 is supported\", gt.Version)\n\t}\n\n\tt.typeImpl = gt.Impl\n\n\treturn nil\n}", "func (decode *decoder) ensureOutInnerType(outInnerType reflect.Type) error {\n\tswitch outInnerType.Kind() {\n\tcase reflect.Struct:\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"cannot use \" + outInnerType.String() + \", only struct supported\")\n}", "func decodeDecoder(s *Stream, val reflect.Value) error {\n\tif val.Kind() == reflect.Ptr && val.IsNil() {\n\t\t// set the value to the pointer pointed to the 0 represented by the data type\n\t\tval.Set(reflect.New(val.Type().Elem()))\n\t}\n\t// transfer the reflect type back to Decoder interface type, and call DecodeRLP method\n\treturn val.Interface().(Decoder).DecodeRLP(s)\n}", "func UnmarshalType(b []byte, outputType reflect.Type) (interface{}, error) {\n\n\tif len(b) == 0 
{\n\t\treturn nil, ErrEmptyInput\n\t}\n\n\tswitch string(b) {\n\tcase \"true\":\n\t\tif outputType.Kind() != reflect.Bool {\n\t\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.Bool}}\n\t\t}\n\t\treturn true, nil\n\tcase \"false\":\n\t\tif outputType.Kind() != reflect.Bool {\n\t\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.Bool}}\n\t\t}\n\t\treturn false, nil\n\tcase \"null\":\n\t\treturn nil, nil\n\t}\n\n\tfirst, _ := utf8.DecodeRune(b)\n\tif first == utf8.RuneError {\n\t\treturn nil, ErrInvalidRune\n\t}\n\n\tswitch first {\n\tcase '[', '-':\n\t\tif outputType.Kind() != reflect.Slice {\n\t\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.Slice}}\n\t\t}\n\t\tptr := reflect.New(outputType)\n\t\tptr.Elem().Set(reflect.MakeSlice(outputType, 0, 0))\n\t\terr := goyaml.Unmarshal(b, ptr.Interface())\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling JSON %q\", string(b)))\n\t\t}\n\t\treturn ptr.Elem().Interface(), nil\n\tcase '{':\n\t\tif k := outputType.Kind(); k != reflect.Map {\n\t\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.Map}}\n\t\t}\n\t\tptr := reflect.New(outputType)\n\t\tptr.Elem().Set(reflect.MakeMap(outputType))\n\t\terr := goyaml.Unmarshal(b, ptr.Interface())\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling JSON %q\", string(b)))\n\t\t}\n\t\treturn ptr.Elem().Interface(), nil\n\tcase '\"':\n\t\tif k := outputType.Kind(); k != reflect.String {\n\t\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.String}}\n\t\t}\n\t\tobj := \"\"\n\t\terr := goyaml.Unmarshal(b, &obj)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling JSON %q\", string(b)))\n\t\t}\n\t\treturn obj, nil\n\t}\n\n\tif strings.Contains(string(b), \"\\n\") {\n\t\tif k := outputType.Kind(); k != reflect.Map {\n\t\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.Map}}\n\t\t}\n\t\tptr := reflect.New(outputType)\n\t\tptr.Elem().Set(reflect.MakeMap(outputType))\n\t\terr := goyaml.Unmarshal(b, ptr.Interface())\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling YAML %q\", string(b)))\n\t\t}\n\t\treturn ptr.Elem().Interface(), nil\n\t}\n\n\tswitch outputType.Kind() {\n\tcase reflect.Int:\n\t\ti, err := strconv.Atoi(string(b))\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling YAML %q\", string(b)))\n\t\t}\n\t\treturn i, nil\n\tcase reflect.Float64:\n\t\tf, err := strconv.ParseFloat(string(b), 64)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling YAML %q\", string(b)))\n\t\t}\n\t\treturn f, nil\n\t}\n\treturn string(b), nil\n}", "func DecodeTyped(data []byte) (*Typed, error) {\n\tvar typed Typed\n\tif err := proto.Unmarshal(data, &typed); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &typed, nil\n}", "func (v *Value) Decode(ptr interface{}) {\n switch p := ptr.(type) {\n case *[]byte:\n *p = v.Bytes()\n case *string:\n *p = v.String()\n case *bool:\n *p = Util.ToBool(v.data)\n case *float32, *float64:\n fv := Util.ToFloat(v.data)\n rv := reflect.ValueOf(ptr).Elem()\n rv.Set(reflect.ValueOf(fv).Convert(rv.Type()))\n case *int, *int8, *int16, *int32, *int64:\n iv := Util.ToInt(v.data)\n rv := reflect.ValueOf(ptr).Elem()\n rv.Set(reflect.ValueOf(iv).Convert(rv.Type()))\n case *uint, *uint8, 
*uint16, *uint32, *uint64:\n iv := Util.ToInt(v.data)\n rv := reflect.ValueOf(ptr).Elem()\n rv.Set(reflect.ValueOf(iv).Convert(rv.Type()))\n default:\n if e := json.Unmarshal(v.Bytes(), ptr); e != nil {\n rv := reflect.ValueOf(ptr)\n if rv.Kind() != reflect.Ptr || rv.IsNil() {\n panic(\"Value.Decode: require a valid pointer\")\n }\n\n if rv = rv.Elem(); rv.Kind() == reflect.Interface {\n rv.Set(reflect.ValueOf(v.data))\n } else {\n panic(\"Value.Decode: \" + e.Error())\n }\n }\n }\n}", "func UnmarshalType(b []byte, outputType reflect.Type) (interface{}, error) {\n\n\tif len(b) == 0 {\n\t\treturn nil, ErrEmptyInput\n\t}\n\n\t// If the kind of the output type is interface{}, then simply use Unmarshal.\n\tif outputType.Kind() == reflect.Interface {\n\t\treturn Unmarshal(b)\n\t}\n\n\tif bytes.Equal(b, Y) || bytes.Equal(b, True) {\n\t\tif outputType.Kind() != reflect.Bool {\n\t\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.Bool}}\n\t\t}\n\t\treturn true, nil\n\t}\n\tif bytes.Equal(b, False) {\n\t\tif outputType.Kind() != reflect.Bool {\n\t\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.Bool}}\n\t\t}\n\t\treturn false, nil\n\t}\n\tif bytes.Equal(b, Null) {\n\t\treturn nil, nil\n\t}\n\n\tif bytes.HasPrefix(b, BoundaryMarker) {\n\t\tif outputType.Kind() != reflect.Slice {\n\t\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.Slice}}\n\t\t}\n\t\ts := NewDocumentScanner(bytes.NewReader(b), true)\n\t\tout := reflect.MakeSlice(outputType, 0, 0)\n\t\ti := 0\n\t\tfor s.Scan() {\n\t\t\tif d := s.Bytes(); len(d) > 0 {\n\t\t\t\tobj, err := UnmarshalType(d, outputType.Elem())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn out.Interface(), errors.Wrapf(err, \"error scanning document %d\", i)\n\t\t\t\t}\n\t\t\t\tout = reflect.Append(out, reflect.ValueOf(obj))\n\t\t\t\ti++\n\t\t\t}\n\t\t}\n\t\tif err := s.Err(); err != nil {\n\t\t\treturn out.Interface(), errors.Wrap(err, fmt.Sprintf(\"error scanning YAML %q\", string(b)))\n\t\t}\n\t\treturn out.Interface(), nil\n\t}\n\n\tfirst, _ := utf8.DecodeRune(b)\n\tif first == utf8.RuneError {\n\t\treturn nil, ErrInvalidRune\n\t}\n\n\tswitch first {\n\tcase '[', '-':\n\t\tif outputType.Kind() != reflect.Slice {\n\t\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.Slice}}\n\t\t}\n\t\tptr := reflect.New(outputType)\n\t\tptr.Elem().Set(reflect.MakeSlice(outputType, 0, 0))\n\t\terr := goyaml.Unmarshal(b, ptr.Interface())\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling YAML %q\", string(b)))\n\t\t}\n\t\treturn ptr.Elem().Interface(), nil\n\tcase '{':\n\t\tif k := outputType.Kind(); k != reflect.Map {\n\t\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.Map}}\n\t\t}\n\t\tptr := reflect.New(outputType)\n\t\tptr.Elem().Set(reflect.MakeMap(outputType))\n\t\terr := goyaml.Unmarshal(b, ptr.Interface())\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling YAML %q\", string(b)))\n\t\t}\n\t\treturn ptr.Elem().Interface(), nil\n\tcase '\"':\n\t\tif k := outputType.Kind(); k != reflect.String {\n\t\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.String}}\n\t\t}\n\t\tobj := \"\"\n\t\terr := goyaml.Unmarshal(b, &obj)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling YAML %q\", string(b)))\n\t\t}\n\t\treturn obj, nil\n\t}\n\n\tif _, _, ok := ParseKeyValue(b); ok {\n\t\tk 
:= outputType.Kind()\n\n\t\tif k == reflect.Map {\n\t\t\tptr := reflect.New(outputType)\n\t\t\tptr.Elem().Set(reflect.MakeMap(outputType))\n\t\t\terr := goyaml.Unmarshal(b, ptr.Interface())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling YAML %q into map\", string(b)))\n\t\t\t}\n\t\t\treturn ptr.Elem().Interface(), nil\n\t\t}\n\n\t\tif k == reflect.Struct {\n\t\t\tptr := reflect.New(outputType)\n\t\t\tptr.Elem().Set(reflect.Zero(outputType))\n\t\t\terr := goyaml.Unmarshal(b, ptr.Interface())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling YAML %q into struct\", string(b)))\n\t\t\t}\n\t\t\treturn ptr.Elem().Interface(), nil\n\t\t}\n\n\t\treturn nil, &ErrInvalidKind{Value: outputType, Expected: []reflect.Kind{reflect.Map, reflect.Struct}}\n\t}\n\n\tswitch outputType.Kind() {\n\tcase reflect.Int:\n\t\ti, err := strconv.Atoi(string(b))\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling YAML %q\", string(b)))\n\t\t}\n\t\treturn i, nil\n\tcase reflect.Float64:\n\t\tf, err := strconv.ParseFloat(string(b), 64)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"error unmarshaling YAML %q\", string(b)))\n\t\t}\n\t\treturn f, nil\n\tcase reflect.String:\n\t\tstr := strings.TrimSpace(string(b))\n\t\tif len(str) > 0 {\n\t\t\treturn str, nil\n\t\t}\n\t\t// if empty string, then return nil\n\t\treturn nil, nil\n\t}\n\n\treturn nil, errors.Errorf(\"could not unmarshal YAML %q into type %v\", string(b), outputType)\n}", "func (d *Decoder) DecodeInterface() (interface{}, error) {\n\tc, err := d.readCode()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif msgpcode.IsFixedNum(c) {\n\t\treturn int8(c), nil\n\t}\n\tif msgpcode.IsFixedMap(c) {\n\t\terr = d.s.UnreadByte()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn d.decodeMapDefault()\n\t}\n\tif msgpcode.IsFixedArray(c) {\n\t\treturn d.decodeSlice(c)\n\t}\n\tif msgpcode.IsFixedString(c) {\n\t\treturn d.string(c)\n\t}\n\n\tswitch c {\n\tcase msgpcode.Nil:\n\t\treturn nil, nil\n\tcase msgpcode.False, msgpcode.True:\n\t\treturn d.bool(c)\n\tcase msgpcode.Float:\n\t\treturn d.float32(c)\n\tcase msgpcode.Double:\n\t\treturn d.float64(c)\n\tcase msgpcode.Uint8:\n\t\treturn d.uint8()\n\tcase msgpcode.Uint16:\n\t\treturn d.uint16()\n\tcase msgpcode.Uint32:\n\t\treturn d.uint32()\n\tcase msgpcode.Uint64:\n\t\treturn d.uint64()\n\tcase msgpcode.Int8:\n\t\treturn d.int8()\n\tcase msgpcode.Int16:\n\t\treturn d.int16()\n\tcase msgpcode.Int32:\n\t\treturn d.int32()\n\tcase msgpcode.Int64:\n\t\treturn d.int64()\n\tcase msgpcode.Bin8, msgpcode.Bin16, msgpcode.Bin32:\n\t\treturn d.bytes(c, nil)\n\tcase msgpcode.Str8, msgpcode.Str16, msgpcode.Str32:\n\t\treturn d.string(c)\n\tcase msgpcode.Array16, msgpcode.Array32:\n\t\treturn d.decodeSlice(c)\n\tcase msgpcode.Map16, msgpcode.Map32:\n\t\terr = d.s.UnreadByte()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn d.decodeMapDefault()\n\tcase msgpcode.FixExt1, msgpcode.FixExt2, msgpcode.FixExt4, msgpcode.FixExt8, msgpcode.FixExt16,\n\t\tmsgpcode.Ext8, msgpcode.Ext16, msgpcode.Ext32:\n\t\treturn d.decodeInterfaceExt(c)\n\t}\n\n\treturn 0, fmt.Errorf(\"msgpack: unknown code %x decoding interface{}\", c)\n}", "func (d *Decoder) Decode(ctx context.Context, ref blob.Ref, obj interface{}) error {\n\tif u, ok := obj.(Unmarshaler); ok {\n\t\treturn u.PkUnmarshal(ctx, d.src, ref)\n\t}\n\n\tv := reflect.ValueOf(obj)\n\tt := v.Type()\n\tif t.Kind() != 
reflect.Ptr {\n\t\treturn ErrNotPointer\n\t}\n\tif v.IsNil() {\n\t\treturn ErrNilPointer\n\t}\n\n\tr, size, err := d.src.Fetch(ctx, ref)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"fetching %s from src\", ref)\n\t}\n\tdefer r.Close()\n\n\ts, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"reading body of %s\", ref)\n\t}\n\n\telTyp := t.Elem()\n\n\tswitch elTyp.Kind() {\n\tcase reflect.Bool:\n\t\tp := obj.(*bool)\n\t\t*p = (size > 0)\n\t\treturn nil\n\n\tcase reflect.Int:\n\t\tn, err := strconv.ParseInt(string(s), 10, 0)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing int from %s\", string(s))\n\t\t}\n\t\tp := obj.(*int)\n\t\t*p = int(n)\n\t\treturn nil\n\n\tcase reflect.Int8:\n\t\tn, err := strconv.ParseInt(string(s), 10, 8)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing int8 from %s\", string(s))\n\t\t}\n\t\tp := obj.(*int8)\n\t\t*p = int8(n)\n\t\treturn nil\n\n\tcase reflect.Int16:\n\t\tn, err := strconv.ParseInt(string(s), 10, 16)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing int16 from %s\", string(s))\n\t\t}\n\t\tp := obj.(*int16)\n\t\t*p = int16(n)\n\t\treturn nil\n\n\tcase reflect.Int32:\n\t\tn, err := strconv.ParseInt(string(s), 10, 32)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing int32 from %s\", string(s))\n\t\t}\n\t\tp := obj.(*int32)\n\t\t*p = int32(n)\n\t\treturn nil\n\n\tcase reflect.Int64:\n\t\tn, err := strconv.ParseInt(string(s), 10, 64)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing int64 from %s\", string(s))\n\t\t}\n\t\tp := obj.(*int64)\n\t\t*p = n\n\t\treturn nil\n\n\tcase reflect.Uint:\n\t\tn, err := strconv.ParseUint(string(s), 10, 0)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing uint from %s\", string(s))\n\t\t}\n\t\tp := obj.(*uint)\n\t\t*p = uint(n)\n\t\treturn nil\n\n\tcase reflect.Uint8:\n\t\tn, err := strconv.ParseUint(string(s), 10, 8)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing uint8 from %s\", string(s))\n\t\t}\n\t\tp := obj.(*uint8)\n\t\t*p = uint8(n)\n\t\treturn nil\n\n\tcase reflect.Uint16:\n\t\tn, err := strconv.ParseUint(string(s), 10, 16)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing uint16 from %s\", string(s))\n\t\t}\n\t\tp := obj.(*uint16)\n\t\t*p = uint16(n)\n\t\treturn nil\n\n\tcase reflect.Uint32:\n\t\tn, err := strconv.ParseUint(string(s), 10, 32)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing uint32 from %s\", string(s))\n\t\t}\n\t\tp := obj.(*uint32)\n\t\t*p = uint32(n)\n\t\treturn nil\n\n\tcase reflect.Uint64:\n\t\tn, err := strconv.ParseUint(string(s), 10, 64)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing uint64 from %s\", string(s))\n\t\t}\n\t\tp := obj.(*uint64)\n\t\t*p = n\n\t\treturn nil\n\n\tcase reflect.Float32:\n\t\tf, err := strconv.ParseFloat(string(s), 32)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing float32 from %s\", string(s))\n\t\t}\n\t\tp := obj.(*float32)\n\t\t*p = float32(f)\n\t\treturn nil\n\n\tcase reflect.Float64:\n\t\tf, err := strconv.ParseFloat(string(s), 64)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing float64 from %s\", string(s))\n\t\t}\n\t\tp := obj.(*float64)\n\t\t*p = f\n\t\treturn nil\n\n\tcase reflect.Array:\n\t\tvar refs []blob.Ref\n\t\tdec := d.newJSONDecoder(bytes.NewReader(s))\n\t\terr := dec.Decode(&refs)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"JSON-decoding blobref array\")\n\t\t}\n\t\tarr := v.Elem()\n\t\treturn d.buildArray(ctx, arr, refs)\n\n\tcase 
reflect.Slice:\n\t\tvar refs []blob.Ref\n\t\tdec := d.newJSONDecoder(bytes.NewReader(s))\n\t\terr := dec.Decode(&refs)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"JSON-decoding blobref slice\")\n\t\t}\n\t\tslice := v.Elem()\n\t\tslice, err = d.buildSlice(ctx, slice, refs)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tv.Elem().Set(slice)\n\t\treturn nil\n\n\tcase reflect.Map:\n\t\tkt := elTyp.Key()\n\t\tmt := reflect.MapOf(kt, reftype)\n\t\tmm := reflect.New(mt)\n\t\tdec := d.newJSONDecoder(bytes.NewReader(s))\n\t\terr := dec.Decode(mm.Interface())\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"JSON-decoding map[K]blob.Ref\")\n\t\t}\n\t\treturn d.buildMap(ctx, v.Elem(), mm.Elem())\n\n\tcase reflect.String:\n\t\tp := obj.(*string)\n\t\t*p = string(s)\n\t\treturn nil\n\n\tcase reflect.Struct:\n\t\t// Construct an intermediate struct type for JSON-unmarshaling into.\n\n\t\tvar ftypes []reflect.StructField\n\t\tfor i := 0; i < elTyp.NumField(); i++ {\n\t\t\ttf := elTyp.Field(i)\n\t\t\tname, o := parseTag(tf)\n\t\t\ttf.Tag = reflect.StructTag(fmt.Sprintf(`%s json:\"%s\"`, tf.Tag, name))\n\t\t\tif o.omit || o.inline {\n\t\t\t\tftypes = append(ftypes, tf)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !o.external {\n\t\t\t\tswitch tf.Type.Kind() {\n\t\t\t\tcase reflect.Slice:\n\t\t\t\t\ttf.Type = reflect.SliceOf(reftype)\n\t\t\t\t\tftypes = append(ftypes, tf)\n\t\t\t\t\tcontinue\n\n\t\t\t\tcase reflect.Array:\n\t\t\t\t\ttf.Type = reflect.SliceOf(reftype) // sic, not ArrayOf\n\t\t\t\t\tftypes = append(ftypes, tf)\n\t\t\t\t\tcontinue\n\n\t\t\t\tcase reflect.Map:\n\t\t\t\t\ttf.Type = reflect.MapOf(tf.Type.Key(), reftype)\n\t\t\t\t\tftypes = append(ftypes, tf)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\ttf.Type = reftype\n\t\t\tftypes = append(ftypes, tf)\n\t\t}\n\t\tintermediateTyp := reflect.StructOf(ftypes)\n\t\tintermediateStruct := reflect.New(intermediateTyp)\n\t\tdec := d.newJSONDecoder(bytes.NewReader(s))\n\t\terr := dec.Decode(intermediateStruct.Interface())\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"JSON-decoding into intermediate struct\")\n\t\t}\n\n\t\tstructVal := v.Elem()\n\t\tfor i := 0; i < elTyp.NumField(); i++ {\n\t\t\ttf := elTyp.Field(i)\n\t\t\tname, o := parseTag(tf)\n\t\t\tif o.omit {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfield := structVal.Field(i)\n\t\t\tifield := intermediateStruct.Elem().Field(i)\n\t\t\tif o.inline {\n\t\t\t\tfield.Set(ifield)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !o.external {\n\t\t\t\tswitch tf.Type.Kind() {\n\t\t\t\tcase reflect.Slice:\n\t\t\t\t\trefs := ifield.Interface().([]blob.Ref)\n\t\t\t\t\tslice, err := d.buildSlice(ctx, field, refs)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn errors.Wrapf(err, \"building slice for field %s\", name)\n\t\t\t\t\t}\n\t\t\t\t\tfield.Set(slice)\n\t\t\t\t\tcontinue\n\n\t\t\t\tcase reflect.Array:\n\t\t\t\t\trefs := ifield.Interface().([]blob.Ref)\n\t\t\t\t\terr = d.buildArray(ctx, field, refs)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn errors.Wrapf(err, \"building array for field %s\", name)\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\n\t\t\t\tcase reflect.Map:\n\t\t\t\t\terr = d.buildMap(ctx, field, ifield)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn errors.Wrapf(err, \"building map for field %s\", name)\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ifield.IsZero() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfieldRef := ifield.Interface().(blob.Ref)\n\t\t\tnewFieldVal := reflect.New(tf.Type)\n\t\t\terr = d.Decode(ctx, fieldRef, newFieldVal.Interface())\n\t\t\tif err != nil {\n\t\t\t\treturn 
errors.Wrapf(err, \"decoding ref %s for field %s\", fieldRef, name)\n\t\t\t}\n\t\t\tfield.Set(newFieldVal.Elem())\n\t\t}\n\t\treturn nil\n\n\tcase reflect.Ptr:\n\t\tptr := v.Elem()\n\t\tif ptr.IsNil() {\n\t\t\tnewItem := reflect.New(elTyp.Elem())\n\t\t\tv.Elem().Set(newItem)\n\t\t}\n\t\t// Recursively unmarshal into the thing ptr points to.\n\t\treturn d.Decode(ctx, ref, ptr.Interface())\n\n\tdefault:\n\t\treturn ErrUnsupportedType{Name: t.Name()}\n\t}\n}", "func (b *ProxyTypeBox) Decode(buf *bin.Buffer) error {\n\tif b == nil {\n\t\treturn fmt.Errorf(\"unable to decode ProxyTypeBox to nil\")\n\t}\n\tv, err := DecodeProxyType(buf)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to decode boxed value: %w\", err)\n\t}\n\tb.ProxyType = v\n\treturn nil\n}", "func (v *Type) Decode(sr stream.Reader) error {\n\n\tif err := sr.ReadStructBegin(); err != nil {\n\t\treturn err\n\t}\n\n\tfh, ok, err := sr.ReadFieldBegin()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor ok {\n\t\tswitch {\n\t\tcase fh.ID == 1 && fh.Type == wire.TI32:\n\t\t\tvar x SimpleType\n\t\t\tx, err = _SimpleType_Decode(sr)\n\t\t\tv.SimpleType = &x\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase fh.ID == 2 && fh.Type == wire.TStruct:\n\t\t\tv.SliceType, err = _Type_Decode(sr)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase fh.ID == 3 && fh.Type == wire.TStruct:\n\t\t\tv.KeyValueSliceType, err = _TypePair_Decode(sr)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase fh.ID == 4 && fh.Type == wire.TStruct:\n\t\t\tv.MapType, err = _TypePair_Decode(sr)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase fh.ID == 5 && fh.Type == wire.TStruct:\n\t\t\tv.ReferenceType, err = _TypeReference_Decode(sr)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase fh.ID == 6 && fh.Type == wire.TStruct:\n\t\t\tv.PointerType, err = _Type_Decode(sr)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tdefault:\n\t\t\tif err := sr.Skip(fh.Type); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif err := sr.ReadFieldEnd(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif fh, ok, err = sr.ReadFieldBegin(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := sr.ReadStructEnd(); err != nil {\n\t\treturn err\n\t}\n\n\tcount := 0\n\tif v.SimpleType != nil {\n\t\tcount++\n\t}\n\tif v.SliceType != nil {\n\t\tcount++\n\t}\n\tif v.KeyValueSliceType != nil {\n\t\tcount++\n\t}\n\tif v.MapType != nil {\n\t\tcount++\n\t}\n\tif v.ReferenceType != nil {\n\t\tcount++\n\t}\n\tif v.PointerType != nil {\n\t\tcount++\n\t}\n\tif count != 1 {\n\t\treturn fmt.Errorf(\"Type should have exactly one field: got %v fields\", count)\n\t}\n\n\treturn nil\n}", "func (d *Decoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {\n\td.log.Debugf(\"decoding type %s\", d.valType)\n\t// if we are dealing with an empty interface, skip it.\n\tif d.isEmptyInterface(ptr) {\n\t\td.log.Warn(\"cannot encode to empty interface\")\n\t\titer.Skip()\n\t\treturn\n\t}\n\t// we really shouldn't be here with an invalid token, if for\n\t// some reason we are, call the default decoder and bail.\n\tif d.token == InvalidToken {\n\t\td.log.Warn(\"invalid token\")\n\t\td.decoder.Decode(ptr, iter)\n\t\treturn\n\t}\n\t// get the from type\n\tfromType := iter.WhatIsNext()\n\t// secure tokens will be type string. 
if this is not\n\t// a string, call the default decoder and bail.\n\tif fromType != jsoniter.StringValue {\n\t\td.log.Debug(\"skipping non-string value\")\n\t\td.decoder.Decode(ptr, iter)\n\t\treturn\n\t}\n\t// read the string & for mat a key\n\tkey := Key(iter.ReadString())\n\t// check to see if it is one of ours\n\tif !key.IsTokenKey(d.token) {\n\t\t// we use an Iterator avoid setting the ptr directly since it might be a string\n\t\t// or an interface or who knows what. this was the codecs handle it for us.\n\t\tsubIter := iter.Pool().BorrowIterator([]byte(fmt.Sprintf(`\"%s\"`, key)))\n\t\tdefer iter.Pool().ReturnIterator(subIter)\n\t\td.log.Debugf(\"decode string: %s\", key)\n\t\t// decode the string\n\t\td.decoder.Decode(ptr, subIter)\n\t\treturn\n\t}\n\t// we have a valid lookup key. look it up in our table\n\tval, err := d.lookupKey(key)\n\t// did we find something in the lookup table?\n\tif err != nil || val == nil {\n\t\td.log.Debugf(\"lookup entry not found: %s\", key)\n\t\t// this is expected when sparse decoding a struct.\n\t\tif d.valType.Kind() == reflect.Interface {\n\t\t\td.log.Debugf(\"decode empty %s for interface\", key.Kind())\n\t\t\t// if we have a map then set an explicitly typed empty value\n\t\t\t*(*interface{})(ptr) = emptyValueOfKind(key.Kind())\n\t\t}\n\t\treturn\n\t}\n\t// clear the buffer\n\td.stream.Reset(nil)\n\tval.WriteTo(d.stream)\n\tsubIter := iter.Pool().BorrowIterator(d.stream.Buffer())\n\tdefer iter.Pool().ReturnIterator(subIter)\n\t// decode the string\n\td.decoder.Decode(ptr, subIter)\n\td.log.Debugf(\"decoded lookup entry for %s: %s\", key, string(d.stream.Buffer()))\n}", "func decodeWireType(r io.ByteReader) *wireType {\n\twt := new(wireType)\n\tf := -1\n\tfor {\n\t\tdf, err := decodeUint(r)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif df == 0 {\n\t\t\tbreak\n\t\t}\n\t\tf += int(df)\n\t\tswitch f {\n\t\tcase 0:\n\t\t\twt.ArrayT = decodeArrayType(r)\n\t\tcase 1:\n\t\t\twt.SliceT = decodeSliceType(r)\n\t\tcase 2:\n\t\t\twt.StructT = decodeStructType(r)\n\t\tcase 3:\n\t\t\twt.MapT = decodeMapType(r)\n\t\t}\n\t}\n\treturn wt\n}", "func Decode(reader io.Reader, boundary string) (Type, error) {\n\tr := bufio.NewReader(reader)\n\tif len(boundary) > 0 {\n\t\treturn decodeform(r, boundary)\n\t}\n\tpeek, err := r.Peek(2)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tswitch string(peek) {\n\tcase `--`:\n\t\treturn decodeform(r, boundary)\n\t}\n\treturn decodevars(r)\n}", "func (d *Decoder) Decode(v interface{}) (err error) {\n\t// v must be a pointer\n\tpval := reflect.ValueOf(v)\n\tif pval.Kind() != reflect.Ptr || pval.IsNil() {\n\t\treturn errBadPointer\n\t}\n\n\t// catch decoding panics and convert them to errors\n\t// note that this allows us to skip boundary checks during decoding\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr = fmt.Errorf(\"could not decode type %s: %v\", pval.Elem().Type().String(), r)\n\t\t}\n\t}()\n\n\t// reset the read count\n\td.n = 0\n\n\td.decode(pval.Elem())\n\treturn\n}", "func (s *impl) Decode(val interface{}, tType model.Type) (ret model.Value, err error) {\n\tif !tType.IsBasic() {\n\t\terr = fmt.Errorf(\"illegal value type, type:%s\", tType.GetName())\n\t\treturn\n\t}\n\n\tswitch tType.GetValue() {\n\tcase util.TypeBooleanField:\n\t\tret, err = s.decodeBool(val, tType)\n\tcase util.TypeDateTimeField:\n\t\tret, err = s.decodeDateTime(val, tType)\n\tcase util.TypeFloatField, util.TypeDoubleField:\n\t\tret, err = s.decodeFloat(val, tType)\n\tcase util.TypeBitField, util.TypeSmallIntegerField, 
util.TypeInteger32Field, util.TypeIntegerField, util.TypeBigIntegerField:\n\t\tret, err = s.decodeInt(val, tType)\n\tcase util.TypePositiveBitField, util.TypePositiveSmallIntegerField, util.TypePositiveInteger32Field, util.TypePositiveIntegerField, util.TypePositiveBigIntegerField:\n\t\tret, err = s.decodeUint(val, tType)\n\tcase util.TypeSliceField:\n\t\tret, err = s.decodeSlice(val, tType)\n\tcase util.TypeStringField:\n\t\tret, err = s.decodeString(val, tType)\n\tdefault:\n\t\terr = fmt.Errorf(\"illegal type, type:%s\", tType.GetName())\n\t}\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif tType.IsPtrType() && !ret.IsNil() {\n\t\tret = ret.Addr()\n\t}\n\n\treturn\n}", "func nakedDecode(src []byte, t Type) (int, error) {\n\t// decode header\n\thl, _, rl, err := headerDecode(src, t)\n\n\t// check remaining length\n\tif rl != 0 {\n\t\treturn hl, makeError(t, \"expected zero remaining length\")\n\t}\n\n\treturn hl, err\n}", "func makeDecoder(typ reflect.Type, tags tags) (dec decoder, err error) {\n\tkind := typ.Kind()\n\tswitch {\n\tcase typ == rawValueType:\n\t\treturn decodeRawValue, nil\n\t// for data that implemented DecodeRLP method (pointer receiver)\n\tcase typ.Implements(decoderInterface):\n\t\treturn decodeDecoder, nil\n\t// pointer type of the variable implements the decoder interface (pointer receiver)\n\tcase kind != reflect.Ptr && reflect.PtrTo(typ).Implements(decoderInterface):\n\t\treturn decodeDecoderNoPtr, nil\n\t// if the type is *bigInt\n\tcase typ.AssignableTo(reflect.PtrTo(bigInt)):\n\t\treturn decodeBigInt, nil\n\tcase typ.AssignableTo(bigInt):\n\t\treturn decodeBigIntNoPtr, nil\n\tcase isUint(kind):\n\t\treturn decodeUint, nil\n\tcase kind == reflect.Bool:\n\t\treturn decodeBool, nil\n\tcase kind == reflect.String:\n\t\treturn decodeString, nil\n\tcase kind == reflect.Slice || kind == reflect.Array:\n\t\treturn makeListDecoder(typ, tags)\n\tcase kind == reflect.Struct:\n\t\treturn makeStructDecoder(typ)\n\tcase kind == reflect.Ptr:\n\t\tif tags.nilOK {\n\t\t\treturn makeOptionalPtrDecoder(typ)\n\t\t}\n\t\treturn makePtrDecoder(typ)\n\tcase kind == reflect.Interface:\n\t\treturn decodeInterface, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"rlp: type %v is not RLP-serializable\", typ)\n\t}\n}", "func DecodeFrom(d encoding.Decoder, x interface{}, typ reflect.Type) error {\n\tfrom := reflect.New(typ)\n\tif err := d.Decode(from.Interface()); err != nil {\n\t\treturn err\n\t}\n\treturn convertFrom(reflect.ValueOf(x), from)\n}", "func (t *RawJSONTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {\n\tvalueType, compression := gocbcore.DecodeCommonFlags(flags)\n\n\t// Make sure compression is disabled\n\tif compression != gocbcore.NoCompression {\n\t\treturn errors.New(\"unexpected value compression\")\n\t}\n\n\t// Normal types of decoding\n\tif valueType == gocbcore.BinaryType {\n\t\treturn errors.New(\"binary datatype is not supported by RawJSONTranscoder\")\n\t} else if valueType == gocbcore.StringType {\n\t\treturn errors.New(\"string datatype is not supported by RawJSONTranscoder\")\n\t} else if valueType == gocbcore.JSONType {\n\t\tswitch typedOut := out.(type) {\n\t\tcase *[]byte:\n\t\t\t*typedOut = bytes\n\t\t\treturn nil\n\t\tcase *string:\n\t\t\t*typedOut = string(bytes)\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn errors.New(\"you must encode raw JSON data in a byte array or string\")\n\t\t}\n\t}\n\n\treturn errors.New(\"unexpected expectedFlags value\")\n}", "func makeOptionalPtrDecoder(typ reflect.Type) (decoder, error) {\n\t// get the 
underlying element type and the corresponded decoder\n\tetype := typ.Elem()\n\tetypeinfo, err := cachedTypeInfo1(etype, tags{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdec := func(s *Stream, val reflect.Value) (err error) {\n\t\tkind, size, err := s.Kind()\n\t\t// criteria on checking if the value is empty\n\t\tif err != nil || size == 0 && kind != Byte {\n\t\t\ts.kind = -1 // rearm the kind\n\t\t\tval.Set(reflect.Zero(typ)) // set the value to be 0 with the type pointed by the pointer\n\t\t\treturn err\n\t\t}\n\t\tnewval := val\n\n\t\t// if the val pointed to nil, allocates space (allocate space in storage)\n\t\tif val.IsNil() {\n\t\t\tnewval = reflect.New(etype)\n\t\t}\n\t\t// decode data and set val\n\t\tif err = etypeinfo.decoder(s, newval.Elem()); err == nil {\n\t\t\tval.Set(newval)\n\t\t}\n\t\treturn err\n\t}\n\treturn dec, nil\n}", "func protoDec(t reflect.Type, in []byte) (T, error) {\n\tvar p protoreflect.ProtoMessage\n\tswitch it := reflect.New(t.Elem()).Interface().(type) {\n\tcase protoreflect.ProtoMessage:\n\t\tp = it\n\tcase protov1.Message:\n\t\tp = protov1.MessageV2(it)\n\t}\n\terr := protov2.UnmarshalOptions{}.Unmarshal(in, p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn p, nil\n}", "func Decode(dec Decoder, v interface{}) error {\n\tvar err error\n\tswitch vt := v.(type) {\n\tcase *string:\n\t\terr = dec.String(vt)\n\tcase **string:\n\t\tif vt == nil {\n\t\t\t*vt = new(string)\n\t\t}\n\t\terr = dec.String(*vt)\n\tcase *int:\n\t\terr = dec.Int(vt)\n\tcase **int:\n\t\tif vt == nil {\n\t\t\t*vt = new(int)\n\t\t}\n\t\terr = dec.Int(*vt)\n\tcase *int8:\n\t\terr = dec.Int8(vt)\n\tcase **int8:\n\t\tif vt == nil {\n\t\t\t*vt = new(int8)\n\t\t}\n\t\terr = dec.Int8(*vt)\n\tcase *int16:\n\t\terr = dec.Int16(vt)\n\tcase **int16:\n\t\tif vt == nil {\n\t\t\t*vt = new(int16)\n\t\t}\n\t\terr = dec.Int16(*vt)\n\tcase *int32:\n\t\terr = dec.Int32(vt)\n\tcase **int32:\n\t\tif vt == nil {\n\t\t\t*vt = new(int32)\n\t\t}\n\t\terr = dec.Int32(*vt)\n\tcase *int64:\n\t\terr = dec.Int64(vt)\n\tcase **int64:\n\t\tif vt == nil {\n\t\t\t*vt = new(int64)\n\t\t}\n\t\terr = dec.Int64(*vt)\n\tcase *uint8:\n\t\terr = dec.UInt8(vt)\n\tcase **uint8:\n\t\tif vt == nil {\n\t\t\t*vt = new(uint8)\n\t\t}\n\t\terr = dec.UInt8(*vt)\n\tcase *uint16:\n\t\terr = dec.UInt16(vt)\n\tcase **uint16:\n\t\tif vt == nil {\n\t\t\t*vt = new(uint16)\n\t\t}\n\t\terr = dec.UInt16(*vt)\n\tcase *uint32:\n\t\terr = dec.UInt32(vt)\n\tcase **uint32:\n\t\tif vt == nil {\n\t\t\t*vt = new(uint32)\n\t\t}\n\t\terr = dec.UInt32(*vt)\n\tcase *uint64:\n\t\terr = dec.UInt64(vt)\n\tcase **uint64:\n\t\tif vt == nil {\n\t\t\t*vt = new(uint64)\n\t\t}\n\t\terr = dec.UInt64(*vt)\n\tcase *float64:\n\t\terr = dec.Float64(vt)\n\tcase **float64:\n\t\tif vt == nil {\n\t\t\t*vt = new(float64)\n\t\t}\n\t\terr = dec.Float64(*vt)\n\tcase *float32:\n\t\terr = dec.Float32(vt)\n\tcase **float32:\n\t\tif vt == nil {\n\t\t\t*vt = new(float32)\n\t\t}\n\t\terr = dec.Float32(*vt)\n\tcase *bool:\n\t\terr = dec.Bool(vt)\n\tcase **bool:\n\t\tif vt == nil {\n\t\t\t*vt = new(bool)\n\t\t}\n\t\terr = dec.Bool(*vt)\n\tcase DecodableObject:\n\t\terr = dec.Object(vt)\n\tcase DecodableList:\n\t\terr = dec.List(vt)\n\tdefault:\n\t\terr = ErrUndecodable\n\t}\n\treturn err\n}", "func (msg *Message) Decode(out interface{}) error {\n\tif msg.reader == nil {\n\t\tmsg.reader = bytes.NewReader(msg.Data)\n\t}\n\tdefer msg.c.decoderState.PushReader(msg.reader)()\n\treturn pvdata.Decode(msg.c.decoderState, out)\n}", "func (f *Frame) Read(out interface{}) error 
{\n\tswitch x := out.(type) {\n\tcase *uint8:\n\t\tif f.BytesRemaining() < 1 {\n\t\t\treturn io.EOF\n\t\t}\n\t\t*x = f.Payload[f.payloadPos]\n\t\tf.payloadPos++\n\tcase *uint16:\n\t\tif f.BytesRemaining() < 2 {\n\t\t\treturn io.EOF\n\t\t}\n\t\t*x = binary.LittleEndian.Uint16(f.Payload[f.payloadPos:])\n\t\tf.payloadPos += 2\n\tcase *uint32:\n\t\tif f.BytesRemaining() < 4 {\n\t\t\treturn io.EOF\n\t\t}\n\t\t*x = binary.LittleEndian.Uint32(f.Payload[f.payloadPos:])\n\t\tf.payloadPos += 4\n\tdefault:\n\t\tv := reflect.ValueOf(out)\n\t\tif v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct {\n\t\t\telem := v.Elem()\n\t\t\tfor i := 0; i < elem.NumField(); i++ {\n\t\t\t\tif err := f.Read(elem.Field(i).Addr().Interface()); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\tif v.Kind() == reflect.Slice {\n\t\t\tfor i := 0; i < v.Len(); i++ {\n\t\t\t\tif err := f.Read(v.Index(i).Addr().Interface()); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\tpanic(fmt.Errorf(\"can't decode MSP payload into type %v\", out))\n\t}\n\treturn nil\n}", "func TypeFromByte(b byte) Type {\n\tswitch b {\n\tcase 0x0f:\n\t\treturn ResetMsg\n\tcase 0x10:\n\t\treturn RunMsg\n\tcase 0x2f:\n\t\treturn DiscardMsg\n\tcase 0x3f:\n\t\treturn PullMsg\n\tcase 0x71:\n\t\treturn RecordMsg\n\tcase 0x70:\n\t\treturn SuccessMsg\n\tcase 0x7e:\n\t\treturn IgnoreMsg\n\tcase 0x7f:\n\t\treturn FailureMsg\n\tcase 0x01:\n\t\treturn HelloMsg\n\tcase 0x02:\n\t\treturn GoodbyeMsg\n\tcase 0x11:\n\t\treturn BeginMsg\n\tcase 0x12:\n\t\treturn CommitMsg\n\tcase 0x13:\n\t\treturn RollbackMsg\n\tdefault:\n\t\treturn UnknownMsg\n\t}\n}", "func (d *Decoder) Decode(v interface{}) (err error) {\n\trv := reflect.Indirect(reflect.ValueOf(v))\n\tif !rv.CanAddr() {\n\t\treturn errors.New(\"binary: can only Decode to pointer type\")\n\t}\n\n\t// Scan the type (this will load from cache)\n\tvar c Codec\n\tif c, err = scan(rv.Type()); err == nil {\n\t\terr = c.DecodeTo(d, rv)\n\t}\n\n\treturn\n}", "func readResponse(p packetType) (response responseType, err error) {\n\t// The calls to bencode.Unmarshal() can be fragile.\n\tdefer func() {\n\t\tif x := recover(); x != nil {\n\t\t\tlogger.Infof(\"DHT: !!! Recovering from panic() after bencode.Unmarshal %q, %v\", string(p.b), x)\n\t\t}\n\t}()\n\tif e2 := bencode.Unmarshal(bytes.NewBuffer(p.b), &response); e2 == nil {\n\t\terr = nil\n\t\treturn\n\t} else {\n\t\tlogger.Infof(\"DHT: unmarshal error, odd or partial data during UDP read? 
%v, err=%s\", string(p.b), e2)\n\t\treturn response, e2\n\t}\n\treturn\n}", "func jsonDec(t reflect.Type, in []byte) (T, error) {\n\tval := reflect.New(t)\n\tif err := jsonx.Unmarshal(val.Interface(), in); err != nil {\n\t\treturn nil, err\n\t}\n\treturn val.Elem().Interface(), nil\n}", "func (s *Stream) Decode(val interface{}) error {\n\t// if val does not pointed to any address, there is no place to store the decoded data\n\tif val == nil {\n\t\treturn errDecodeIntoNil\n\t}\n\n\t// getting the value and the type of val\n\trval := reflect.ValueOf(val)\n\trtyp := rval.Type()\n\n\t// the passed in val must be a pointer\n\tif rtyp.Kind() != reflect.Ptr {\n\t\treturn errNoPointer\n\t}\n\n\t// checked again if passed in val is pointed to nil\n\tif rval.IsNil() {\n\t\treturn errDecodeIntoNil\n\t}\n\n\t// get the decoder based on the data type pointed by the val\n\tinfo, err := cachedTypeInfo(rtyp.Elem(), tags{})\n\tif err != nil {\n\t\treturn err\n\t}\n\t// passed in stream as well as the value of data pointed by val that will be used to store the decoded data\n\terr = info.decoder(s, rval.Elem())\n\n\t// check if the err is type *decodeError and the length of ctx is greater than 0\n\tif decErr, ok := err.(*decodeError); ok && len(decErr.ctx) > 0 {\n\t\tdecErr.ctx = append(decErr.ctx, fmt.Sprint(\"(\", rtyp.Elem(), \")\"))\n\t}\n\treturn err\n}", "func (t *RawStringTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {\n\tvalueType, compression := gocbcore.DecodeCommonFlags(flags)\n\n\t// Make sure compression is disabled\n\tif compression != gocbcore.NoCompression {\n\t\treturn errors.New(\"unexpected value compression\")\n\t}\n\n\t// Normal types of decoding\n\tif valueType == gocbcore.BinaryType {\n\t\treturn errors.New(\"only string datatype is supported by RawStringTranscoder\")\n\t} else if valueType == gocbcore.StringType {\n\t\tswitch typedOut := out.(type) {\n\t\tcase *string:\n\t\t\t*typedOut = string(bytes)\n\t\t\treturn nil\n\t\tcase *interface{}:\n\t\t\t*typedOut = string(bytes)\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn errors.New(\"you must encode a string in a string or interface\")\n\t\t}\n\t} else if valueType == gocbcore.JSONType {\n\t\treturn errors.New(\"only string datatype is supported by RawStringTranscoder\")\n\t}\n\n\treturn errors.New(\"unexpected expectedFlags value\")\n}", "func DecodeResponse(response []byte, responseType interface{}) (interface{}, error) {\n\tif err := json.Unmarshal([]byte(response), &responseType); err != nil {\n\t\tlog.Printf(\"error detected unmarshalling response: %v\", err)\n\t\treturn nil, err\n\t}\n\treturn responseType, nil\n}", "func Decode(v interface{}) error {\n\tval := reflect.ValueOf(v)\n\tt := reflect.TypeOf(v)\n\n\tif val.Kind() != reflect.Ptr {\n\t\treturn newDecodeError(\"must decode to pointer\", \"\", nil)\n\t}\n\n\tfor t.Kind() == reflect.Ptr {\n\t\tt = t.Elem()\n\n\t\tif val.IsNil() {\n\t\t\tval.Set(reflect.New(t))\n\t\t}\n\n\t\tval = val.Elem()\n\t}\n\n\tif t.Kind() != reflect.Struct {\n\t\treturn newDecodeError(fmt.Sprintf(\"cannot decode into value of type: %s\", t.String()), \"\", nil)\n\t}\n\n\tnewVal := reflect.New(t)\n\n\terr := decodeFields(val, newVal.Elem())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tval.Set(newVal.Elem())\n\n\treturn nil\n}", "func (d *Decoder) Decode(v interface{}) error {\n\tval := reflect.ValueOf(v)\n\ttyp := reflect.TypeOf(v)\n\n\tif typ.Kind() != reflect.Ptr {\n\t\treturn &TypeError{typ}\n\t}\n\n\tif typ.Elem().Kind() == reflect.Slice {\n\t\treturn 
d.decodeSlice(val)\n\t}\n\tp, err := d.getPairs()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch typ.Elem().Kind() {\n\tdefault:\n\t\treturn &TypeError{val.Type()}\n\tcase reflect.Map:\n\t\tif val.Elem().IsNil() {\n\t\t\tval.Elem().Set(reflect.MakeMap(typ.Elem()))\n\t\t}\n\t\treturn d.saveMap(p, val.Elem())\n\tcase reflect.Struct:\n\t\tif val.IsNil() {\n\t\t\treturn &TypeError{nil}\n\t\t}\n\t\treturn d.saveStruct(p, val.Elem())\n\t}\n\treturn nil\n}", "func (f DecoderFunc) Decode(v interface{}) error { return f(v) }", "func unpack(value nlgo.Binary, out interface{}) error {\n\treturn binary.Read(bytes.NewReader(([]byte)(value)), binary.BigEndian, out)\n}", "func (p Typed) Decode() (fx.Message, error) {\n\tmsgType, ok := MessageTypes[p.TypeId]\n\tif !ok {\n\t\treturn nil, &ErrUnknownType{TypeID: p.TypeId}\n\t}\n\tmsg := msgType.NewMessage()\n\tserializable := msg.(SerializableMessage).Serializable()\n\tif err := proto.Unmarshal(p.Message, serializable); err != nil {\n\t\treturn nil, err\n\t}\n\treturn msg, nil\n}", "func decodeConfigMapT(t *testing.T, data []byte, typMap map[string]reflect.Type) (cfg configMapT, err error) {\n\tif data == nil || len(data) == 0 {\n\t\treturn nil, errors.New(\"nil byte\")\n\t}\n\tres, err := ToObject(data, typMap)\n\tif err != nil {\n\t\tt.Errorf(\"failed decode config map bytes: %v, %v\\n\", base64.StdEncoding.EncodeToString(data), err)\n\t\treturn nil, err\n\t}\n\n\tif sn, ok := res.(map[interface{}]interface{}); ok && len(sn) == 0 {\n\t\treturn configMapT{}, nil\n\t}\n\n\tt.Log(\"decoded: \", res)\n\tif sn, ok := res.(configMapT); ok {\n\t\tcfg = sn\n\t\treturn\n\t}\n\tt.Errorf(\"unexpect decode config map result: %v, type:%v, base64:%v\\n\", res, reflect.TypeOf(res), base64.StdEncoding.EncodeToString(data))\n\terr = errors.New(\"failed to decode config map\")\n\treturn\n}", "func schemaDec(t reflect.Type, in []byte) (T, error) {\n\tswitch t.Kind() {\n\tcase reflect.Slice, reflect.Array:\n\t\tt = t.Elem()\n\t}\n\tdecMu.Lock()\n\tdec, ok := schemaDecs[t]\n\tif !ok {\n\t\tvar err error\n\t\tdec, err = coder.RowDecoderForStruct(t)\n\t\tif err != nil {\n\t\t\tdecMu.Unlock()\n\t\t\treturn nil, err\n\t\t}\n\t\tschemaDecs[t] = dec\n\t}\n\tdecMu.Unlock()\n\tbuf := bytes.NewBuffer(in)\n\tval, err := dec(buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn val, nil\n}", "func (dec *Decoder) Decode(v interface{}) error {\n\tif dec.isPooled == 1 {\n\t\tpanic(InvalidUsagePooledDecoderError(\"Invalid usage of pooled decoder\"))\n\t}\n\tvar err error\n\tswitch vt := v.(type) {\n\tcase *string:\n\t\terr = dec.decodeString(vt)\n\tcase **string:\n\t\terr = dec.decodeStringNull(vt)\n\tcase *int:\n\t\terr = dec.decodeInt(vt)\n\tcase **int:\n\t\terr = dec.decodeIntNull(vt)\n\tcase *int8:\n\t\terr = dec.decodeInt8(vt)\n\tcase **int8:\n\t\terr = dec.decodeInt8Null(vt)\n\tcase *int16:\n\t\terr = dec.decodeInt16(vt)\n\tcase **int16:\n\t\terr = dec.decodeInt16Null(vt)\n\tcase *int32:\n\t\terr = dec.decodeInt32(vt)\n\tcase **int32:\n\t\terr = dec.decodeInt32Null(vt)\n\tcase *int64:\n\t\terr = dec.decodeInt64(vt)\n\tcase **int64:\n\t\terr = dec.decodeInt64Null(vt)\n\tcase *uint8:\n\t\terr = dec.decodeUint8(vt)\n\tcase **uint8:\n\t\terr = dec.decodeUint8Null(vt)\n\tcase *uint16:\n\t\terr = dec.decodeUint16(vt)\n\tcase **uint16:\n\t\terr = dec.decodeUint16Null(vt)\n\tcase *uint32:\n\t\terr = dec.decodeUint32(vt)\n\tcase **uint32:\n\t\terr = dec.decodeUint32Null(vt)\n\tcase *uint64:\n\t\terr = dec.decodeUint64(vt)\n\tcase **uint64:\n\t\terr = dec.decodeUint64Null(vt)\n\tcase 
*float64:\n\t\terr = dec.decodeFloat64(vt)\n\tcase **float64:\n\t\terr = dec.decodeFloat64Null(vt)\n\tcase *float32:\n\t\terr = dec.decodeFloat32(vt)\n\tcase **float32:\n\t\terr = dec.decodeFloat32Null(vt)\n\tcase *bool:\n\t\terr = dec.decodeBool(vt)\n\tcase **bool:\n\t\terr = dec.decodeBoolNull(vt)\n\tcase UnmarshalerJSONObject:\n\t\t_, err = dec.decodeObject(vt)\n\tcase UnmarshalerJSONArray:\n\t\t_, err = dec.decodeArray(vt)\n\tcase *EmbeddedJSON:\n\t\terr = dec.decodeEmbeddedJSON(vt)\n\tcase *interface{}:\n\t\terr = dec.decodeInterface(vt)\n\tdefault:\n\t\treturn InvalidUnmarshalError(fmt.Sprintf(invalidUnmarshalErrorMsg, reflect.TypeOf(vt).String()))\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn dec.err\n}", "func (dec *Decoder) Decode(v interface{}) error {\n\tif dec.isPooled == 1 {\n\t\tpanic(InvalidUsagePooledDecoderError(\"Invalid usage of pooled decoder\"))\n\t}\n\tvar err error\n\tswitch vt := v.(type) {\n\tcase *string:\n\t\terr = dec.decodeString(vt)\n\tcase **string:\n\t\terr = dec.decodeStringNull(vt)\n\tcase *int:\n\t\terr = dec.decodeInt(vt)\n\tcase **int:\n\t\terr = dec.decodeIntNull(vt)\n\tcase *int8:\n\t\terr = dec.decodeInt8(vt)\n\tcase **int8:\n\t\terr = dec.decodeInt8Null(vt)\n\tcase *int16:\n\t\terr = dec.decodeInt16(vt)\n\tcase **int16:\n\t\terr = dec.decodeInt16Null(vt)\n\tcase *int32:\n\t\terr = dec.decodeInt32(vt)\n\tcase **int32:\n\t\terr = dec.decodeInt32Null(vt)\n\tcase *int64:\n\t\terr = dec.decodeInt64(vt)\n\tcase **int64:\n\t\terr = dec.decodeInt64Null(vt)\n\tcase *uint8:\n\t\terr = dec.decodeUint8(vt)\n\tcase **uint8:\n\t\terr = dec.decodeUint8Null(vt)\n\tcase *uint16:\n\t\terr = dec.decodeUint16(vt)\n\tcase **uint16:\n\t\terr = dec.decodeUint16Null(vt)\n\tcase *uint32:\n\t\terr = dec.decodeUint32(vt)\n\tcase **uint32:\n\t\terr = dec.decodeUint32Null(vt)\n\tcase *uint64:\n\t\terr = dec.decodeUint64(vt)\n\tcase **uint64:\n\t\terr = dec.decodeUint64Null(vt)\n\tcase *float64:\n\t\terr = dec.decodeFloat64(vt)\n\tcase **float64:\n\t\terr = dec.decodeFloat64Null(vt)\n\tcase *float32:\n\t\terr = dec.decodeFloat32(vt)\n\tcase **float32:\n\t\terr = dec.decodeFloat32Null(vt)\n\tcase *bool:\n\t\terr = dec.decodeBool(vt)\n\tcase **bool:\n\t\terr = dec.decodeBoolNull(vt)\n\tcase UnmarshalerJSONObject:\n\t\t_, err = dec.decodeObject(vt)\n\tcase UnmarshalerJSONArray:\n\t\t_, err = dec.decodeArray(vt)\n\tcase *EmbeddedJSON:\n\t\terr = dec.decodeEmbeddedJSON(vt)\n\tcase *interface{}:\n\t\terr = dec.decodeInterface(vt)\n\tdefault:\n\t\treturn InvalidUnmarshalError(fmt.Sprintf(invalidUnmarshalErrorMsg, vt))\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn dec.err\n}", "func DecodeField(fieldName string, t reflect.Type, v reflect.Value, tag reflect.StructTag, buf *bits.BitSetBuffer, sizeMap map[string]int, options ...EncDecOption) error {\n\tprocessed, err := decUnmarshaler(v, buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif processed {\n\t\treturn nil\n\t}\n\n\tendianness, err := getEndianness(tag)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t}\n\n\tswitch t.Kind() {\n\tcase reflect.Ptr:\n\t\tval := reflect.New(t.Elem())\n\t\terr := DecodeField(fieldName, t.Elem(), val.Elem(), tag, buf, sizeMap, options...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tv.Set(val)\n\tcase reflect.Interface:\n\t\tfor _, enc := range options {\n\t\t\tif enc.Type() == v.Type() {\n\t\t\t\terr := enc.DecoderFunc()(fieldName, t, v, tag, buf, sizeMap, options...)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn 
nil\n\t\t\t}\n\t\t}\n\t\treturn fmt.Errorf(\"interface:%v was not found: interface not supported\", t.Name())\n\tcase reflect.Struct:\n\t\tm := make(map[string]int)\n\t\tfor k, v := range sizeMap {\n\t\t\tm[k] = v\n\t\t}\n\t\tprocessed, err := decStructSpecial(fieldName, t, v, tag, buf, m, options...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif processed {\n\t\t\treturn nil\n\t\t}\n\n\t\tfor i := 0; i < v.NumField(); i++ {\n\t\t\tsf := t.Field(i)\n\t\t\tvf := v.Field(i)\n\t\t\tif _, has := sf.Tag.Lookup(\"omit\"); has {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr := DecodeField(sf.Name, sf.Type, vf, sf.Tag, buf, m, options...)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\tcase reflect.Array:\n\t\tfor i := 0; i < v.Len(); i++ {\n\t\t\titem := v.Index(i)\n\t\t\tif err := DecodeField(\"\", item.Type(), item, tag, buf, sizeMap, options...); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\tcase reflect.Slice:\n\t\tall := true\n\t\tsuint := 0\n\t\tif s, ok := tag.Lookup(\"size\"); ok {\n\t\t\ttmp, err := strconv.ParseUint(s, 10, 64)\n\t\t\tsuint = int(tmp)\n\t\t\tif err != nil {\n\t\t\t\ti, has := sizeMap[s]\n\t\t\t\tswitch {\n\t\t\t\tcase !has:\n\t\t\t\t\treturn fmt.Errorf(\"size must either be a positive number or a field found prior to this field :%v\", err)\n\t\t\t\tcase i < 0:\n\t\t\t\t\treturn fmt.Errorf(\"value of %v is %v, to be used for size it must be nonnegative\", s, i)\n\t\t\t\t}\n\n\t\t\t\tsuint = i\n\t\t\t}\n\t\t\tall = false\n\t\t}\n\n\t\treflectionValue := reflect.New(t)\n\t\treflectionValue.Elem().Set(reflect.MakeSlice(t, 0, 10))\n\t\tsliceValuePtr := reflect.ValueOf(reflectionValue.Interface()).Elem()\n\t\tfor i := 0; i < suint || (all && !buf.PosAtEnd()); i++ {\n\t\t\titem := reflect.New(t.Elem())\n\t\t\tif err := DecodeField(\"\", item.Elem().Type(), item.Elem(), tag, buf, sizeMap, options...); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tsliceValuePtr.Set(reflect.Append(sliceValuePtr, item.Elem()))\n\t\t}\n\n\t\tv.Set(sliceValuePtr)\n\tcase reflect.String:\n\t\tall := true\n\t\tsuint := uint64(0)\n\t\tif s, ok := tag.Lookup(\"strlen\"); ok {\n\t\t\tvar err error\n\t\t\tsuint, err = strconv.ParseUint(s, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\ti, has := sizeMap[s]\n\t\t\t\tswitch {\n\t\t\t\tcase !has:\n\t\t\t\t\treturn fmt.Errorf(\"strlen must either be a positive number or a field found prior to this field :%v\", err)\n\t\t\t\tcase i < 0:\n\t\t\t\t\treturn fmt.Errorf(\"value of %v is %v, to be used for strlen it must be nonnegative\", s, i)\n\t\t\t\t}\n\n\t\t\t\tsuint = uint64(i)\n\n\t\t\t}\n\t\t\tall = false\n\t\t}\n\n\t\tif all {\n\t\t\tsb := strings.Builder{}\n\n\t\t\tfor {\n\t\t\t\tbs := make([]byte, suint)\n\t\t\t\tn, err := buf.Read(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t\t\t}\n\t\t\t\tif n != len(bs) {\n\t\t\t\t\tsb.Write(bs[:n])\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tsb.Write(bs)\n\t\t\t}\n\n\t\t\tv.SetString(sb.String())\n\t\t} else {\n\t\t\tbs := make([]byte, suint)\n\t\t\terr := binary.Read(buf, endianness, bs)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t\t}\n\t\t\tv.SetString(string(bs))\n\t\t}\n\tcase reflect.Bool:\n\t\tnumOfBits, hasBits, err := getBits(tag, sizeMap, 8, 0)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tvar x bool\n\t\tif hasBits {\n\t\t\ttmp, err := bits.ReadUint(buf, numOfBits, endianness)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t\t}\n\t\t\tx = 
tmp > 0\n\t\t} else {\n\t\t\tif err := binary.Read(buf, endianness, &x); err != nil {\n\t\t\t\treturn fmt.Errorf(\"expected to read bool from %v: %v\", fieldName, err)\n\t\t\t}\n\t\t}\n\n\t\tv.SetBool(x)\n\tcase reflect.Uint8:\n\t\tnumOfBits, hasBits, err := getBits(tag, sizeMap, 8, 0)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tvar x uint8\n\t\tif hasBits {\n\t\t\tvar tmp uint64\n\t\t\ttmp, err = bits.ReadUint(buf, numOfBits, endianness)\n\t\t\tx = uint8(tmp)\n\t\t} else {\n\t\t\terr = binary.Read(buf, endianness, &x)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tsizeMap[fieldName] = int(x)\n\t\tv.SetUint(uint64(x))\n\tcase reflect.Uint16:\n\t\tnumOfBits, hasBits, err := getBits(tag, sizeMap, 16, 0)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tvar x uint16\n\t\tif hasBits {\n\t\t\tvar tmp uint64\n\t\t\ttmp, err = bits.ReadUint(buf, numOfBits, endianness)\n\t\t\tx = uint16(tmp)\n\t\t} else {\n\t\t\terr = binary.Read(buf, endianness, &x)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tsizeMap[fieldName] = int(x)\n\t\tv.SetUint(uint64(x))\n\tcase reflect.Uint32:\n\t\tnumOfBits, hasBits, err := getBits(tag, sizeMap, 32, 0)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tvar x uint32\n\t\tif hasBits {\n\t\t\tvar tmp uint64\n\t\t\ttmp, err = bits.ReadUint(buf, numOfBits, endianness)\n\t\t\tx = uint32(tmp)\n\t\t} else {\n\t\t\terr = binary.Read(buf, endianness, &x)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tsizeMap[fieldName] = int(x)\n\t\tv.SetUint(uint64(x))\n\tcase reflect.Uint64:\n\t\tnumOfBits, hasBits, err := getBits(tag, sizeMap, 64, 0)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tvar x uint64\n\t\tif hasBits {\n\t\t\tx, err = bits.ReadUint(buf, numOfBits, endianness)\n\t\t} else {\n\t\t\terr = binary.Read(buf, endianness, &x)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tsizeMap[fieldName] = int(x)\n\t\tv.SetUint(x)\n\tcase reflect.Int8:\n\t\tnumOfBits, hasBits, err := getBits(tag, sizeMap, 8, 2)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tvar x int8\n\t\tif hasBits {\n\t\t\tvar tmp int64\n\t\t\ttmp, err = bits.ReadInt(buf, numOfBits, endianness)\n\t\t\tx = int8(tmp)\n\t\t} else {\n\t\t\terr = binary.Read(buf, endianness, &x)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tsizeMap[fieldName] = int(x)\n\t\tv.SetInt(int64(x))\n\tcase reflect.Int16:\n\t\tnumOfBits, hasBits, err := getBits(tag, sizeMap, 16, 2)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tvar x int16\n\t\tif hasBits {\n\t\t\tvar tmp int64\n\t\t\ttmp, err = bits.ReadInt(buf, numOfBits, endianness)\n\t\t\tx = int16(tmp)\n\t\t} else {\n\t\t\terr = binary.Read(buf, endianness, &x)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tsizeMap[fieldName] = int(x)\n\t\tv.SetInt(int64(x))\n\tcase reflect.Int32:\n\t\tnumOfBits, hasBits, err := getBits(tag, sizeMap, 32, 2)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tvar x int32\n\t\tif hasBits {\n\t\t\tvar tmp int64\n\t\t\ttmp, err = bits.ReadInt(buf, numOfBits, endianness)\n\t\t\tx = int32(tmp)\n\t\t} else 
{\n\t\t\terr = binary.Read(buf, endianness, &x)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tsizeMap[fieldName] = int(x)\n\t\tv.SetInt(int64(x))\n\tcase reflect.Int64:\n\t\tnumOfBits, hasBits, err := getBits(tag, sizeMap, 64, 2)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tvar x int64\n\t\tif hasBits {\n\t\t\tx, err = bits.ReadInt(buf, numOfBits, endianness)\n\t\t} else {\n\t\t\terr = binary.Read(buf, endianness, &x)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", fieldName, err)\n\t\t}\n\n\t\tsizeMap[fieldName] = int(x)\n\t\tv.SetInt(x)\n\tcase reflect.Float32:\n\t\t_, hasBits, _ := getBits(tag, map[string]int{}, 32, 0)\n\t\tif hasBits {\n\t\t\treturn fmt.Errorf(\"bits not supported with float32: %v\", fieldName)\n\t\t}\n\n\t\tvar x float32\n\t\tif err := binary.Read(buf, endianness, &x); err != nil {\n\t\t\treturn fmt.Errorf(\"expected to read float32 from %v: %v\", fieldName, err)\n\t\t}\n\n\t\tv.SetFloat(float64(x))\n\tcase reflect.Float64:\n\t\t_, hasBits, _ := getBits(tag, map[string]int{}, 64, 0)\n\t\tif hasBits {\n\t\t\treturn fmt.Errorf(\"bits not supported with float64: %v\", fieldName)\n\t\t}\n\n\t\tvar x float64\n\t\tif err := binary.Read(buf, endianness, &x); err != nil {\n\t\t\treturn fmt.Errorf(\"expected to read float64 from %v: %v\", fieldName, err)\n\t\t}\n\n\t\tv.SetFloat(x)\n\tdefault:\n\t\treturn fmt.Errorf(\"%v not supported\", t)\n\t}\n\n\treturn nil\n}", "func (d *decoder) Decode(s *bufio.Scanner) (obj interface{}, err error) {\n\tb, err := ReadBytes(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(b) == 0 {\n\t\tlog.Err(\"empty or malformed payload: %q\", b)\n\t\treturn nil, ErrBadMsg\n\t}\n\n\tswitch b[0] {\n\tcase STRING:\n\t\treturn decodeString(b)\n\tcase INT:\n\t\treturn decodeInt(b)\n\tcase NIL:\n\t\treturn nil, decodeNil(s)\n\tcase SLICE:\n\t\treturn d.decodeSlice(b, s)\n\tcase MAP:\n\t\treturn d.decodeMap(b, s)\n\tcase ERROR:\n\t\treturn decodeErr(b)\n\t}\n\n\tlog.Err(\"unsupported payload type: %q\", b)\n\treturn nil, ErrUnsupportedType\n}", "func decodeBody(req *http.Request, out interface{}, cb func(interface{}) error) error {\n\t// This generally only happens in tests since real HTTP requests set\n\t// a non-nil body with no content. We guard against it anyways to prevent\n\t// a panic. 
The EOF response is the same behavior as an empty reader.\n\tif req.Body == nil {\n\t\treturn io.EOF\n\t}\n\n\tvar raw interface{}\n\tdec := json.NewDecoder(req.Body)\n\tif err := dec.Decode(&raw); err != nil {\n\t\treturn err\n\t}\n\n\t// Invoke the callback prior to decode\n\tif cb != nil {\n\t\tif err := cb(raw); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tdecodeConf := &mapstructure.DecoderConfig{\n\t\tDecodeHook: mapstructure.ComposeDecodeHookFunc(\n\t\t\tmapstructure.StringToTimeDurationHookFunc(),\n\t\t\tstringToReadableDurationFunc(),\n\t\t),\n\t\tResult: &out,\n\t}\n\n\tdecoder, err := mapstructure.NewDecoder(decodeConf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn decoder.Decode(raw)\n}", "func (dec *Decoder) decode(_ Code, _ reflect.Value) {\n\tdefer catchError(&dec.err)\n}", "func (b *NetworkTypeBox) Decode(buf *bin.Buffer) error {\n\tif b == nil {\n\t\treturn fmt.Errorf(\"unable to decode NetworkTypeBox to nil\")\n\t}\n\tv, err := DecodeNetworkType(buf)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to decode boxed value: %w\", err)\n\t}\n\tb.NetworkType = v\n\treturn nil\n}", "func decodeInternal(kind int, data []byte, v interface{}) (err error) {\n\tdefer handleAbort(&err)\n\tvalue, ok := v.(reflect.Value)\n\tif !ok {\n\t\tvalue = reflect.ValueOf(v)\n\t\tswitch value.Kind() {\n\t\tcase reflect.Map:\n\t\t\tif value.IsNil() {\n\t\t\t\treturn errors.New(\"bson: Decode map arg must not be nil.\")\n\t\t\t}\n\t\tcase reflect.Ptr:\n\t\t\tif value.IsNil() {\n\t\t\t\treturn errors.New(\"bson: Decode pointer arg must not be nil.\")\n\t\t\t}\n\t\t\tvalue = value.Elem()\n\t\tdefault:\n\t\t\treturn errors.New(\"bson: Decode arg must be pointer or map.\")\n\t\t}\n\t}\n\n\td := decodeState{data: data}\n\td.decodeValue(kind, value)\n\treturn d.savedError\n}", "func (d *Decoder) Decode(b []byte) (interface{}, error) {\n\tnv := reflect.New(d.Type).Interface()\n\tif err := d.Func(b, nv); err != nil {\n\t\treturn nil, err\n\t}\n\tptr := reflect.ValueOf(nv)\n\treturn ptr.Elem().Interface(), nil\n}", "func decodeTuple(a *DatumAlloc, tupTyp *types.T, b []byte) (tree.Datum, []byte, error) {\n\tb, _, _, err := encoding.DecodeNonsortingUvarint(b)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresult := tree.DTuple{\n\t\tD: a.NewDatums(len(tupTyp.TupleContents())),\n\t}\n\n\tvar datum tree.Datum\n\tfor i := range tupTyp.TupleContents() {\n\t\tdatum, b, err = DecodeTableValue(a, tupTyp.TupleContents()[i], b)\n\t\tif err != nil {\n\t\t\treturn nil, b, err\n\t\t}\n\t\tresult.D[i] = datum\n\t}\n\treturn a.NewDTuple(result), b, nil\n}", "func (codec TypeLengthValueCodec) Decode(raw net.Conn) (Message, error) {\n\tbyteChan := make(chan []byte)\n\terrorChan := make(chan error)\n\n\tgo func(bc chan []byte, ec chan error) {\n\t\ttypeData := make([]byte, MessageTypeBytes)\n\t\t_, err := io.ReadFull(raw, typeData)\n\t\tif err != nil {\n\t\t\tec <- err\n\t\t\tclose(bc)\n\t\t\tclose(ec)\n\t\t\tlogger.Debugln(\"go-routine read message type exited\", err)\n\t\t\treturn\n\t\t}\n\t\tbc <- typeData\n\t}(byteChan, errorChan)\n\n\tvar typeBytes []byte\n\n\tselect {\n\tcase err := <-errorChan:\n\t\treturn nil, err\n\n\tcase typeBytes = <-byteChan:\n\t\tif typeBytes == nil {\n\t\t\treturn nil, ErrBadData\n\t\t}\n\t\ttypeBuf := bytes.NewReader(typeBytes)\n\t\tvar msgType uint16\n\t\tif err := binary.Read(typeBuf, binary.BigEndian, &msgType); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\ttypehandler := GetTypeHandlerFunc(msgType)\n\t\theadhandler := GetHeadHandlerFunc(msgType)\n\t\tunmarshaler := 
GetUnmarshalFunc(msgType)\n\t\tif typehandler == nil || headhandler == nil || unmarshaler == nil {\n\t\t\treturn nil, ErrUndefined(msgType)\n\t\t}\n\n\t\tmsgHeadLen, errtype := typehandler(msgType)\n\t\tif errtype != nil {\n\t\t\treturn nil, errtype\n\t\t}\n\n\t\tlengthBytes := make([]byte, msgHeadLen)\n\t\t_, err := io.ReadFull(raw, lengthBytes)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\theadBytes := append(typeBytes, lengthBytes...)\n\t\tvar msgLen uint\n\t\tmsgLen, err = headhandler(headBytes)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif msgLen > MessageMaxBytes {\n\t\t\tlogger.Errorf(\"message(type %d) has bytes(%d) beyond max %d\\n\", msgType, msgLen, MessageMaxBytes)\n\t\t\treturn nil, ErrBadData\n\t\t}\n\n\t\t// read application data\n\t\tmsgBytes := make([]byte, msgLen)\n\t\t_, err = io.ReadFull(raw, msgBytes)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// deserialize message from bytes\n\t\treturn unmarshaler(append(headBytes, msgBytes...))\n\t}\n}", "func Unpack(b []byte) (Tuple, error) {\n\tvar t Tuple\n\n\tvar i int\n\n\tfor i < len(b) {\n\t\tvar el interface{}\n\t\tvar off int\n\n\t\tswitch {\n\t\tcase b[i] == 0x00:\n\t\t\tel = nil\n\t\t\toff = 1\n\t\tcase b[i] == 0x01:\n\t\t\tel, off = decodeBytes(b[i:])\n\t\tcase b[i] == 0x02:\n\t\t\tel, off = decodeString(b[i:])\n\t\tcase 0x0c <= b[i] && b[i] <= 0x1c:\n\t\t\tel, off = decodeInt(b[i:])\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unable to decode tuple element with unknown typecode %02x\", b[i])\n\t\t}\n\n\t\tt = append(t, el)\n\t\ti += off\n\t}\n\n\treturn t, nil\n}", "func gobDecode(buf []byte, into interface{}) error {\n\tif buf == nil {\n\t\treturn nil\n\t}\n\tdec := gob.NewDecoder(bytes.NewReader(buf))\n\treturn dec.Decode(into)\n}", "func (float64DecoderTraits) Decoder(e parquet.Encoding, descr *schema.Column, useDict bool, mem memory.Allocator) TypedDecoder {\n\tif useDict {\n\t\treturn &DictFloat64Decoder{dictDecoder{decoder: newDecoderBase(format.Encoding_RLE_DICTIONARY, descr), mem: mem}}\n\t}\n\n\tswitch e {\n\tcase parquet.Encodings.Plain:\n\t\treturn &PlainFloat64Decoder{decoder: newDecoderBase(format.Encoding(e), descr)}\n\tdefault:\n\t\tpanic(\"unimplemented encoding type\")\n\t}\n}", "func Decode(m interface{}, val interface{}) error {\n\tif err := check(val); err != nil {\n\t\treturn err\n\t}\n\n\treturn decode(m, reflect.ValueOf(val).Elem())\n}", "func (o FunctionInputResponseOutput) DataType() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FunctionInputResponse) *string { return v.DataType }).(pulumi.StringPtrOutput)\n}", "func (c Converter) Decode(r io.ReadSeeker) (interface{}, error) {\n\treturn nil, errors.New(\"decoding dhall files is not implemented yet\")\n}", "func (byteArrayDecoderTraits) Decoder(e parquet.Encoding, descr *schema.Column, useDict bool, mem memory.Allocator) TypedDecoder {\n\tif useDict {\n\t\treturn &DictByteArrayDecoder{dictDecoder{decoder: newDecoderBase(format.Encoding_RLE_DICTIONARY, descr), mem: mem}}\n\t}\n\n\tswitch e {\n\tcase parquet.Encodings.Plain:\n\t\treturn &PlainByteArrayDecoder{decoder: newDecoderBase(format.Encoding(e), descr)}\n\tcase parquet.Encodings.DeltaLengthByteArray:\n\t\tif mem == nil {\n\t\t\tmem = memory.DefaultAllocator\n\t\t}\n\t\treturn &DeltaLengthByteArrayDecoder{\n\t\t\tdecoder: newDecoderBase(format.Encoding(e), descr),\n\t\t\tmem: mem,\n\t\t}\n\tcase parquet.Encodings.DeltaByteArray:\n\t\tif mem == nil {\n\t\t\tmem = memory.DefaultAllocator\n\t\t}\n\t\treturn 
&DeltaByteArrayDecoder{\n\t\t\tDeltaLengthByteArrayDecoder: &DeltaLengthByteArrayDecoder{\n\t\t\t\tdecoder: newDecoderBase(format.Encoding(e), descr),\n\t\t\t\tmem: mem,\n\t\t\t}}\n\tdefault:\n\t\tpanic(\"unimplemented encoding type\")\n\t}\n}", "func (o FunctionOutputResponsePtrOutput) DataType() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FunctionOutputResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.DataType\n\t}).(pulumi.StringPtrOutput)\n}", "func packType(v interface{}) byte {\n\tswitch v.(type) {\n\tcase nil:\n\t\treturn ptNone\n\tcase string:\n\t\treturn ptString\n\tcase int32:\n\t\treturn ptInt\n\tcase float32:\n\t\treturn ptFloat\n\tcase uint32:\n\t\treturn ptPtr\n\tcase []uint16:\n\t\treturn ptWString\n\tcase color.NRGBA:\n\t\treturn ptColor\n\tcase uint64:\n\t\treturn ptUint64\n\tdefault:\n\t\tpanic(\"invalid vdf.Node\")\n\t}\n}", "func (d *Decoder) Decode(v interface{}) error {\n\treturn nil\n}", "func (int64DecoderTraits) Decoder(e parquet.Encoding, descr *schema.Column, useDict bool, mem memory.Allocator) TypedDecoder {\n\tif useDict {\n\t\treturn &DictInt64Decoder{dictDecoder{decoder: newDecoderBase(format.Encoding_RLE_DICTIONARY, descr), mem: mem}}\n\t}\n\n\tswitch e {\n\tcase parquet.Encodings.Plain:\n\t\treturn &PlainInt64Decoder{decoder: newDecoderBase(format.Encoding(e), descr)}\n\tcase parquet.Encodings.DeltaBinaryPacked:\n\t\tif mem == nil {\n\t\t\tmem = memory.DefaultAllocator\n\t\t}\n\t\treturn &DeltaBitPackInt64Decoder{\n\t\t\tdeltaBitPackDecoder: &deltaBitPackDecoder{\n\t\t\t\tdecoder: newDecoderBase(format.Encoding(e), descr),\n\t\t\t\tmem: mem,\n\t\t\t}}\n\tdefault:\n\t\tpanic(\"unimplemented encoding type\")\n\t}\n}", "func (DictByteArrayDecoder) Type() parquet.Type {\n\treturn parquet.Types.ByteArray\n}", "func GobDecode(buffer []byte, value interface{}) error {\n buf := bytes.NewBuffer(buffer)\n decoder := gob.NewDecoder(buf)\n err := decoder.Decode(value)\n if err != nil {\n return gobDebug.Error(err)\n }\n return nil\n}", "func (dec *Decoder) DecodeValue(v reflect.Value) error {\n\tif v.IsValid() {\n\t\tif v.Kind() == reflect.Ptr && !v.IsNil() {\n\t\t\t// That's okay, we'll store through the pointer.\n\t\t} else if !v.CanSet() {\n\t\t\treturn errors.New(\"binpack: DecodeValue of unassignable value\")\n\t\t}\n\t}\n\n\tdec.buf.Reset() // In case data lingers from previous invocation.\n\tdec.err = nil\n\tid := dec.decodeType(false)\n\tif dec.err == nil {\n\t\tdec.decode(id, v)\n\t}\n\treturn dec.err\n}", "func decodeDecoderNoPtr(s *Stream, val reflect.Value) error {\n\treturn val.Addr().Interface().(Decoder).DecodeRLP(s)\n}", "func (c *coder) decoderForType(keyOrValue, typ string) (func(m json.RawMessage) ([]byte, error), error) {\n\tvar dec func(s string) ([]byte, error)\n\tswitch typ {\n\tcase \"json\":\n\t\t// Easy case - we already have the JSON-marshaled data.\n\t\treturn func(m json.RawMessage) ([]byte, error) {\n\t\t\treturn m, nil\n\t\t}, nil\n\tcase \"hex\":\n\t\tdec = hex.DecodeString\n\tcase \"base64\":\n\t\tdec = base64.StdEncoding.DecodeString\n\tcase \"string\":\n\t\tdec = func(s string) ([]byte, error) {\n\t\t\treturn []byte(s), nil\n\t\t}\n\tcase \"avro\":\n\t\treturn c.makeAvroDecoder(keyOrValue), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(`unsupported decoder %#v, only json, string, hex and base64 are supported`, typ)\n\t}\n\treturn func(m json.RawMessage) ([]byte, error) {\n\t\tvar s string\n\t\tif err := json.Unmarshal(m, &s); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn dec(s)\n\t}, nil\n}", 
"func (c Coder) Type() FullType {\n\tif !c.IsValid() {\n\t\tpanic(\"Invalid Coder\")\n\t}\n\treturn c.coder.T\n}", "func (fixedLenByteArrayDecoderTraits) Decoder(e parquet.Encoding, descr *schema.Column, useDict bool, mem memory.Allocator) TypedDecoder {\n\tif useDict {\n\t\treturn &DictFixedLenByteArrayDecoder{dictDecoder{decoder: newDecoderBase(format.Encoding_RLE_DICTIONARY, descr), mem: mem}}\n\t}\n\n\tswitch e {\n\tcase parquet.Encodings.Plain:\n\t\treturn &PlainFixedLenByteArrayDecoder{decoder: newDecoderBase(format.Encoding(e), descr)}\n\tdefault:\n\t\tpanic(\"unimplemented encoding type\")\n\t}\n}", "func (p Packet) TypeData() []byte {\n\tl := p.Len()\n\tif l < EapMsgData {\n\t\treturn nil\n\t}\n\treturn p[EapMsgData:l]\n}", "func (tv *TypedBytes) ValueType() ValueType {\n\treturn tv.Type\n}", "func textUnmarshalerDecode(\n\tinputType reflect.Type, outputType reflect.Type, data interface{},\n) (interface{}, error) {\n\tif !reflect.PtrTo(outputType).Implements(stringUnmarshalerType) {\n\t\treturn data, nil\n\t}\n\tvalue, ok := data.(string)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"invalid type %v\", inputType)\n\t}\n\tparsedValue, ok := reflect.New(outputType).Interface().(stringUnmarshaler)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"invalid output type %v\", outputType)\n\t}\n\terr := parsedValue.Decode(value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn parsedValue, nil\n}" ]
[ "0.68078977", "0.6675602", "0.6515179", "0.61647207", "0.59279156", "0.59279156", "0.5900919", "0.5871402", "0.58649015", "0.5724581", "0.57092345", "0.56915516", "0.56915516", "0.5660723", "0.5632208", "0.5619711", "0.56132174", "0.5569318", "0.5551497", "0.5543591", "0.5518003", "0.55002475", "0.5489063", "0.54775524", "0.5448354", "0.5446588", "0.54434526", "0.54296786", "0.5407587", "0.5401388", "0.5397758", "0.5384884", "0.5367072", "0.53600675", "0.5351444", "0.535092", "0.53441095", "0.5342473", "0.53392214", "0.53295565", "0.53263885", "0.5326222", "0.53124434", "0.5302911", "0.5280203", "0.5237756", "0.5233342", "0.52221805", "0.52207243", "0.52187747", "0.521571", "0.5208564", "0.5207834", "0.5196438", "0.5188107", "0.5174159", "0.5169391", "0.5165359", "0.5151099", "0.51482075", "0.51406884", "0.51232034", "0.51161236", "0.5113956", "0.50892884", "0.5085349", "0.50749475", "0.5065027", "0.505782", "0.505782", "0.50571334", "0.50558", "0.5055791", "0.5055242", "0.50532407", "0.50528646", "0.50303364", "0.5025728", "0.5015919", "0.5003632", "0.4994155", "0.4967525", "0.49625507", "0.4948846", "0.49455613", "0.49444053", "0.49396157", "0.4939242", "0.49375728", "0.49365774", "0.4935658", "0.49279144", "0.49253282", "0.4921455", "0.4919739", "0.49136716", "0.49053147", "0.49035594", "0.49027398", "0.48966768" ]
0.6891714
0
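The document field of the row above (the reflect-based `Decode`) centers on one technique: building an intermediate struct type with `reflect.StructOf`, rewriting each field's `json` tag, unmarshaling into that intermediate value, then copying fields across. Below is a minimal, self-contained sketch of that pattern. The `Payload` type, its field names, and the lowercase-tag rule are illustrative assumptions for this sketch, not part of the dataset entry.

```go
// Sketch of the reflect.StructOf intermediate-struct technique from the
// Decode document above. Payload and the lowercase json-tag convention
// are assumptions made for illustration only.
package main

import (
	"encoding/json"
	"fmt"
	"reflect"
	"strings"
)

type Payload struct {
	Name  string
	Count int
}

func decodeWithIntermediate(data []byte, out *Payload) error {
	elTyp := reflect.TypeOf(*out)
	ftypes := make([]reflect.StructField, 0, elTyp.NumField())
	for i := 0; i < elTyp.NumField(); i++ {
		tf := elTyp.Field(i)
		// Re-tag each field, mirroring how the decoder above rewrites
		// tags before calling reflect.StructOf.
		tf.Tag = reflect.StructTag(fmt.Sprintf(`json:%q`, strings.ToLower(tf.Name)))
		ftypes = append(ftypes, tf)
	}
	// Build and populate the intermediate struct.
	intermediate := reflect.New(reflect.StructOf(ftypes))
	if err := json.Unmarshal(data, intermediate.Interface()); err != nil {
		return err
	}
	// Copy field values back onto the caller's struct; the field types
	// are unchanged, only the tags differ, so Set is always legal here.
	dst := reflect.ValueOf(out).Elem()
	for i := 0; i < elTyp.NumField(); i++ {
		dst.Field(i).Set(intermediate.Elem().Field(i))
	}
	return nil
}

func main() {
	var p Payload
	if err := decodeWithIntermediate([]byte(`{"name":"x","count":3}`), &p); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", p) // {Name:x Count:3}
}
```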
check token expiration; used in middleware for protected routes
func ValidateToken(r *http.Request) error { token, err := VerifyToken(r) if err != nil { return err } if _, ok := token.Claims.(jwt.Claims); !ok || !token.Valid { return errors.New("token is invalid or expired") } return nil }
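The query for this row asks for an expiration check "used in middleware for protected routes", and the document above only shows the check itself. A hedged sketch of how such a check is typically mounted in front of protected handlers follows. The use of github.com/golang-jwt/jwt/v4, the bearer-header layout, and the jwtKey value are assumptions for illustration; only the ValidateToken-style check comes from the entry above.

```go
// Sketch: wiring a token-expiration check into net/http middleware.
// Library choice (golang-jwt/jwt/v4) and jwtKey are assumed, not taken
// from the dataset entry. jwt.Parse validates the registered exp claim,
// so an expired token surfaces as a parse error here.
package main

import (
	"net/http"
	"strings"

	"github.com/golang-jwt/jwt/v4"
)

var jwtKey = []byte("replace-me") // assumed secret for the sketch

// VerifyToken extracts and parses the bearer token. A production version
// would also verify the signing method inside the key func.
func VerifyToken(r *http.Request) (*jwt.Token, error) {
	raw := strings.TrimPrefix(r.Header.Get("Authorization"), "Bearer ")
	return jwt.Parse(raw, func(t *jwt.Token) (interface{}, error) {
		return jwtKey, nil
	})
}

// RequireValidToken rejects missing, malformed, or expired tokens before
// the protected handler runs.
func RequireValidToken(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		token, err := VerifyToken(r)
		if err != nil || !token.Valid {
			http.Error(w, "invalid or expired token", http.StatusUnauthorized)
			return
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	protected := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	http.Handle("/private", RequireValidToken(protected))
	http.ListenAndServe(":8080", nil)
}
```

Keeping the check in a wrapper rather than inside each handler matches the query's intent: every route registered behind RequireValidToken gets the expiry check for free.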
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *Manager) isTokenExpired(token *Token) bool {\n\tif !m.bearerAuth {\n\t\treturn false\n\t}\n\tunixTime := time.Now().Unix()\n\treturn token.Expires < unixTime\n}", "func (t TToken) checkExpired() error {\n\texp, err := t.getExpiry()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif secondsPassed := time.Since(exp).Seconds(); secondsPassed > 30 {\n\t\treturn errors.New(\"token expired more than 30 seconds ago (#) \")\n\t}\n\treturn nil\n}", "func isTokenExpired(jwtData *JWTData) bool {\n\n\tnowTime := time.Now().Unix()\n\texpireTime := int64(jwtData.Exp)\n\n\tif expireTime < nowTime {\n\t\tlog.Warnf(\"Token is expired!\")\n\t\treturn true\n\t}\n\n\treturn false\n}", "func ValidateToken(pathHandler server.HandlerType) server.HandlerType {\n\treturn func(res http.ResponseWriter, req *http.Request) {\n\t\tlog.Printf(\"ValidateToken Received request: %v\", req)\n\t\tprovidedToken := req.Header.Get(tokenRequestHeader)\n\t\tif providedToken == \"\" {\n\t\t\tlog.Println(\"Token required; No token provided.\")\n\t\t\tserver.ReturnUnauthorizedResponse(res)\n\t\t\treturn\n\t\t}\n\n\t\tif actualToken, ok := generatedTokens[providedToken]; ok {\n\t\t\taccessTime := time.Now()\n\t\t\tduration := accessTime.Sub(actualToken.CreatedAt)\n\t\t\tif int(duration.Seconds()) >= actualToken.TTL {\n\t\t\t\tlog.Println(\"Token has expired\")\n\t\t\t\tdelete(generatedTokens, providedToken)\n\t\t\t\tserver.ReturnUnauthorizedResponse(res)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Println(\"Token validated!\")\n\t\t\tpathHandler(res, req)\n\t\t} else {\n\t\t\tlog.Printf(\"Invalid token provided: %v\", providedToken)\n\t\t\tserver.ReturnUnauthorizedResponse(res)\n\t\t\treturn\n\t\t}\n\t}\n}", "func expired(token *Token) bool {\n\tif token.Expires.IsZero() && len(token.Access) != 0 {\n\t\treturn false\n\t}\n\treturn token.Expires.Add(-expiryDelta).\n\t\tBefore(time.Now())\n}", "func CheckTheValidityOfTheToken(token string) (newToken string, err error) {\n\n err = checkInit()\n if err != nil {\n return\n }\n\n err = createError(011)\n\n if v, ok := tokens[token]; ok {\n var expires = v.(map[string]interface{})[\"expires\"].(time.Time)\n var userID = v.(map[string]interface{})[\"id\"].(string)\n\n if expires.Sub(time.Now().Local()) < 0 {\n return\n }\n\n newToken = setToken(userID, token)\n\n err = nil\n\n } else {\n return\n }\n\n return\n}", "func (request *AccessToken) HasExpired() bool {\n\tcurrentTime := time.Now().Unix()\n\treturn currentTime > request.ExpiresAt\n}", "func getTokenValidTo() time.Time {\n\tnow := time.Now().UTC()\n\tvalue, ok := os.LookupEnv(TOKEN_EXPIRIY_MINUTES)\n\tif !ok {\n\t\tlog.Fatal(\"Could not find token expiry time\")\n\t}\n\tvalidMinutes, err := strconv.Atoi(value)\n\tutil.CheckErrFatal(err)\n\treturn now.Add(time.Minute * time.Duration(validMinutes))\n}", "func isTokenExpired(token string) bool {\n\t// Parse token\n\tclaims := jwtgo.MapClaims{}\n\n\tparsedToken, _, err := new(jwtgo.Parser).ParseUnverified(token, claims)\n\tif err != nil {\n\t\treturn true\n\t}\n\n\terr = parsedToken.Claims.Valid()\n\tif err != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func ValidateRefreshToken(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ttoken := []modal.Token{}\n\n\t\tif _, ok := r.Header[\"Authorization\"]; !ok {\n\t\t\trefreshTokenRequired.SendAPI(w, nil)\n\t\t\treturn\n\t\t}\n\n\t\ts := strings.Split(r.Header[\"Authorization\"][0], \" \")\n\t\tdb := GetDBInstance()\n\t\tdefer db.Close()\n\t\terr := db.Select(&token, \"SELECT 
* FROM tokens WHERE refresh_token='\"+s[1]+\"'\")\n\t\tif err != nil {\n\t\t\tfmt.Println(\"db error\", err)\n\t\t}\n\n\t\tif len(token) <= 0 {\n\t\t\trefreshTokenInvalid.SendAPI(w, nil)\n\t\t\treturn\n\t\t}\n\n\t\tRTime := ParseTimestamp(token[0].RefreshTokenTime)\n\t\trefreshDuration, _ := time.ParseDuration(strconv.Itoa(token[0].RefreshTokenExpiry) + \"s\")\n\n\t\tif RTime.Add(refreshDuration).Unix() < time.Now().Unix() {\n\t\t\trefreshTokenExpired.SendAPI(w, nil)\n\t\t\treturn\n\t\t}\n\n\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func CheckAndUpdateToken(handler http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ttoken := r.Header.Get(tokenHeaderName)\n\t\tif token == \"\" {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\tti, err := controller.Token.GetInfo(token)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\tcontroller.Token.UpdateTTL(ti)\n\t\tctx := r.Context()\n\t\tctx = context.WithValue(ctx, LoginKey, ti.Login)\n\t\thandler.ServeHTTP(w, r.WithContext(ctx))\n\t})\n}", "func (auth *Authorization) tokenIsValid() bool {\n\tif auth.AccessToken == \"\" {\n\t\treturn false\n\t}\n\treturn time.Now().Unix() < auth.TokenExpires\n}", "func TokenExpiresIn(ctx context.Context, t *oauth2.Token, lifetime time.Duration) bool {\n\tif t == nil || t.AccessToken == \"\" {\n\t\treturn true\n\t}\n\tif t.Expiry.IsZero() {\n\t\treturn false\n\t}\n\texpiry := t.Expiry.Add(-lifetime)\n\treturn expiry.Before(clock.Now(ctx))\n}", "func (c *CSRFStore) expired() bool {\n\treturn c.token == nil || time.Now().After(c.token.ExpiresAt)\n\n}", "func (token Token) Valid(maxAge float64) bool {\n\tnow := time.Now().UTC()\n\tdifference := now.Sub(token.Timestamp)\n\treturn difference.Seconds() <= maxAge\n}", "func (token UserToken) Valid() (err error) {\n\tif token.IssureAt+token.Expire <= time.Now().Unix() {\n\t\treturn jwt.NewValidationError(\"token is expired\", jwt.ValidationErrorExpired)\n\t}\n\treturn nil\n}", "func ValidateToken(next http.HandlerFunc) http.HandlerFunc {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tsession := GetSession(w, req, cookieName)\n\t\taccessToken, setbool := session.Values[\"access_token\"].(string)\n\t\tif setbool == true && accessToken == \"\" {\n\t\t\tRedirectLogin(w, req)\n\t\t\t//return\n\t\t} else if setbool == false {\n\t\t\tRedirectLogin(w, req)\n\t\t} else {\n\t\t\tvar p jwt.Parser\n\t\t\ttoken, _, _ := p.ParseUnverified(accessToken, &jwt.StandardClaims{})\n\t\t\tif err := token.Claims.Valid(); err != nil {\n\t\t\t\t//invalid\n\t\t\t\tRedirectLogin(w, req)\n\t\t\t\t//return\n\t\t\t} else {\n\t\t\t\t//valid\n\t\t\t\tnext(w, req)\n\t\t\t\t//return\n\t\t\t}\n\t\t}\n\t\t//RedirectLogin(w, r)\n\t\treturn\n\t})\n}", "func validatetoken(dao DAO) echo.MiddlewareFunc {\n\treturn func(next echo.HandlerFunc) echo.HandlerFunc {\n\t\treturn func(c echo.Context) error {\n\n\t\t\tcook, err := c.Cookie(\"jwt\")\n\t\t\tif err != nil {\n\t\t\t\tif err == http.ErrNoCookie {\n\t\t\t\t\t// If the cookie is not set, return an unauthorized status\n\t\t\t\t\treturn c.String(http.StatusUnauthorized, \"You are not authorized\")\n\t\t\t\t}\n\t\t\t\t// For any other type of error, return a bad request status\n\t\t\t\treturn c.String(http.StatusBadRequest, \"Bad Request A\")\n\t\t\t}\n\n\t\t\t// Get the JWT string from the cookie\n\t\t\ttknStr := cook.Value\n\n\t\t\t// Initialize a new instance of `Claims`\n\t\t\tclaims := &m.Claims{}\n\n\t\t\t// 
Parse the JWT string and store the result in `claims`.\n\t\t\t// Note that we are passing the key in this method as well. This method will return an error\n\t\t\t// if the token is invalid (if it has expired according to the expiry time we set on sign in),\n\t\t\t// or if the signature does not match\n\t\t\ttkn, err := jwt.ParseWithClaims(tknStr, claims, func(token *jwt.Token) (interface{}, error) {\n\t\t\t\treturn jwtKey, nil\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tif err == jwt.ErrSignatureInvalid {\n\t\t\t\t\treturn c.String(http.StatusUnauthorized, fmt.Sprintf(\"You are Not Authorized %v:%s:%v\", err, err.Error(), jwt.ErrSignatureInvalid))\n\t\t\t\t}\n\t\t\t\treturn c.String(http.StatusBadRequest, fmt.Sprintf(\"Bad Request B %v:%s:%v\", err, err.Error(), jwt.ErrSignatureInvalid))\n\t\t\t}\n\t\t\tif !tkn.Valid {\n\t\t\t\treturn c.String(http.StatusUnauthorized, \"You are not authorized\")\n\t\t\t}\n\n\t\t\tprofilesexist, err := dao.DoesProfileExist(claims.ProfileId)\n\t\t\tif err != nil || !profilesexist {\n\t\t\t\treturn c.String(http.StatusUnauthorized, \"You are not authorized: 10101\")\n\t\t\t}\n\n\t\t\t// We ensure that a new token is not issued until enough time has elapsed\n\t\t\t// In this case, a new token will only be issued if the old token is within\n\t\t\t// 30 seconds of expiry. otherwise.. leave everything be\n\t\t\tif time.Unix(claims.ExpiresAt, 0).Sub(time.Now()) < 30*time.Second {\n\n\t\t\t\t// Now, create a new token for the current use, with a renewed expiration time\n\t\t\t\texpirationTime := time.Now().Add(5 * time.Minute)\n\t\t\t\tclaims.ExpiresAt = expirationTime.Unix()\n\t\t\t\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\t\t\t\ttokenString, err := token.SignedString(jwtKey)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn c.String(http.StatusInternalServerError, \"Crazy ass internal error\")\n\t\t\t\t}\n\t\t\t\tc.SetCookie(&http.Cookie{\n\t\t\t\t\tName: \"jwt\",\n\t\t\t\t\tValue: tokenString,\n\t\t\t\t\tExpires: expirationTime,\n\t\t\t\t})\n\n\t\t\t}\n\t\t\treturn next(c)\n\t\t}\n\t}\n}", "func (middleware *Middleware) CheckIfTokenExpire(c controller.MContext) (*CustomClaims, error) {\n\ttoken, err := middleware.ParseToken(c)\n\tif err != nil {\n\t\tvalidationErr, ok := err.(*jwt.ValidationError)\n\t\tif !ok || validationErr.Errors != jwt.ValidationErrorExpired {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif token == nil {\n\t\treturn nil, ErrEmptyAuthHeader\n\t}\n\n\tclaims := token.Claims.(*CustomClaims)\n\n\tif claims.ExpiresAt < jwt.TimeFunc().Unix() {\n\t\treturn nil, ErrExpiredToken\n\t}\n\n\treturn claims, nil\n}", "func (t *Token) validate(nbfCheck bool) (err error) {\n\tvar zu = uuid.UUID{}\n\tvar ve = &ValidationError{}\n\tvar now = Timestamp(time.Now().Unix())\n\n\tif bytes.Equal(zu[:], t.Id[:]) {\n\t\tve.append(\"Token.Id is invalid (zero-UUID)\")\n\t}\n\n\tif bytes.Equal(zu[:], t.Subject[:]) {\n\t\tve.append(\"Token.Subject is invalid (zero-UUID)\")\n\t}\n\n\tif 0 == t.Issued {\n\t\tve.append(\"Token.Issued is invalid (zero-Timestamp)\")\n\t} else if t.Issued > now {\n\t\tve.append(\"Token.Issued is > time.Now()\")\n\t}\n\n\tif t.Expires != 0 && t.Expires < (now+5) {\n\t\tve.append(fmt.Sprintf(\n\t\t\t\"Token.Expires is < time.Now(); expired %v\",\n\t\t\tt.Expires.Time().String()))\n\t\tve.exp = true\n\t}\n\n\tif nbfCheck &&\n\t\tt.NotBefore != 0 &&\n\t\tint64(t.NotBefore) > (time.Now().Unix()-5) {\n\t\tve.append(fmt.Sprintf(\n\t\t\t\"Token.NotBefore is < time.Now(); not before %v\",\n\t\t\tt.Expires.Time().String()))\n\t\tve.nbf = 
true\n\t}\n\n\tif 0 != len(ve.errstrs) || ve.exp || ve.nbf {\n\t\terr = ve\n\t}\n\n\treturn\n}", "func (x *EndPoints) CheckToken(client *firestore.Client) http.HandlerFunc {\n\treturn func(res http.ResponseWriter, req *http.Request) {\n\n\t\tvalid := verifyToken(req)\n\t\tif valid != true {\n\t\t\tutils.ResClearSite(&res)\n\t\t\tutils.ResError(res, http.StatusUnauthorized, errors.New(\"INVALID TOKEN\"))\n\t\t\treturn\n\t\t}\n\n\t\tutils.ResOK(res, \"TOKEN VALID\")\n\t}\n}", "func (t *MongoDBToken) IsExpired() bool {\n\treturn time.Now().UTC().Unix() >= t.Expired.Unix()\n}", "func (s *Service) CheckLimitsMiddlware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tctxToken := r.Context().Value(ctxToken)\n\t\tif ctxToken == nil {\n\t\t\ts.writeError(w, errInternalServerError.msg(\"CheckLimitsMiddlware failed to get token from context\"))\n\t\t\treturn\n\t\t}\n\t\ttoken := ctxToken.(string)\n\t\tnow := time.Now()\n\t\tcachedToken, ok := s.Throttler.cache[token]\n\t\tif !ok {\n\t\t\ts.Throttler.cache[token] = &requester{\n\t\t\t\ttoken: token,\n\t\t\t\tcounter: 0,\n\t\t\t\tendTime: now.Add(time.Duration(s.Throttler.M) * time.Millisecond),\n\t\t\t}\n\t\t\tcachedToken = s.Throttler.cache[token]\n\t\t}\n\t\tcachedToken.counter++\n\n\t\tlog.Infof(\"cached token: %s\\n\", cachedToken.token)\n\n\t\tif cachedToken.counter > s.Throttler.N {\n\t\t\tif now.Before(cachedToken.endTime) { // limit reached already\n\t\t\t\ttimeLeft := cachedToken.endTime.Sub(now).Seconds() * 1000\n\t\t\t\ts.writeError(w, &Error{Code: http.StatusTooManyRequests, Message: fmt.Sprintf(\"Too many requests: %.2fms left until reset\", timeLeft)})\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcachedToken.counter = 1\n\t\t\tcachedToken.endTime = now.Add(time.Duration(s.Throttler.M) * time.Millisecond)\n\t\t}\n\n\t\tnext.ServeHTTP(w, r)\n\n\t})\n}", "func (t *Token) Expired() bool {\n\treturn time.Now().Unix() >= t.ExpiredAt\n}", "func (claims Claims) checkExpires(now int64, tag string) error {\n\tif claims.Has(tag) {\n\t\texp := claims[tag]\n\t\tvar target int64\n\t\tswitch val := exp.(type) {\n\t\tcase float32:\n\t\t\ttarget = int64(val)\n\t\tcase float64:\n\t\t\ttarget = int64(val)\n\t\tcase int8:\n\t\t\ttarget = int64(val)\n\t\tcase int16:\n\t\t\ttarget = int64(val)\n\t\tcase int:\n\t\t\ttarget = int64(val)\n\t\tcase int32:\n\t\t\ttarget = int64(val)\n\t\tcase int64:\n\t\t\ttarget = val\n\t\tcase uint8:\n\t\t\ttarget = int64(val)\n\t\tcase uint16:\n\t\t\ttarget = int64(val)\n\t\tcase uint32:\n\t\t\ttarget = int64(val)\n\t\tcase uint:\n\t\t\ttarget = int64(val)\n\t\tcase uint64:\n\t\t\ttarget = int64(val)\n\t\tdefault:\n\t\t\treturn errClaimValueInvalid\n\t\t}\n\t\tif tag == notBeforeAt && now < target {\n\t\t\treturn errClaimValueInvalid\n\t\t}\n\t\tif tag == expiresAt && now > target {\n\t\t\treturn errClaimValueInvalid\n\t\t}\n\t\tif tag == issuedAt && now < target {\n\t\t\treturn errClaimValueInvalid\n\t\t}\n\t} else {\n\t\treturn errClaimValueInvalid\n\t}\n\treturn nil\n}", "func TestLoginExpires(t *testing.T) {\n\tc, _, secret := loginHelper(t, \"2s\")\n\n\t// Make sure token is valid\n\t_, err := c.AuthAPIClient.GetAdmins(c.Ctx(), &auth.GetAdminsRequest{})\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\t// Wait for TTL to expire and check that token is no longer valid\n\ttime.Sleep(time.Duration(secret.LeaseDuration+1) * time.Second)\n\t_, err = c.AuthAPIClient.GetAdmins(c.Ctx(), &auth.GetAdminsRequest{})\n\tif err == nil {\n\t\tt.Fatalf(\"API call should fail, but 
token did not expire\")\n\t}\n}", "func checkToken(w http.ResponseWriter, r *http.Request) bool {\n\tc, err := r.Cookie(\"token\")\n\tif err != nil {\n\t\tif err == http.ErrNoCookie {\n\t\t\t// If the cookie is not set, return an unauthorized status\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\treturn false\n\t\t}\n\t\t// For any other type of error, return a bad request status\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn false\n\t}\n\n\t// Get the JWT string from the cookie\n\ttknStr := c.Value\n\n\t// Initialize a new instance of `Claims`\n\tclaims := &s.Claims{}\n\n\ttkn, err := jwt.ParseWithClaims(tknStr, claims, func(token *jwt.Token) (interface{}, error) {\n\t\treturn JWTKey, nil\n\t})\n\tif err != nil {\n\t\tif err == jwt.ErrSignatureInvalid {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\treturn false\n\t\t}\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn false\n\t}\n\tif !tkn.Valid {\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\treturn false\n\t}\n\treturn true\n}", "func IsValidToken(token *oauth2.Token) bool {\n\tcurrent := time.Now()\n\treturn token.Expiry.After(current)\n}", "func verifyToken(reqToken string) *User {\n\tlog.Println(\"verify \", reqToken)\n\tlog.Println(reqToken)\n\tif reqToken == \"\" {\n\t\tlog.Println(\"here\")\n\t\treturn nil\n\t}\n\ttoken, err := jwt.Parse(reqToken, func(t *jwt.Token) (interface{}, error) {\n\t\treturn []byte(SecretKey), nil\n\t})\n\n\tif err != nil {\n\t\treturn nil\n\t}\n\tif claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {\n\t\t// check if claims are true\n\t\tusername := claims[\"username\"]\n\n\t\t// check if username exists\n\t\tvar userInDB User\n\t\tlog.Println(\"verify username\", username)\n\t\tapp.db.Where(\"username = ?\", username).First(&userInDB)\n\n\t\t// username not found\n\t\tif userInDB.Username == nil {\n\t\t\tlog.Println(\"1 returning\")\n\t\t\treturn nil\n\t\t}\n\n\t\t// now check if current time is less than expiratin time\n\t\tunixTime, err := strconv.ParseInt(claims[\"exp\"].(string), 10, 64)\n\t\tif err != nil {\n\t\t\tlog.Println(\"2 returning\")\n\t\t\treturn nil\n\t\t}\n\n\t\texpirationTime := time.Unix(unixTime, 0)\n\t\tlog.Println(expirationTime)\n\t\tif expirationTime.After(time.Now()) {\n\t\t\treturn &userInDB\n\t\t}\n\n\t\tlog.Println(\"3 returning\")\n\t\treturn nil\n\t}\n\n\treturn nil\n}", "func TokenValidation(token string) bool {\n\tvar flag bool\n\tflag = false\n\tfor _, v := range tokenDetails {\n\t\tif token == v.Token {\n\t\t\tflag = true\n\t\t\tif time.Since(v.LastUsed).Minutes() > pluginConfig.Data.SessionTimeoutInMinutes {\n\t\t\t\treturn flag\n\t\t\t}\n\t\t}\n\t}\n\treturn flag\n}", "func (r *OperationReqReconciler) isExpired(request *userv1.Operationrequest) bool {\n\tif request.Status.Phase != userv1.RequestCompleted && request.CreationTimestamp.Add(r.expirationTime).Before(time.Now()) {\n\t\tr.Logger.Info(\"operation request is expired\", \"name\", request.Name)\n\t\treturn true\n\t}\n\treturn false\n}", "func (c *TokenClaims) Valid() error {\n\tif c.ExpiredAt < time.Now().Unix() {\n\t\treturn ErrTokenExpired\n\t}\n\treturn nil\n}", "func (authProvider *ecrAuthProvider) IsTokenValid(authData *ecrapi.AuthorizationData) bool {\n\tif authData == nil || authData.ExpiresAt == nil {\n\t\treturn false\n\t}\n\n\trefreshTime := aws.TimeValue(authData.ExpiresAt).\n\t\tAdd(-1 * retry.AddJitter(MinimumJitterDuration, MinimumJitterDuration))\n\n\treturn time.Now().Before(refreshTime)\n}", "func TestExpiry(t *testing.T) {\n\t_, privateBytes, err := 
GenerateKeyPair()\n\trequire.NoError(t, err)\n\tprivateKey, err := utils.ParsePrivateKey(privateBytes)\n\trequire.NoError(t, err)\n\n\tclock := clockwork.NewFakeClockAt(time.Now())\n\n\t// Create a new key that can be used to sign and verify tokens.\n\tkey, err := New(&Config{\n\t\tClock: clock,\n\t\tPrivateKey: privateKey,\n\t\tAlgorithm: defaults.ApplicationTokenAlgorithm,\n\t\tClusterName: \"example.com\",\n\t})\n\trequire.NoError(t, err)\n\n\t// Sign a token with a 1 minute expiration.\n\ttoken, err := key.Sign(SignParams{\n\t\tUsername: \"[email protected]\",\n\t\tRoles: []string{\"foo\", \"bar\"},\n\t\tTraits: wrappers.Traits{\n\t\t\t\"trait1\": []string{\"value-1\", \"value-2\"},\n\t\t},\n\t\tExpires: clock.Now().Add(1 * time.Minute),\n\t\tURI: \"http://127.0.0.1:8080\",\n\t})\n\trequire.NoError(t, err)\n\n\t// Verify that the token is still valid.\n\tclaims, err := key.Verify(VerifyParams{\n\t\tUsername: \"[email protected]\",\n\t\tURI: \"http://127.0.0.1:8080\",\n\t\tRawToken: token,\n\t})\n\trequire.NoError(t, err)\n\trequire.Equal(t, claims.Username, \"[email protected]\")\n\trequire.Equal(t, claims.Roles, []string{\"foo\", \"bar\"})\n\trequire.Equal(t, claims.IssuedAt, josejwt.NewNumericDate(clock.Now()))\n\n\t// Advance time by two minutes and verify the token is no longer valid.\n\tclock.Advance(2 * time.Minute)\n\t_, err = key.Verify(VerifyParams{\n\t\tUsername: \"[email protected]\",\n\t\tURI: \"http://127.0.0.1:8080\",\n\t\tRawToken: token,\n\t})\n\trequire.Error(t, err)\n}", "func Token(app *container.Container) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\n\t\tvar (\n\t\t\tbearer *authorizationHeader = &authorizationHeader{}\n\t\t\tclaims *tokenModels.JWTClaims = &tokenModels.JWTClaims{}\n\t\t\tjwtToken *jwt.Token = &jwt.Token{}\n\t\t\ttoken *tokenModels.Token = &tokenModels.Token{}\n\t\t\tuser *userModels.User = &userModels.User{}\n\t\t)\n\n\t\t// validate authorization header\n\t\terr := c.ShouldBindWith(bearer, binding.Header)\n\t\tok, httpResponse := app.Facades.Error.ShouldContinue(err, &response.ErrValidation)\n\t\tif !ok {\n\t\t\tc.AbortWithStatusJSON(httpResponse.Status, httpResponse)\n\t\t\treturn\n\t\t}\n\n\t\t// split authorization header value, i.e. 
from Bearer xxx to [\"Bearer\", \"xxx\"]\n\t\tbearerHeader := strings.Split(bearer.Authorization, \" \")\n\t\tif len(bearerHeader) != 2 || bearerHeader[0] != \"Bearer\" {\n\t\t\thttpResponse := response.ErrTokenInvalid\n\t\t\tc.AbortWithStatusJSON(httpResponse.Status, httpResponse)\n\t\t\treturn\n\t\t}\n\n\t\t// validate the token\n\t\tjwtToken, err = app.Facades.Token.ParseWithClaims(bearerHeader[1], claims)\n\t\tok, httpResponse = app.Facades.Error.ShouldContinue(err, &response.ErrTokenInvalid)\n\t\tif !ok {\n\t\t\tc.AbortWithStatusJSON(httpResponse.Status, httpResponse)\n\t\t\treturn\n\t\t}\n\n\t\t// find the user\n\t\terr = app.Facades.User.BindByID(user, claims.Subject)\n\t\tok, httpResponse = app.Facades.Error.ShouldContinue(err, &response.ErrUserNotFound)\n\t\tif !ok {\n\t\t\tc.AbortWithStatusJSON(httpResponse.Status, httpResponse)\n\t\t\treturn\n\t\t}\n\n\t\t// explicitly check for expiry for both refresh and access types\n\t\t// remove if expired\n\t\tnow := time.Now()\n\t\ttokenExpiry := time.Unix(claims.ExpiresAt, 0)\n\n\t\t// if expiry is before now\n\t\tif tokenExpiry.Before(now) {\n\t\t\t// revoke the token\n\t\t\tapp.Facades.User.RevokeTokenByID(claims.ID.String())\n\n\t\t\t// respond\n\t\t\thttpResponse := response.ErrTokenExpired\n\t\t\t// serverErr := logging.NewServerError(err).BindClientErr(httpResponse)\n\t\t\t// c.Error(serverErr)\n\t\t\tc.AbortWithStatusJSON(httpResponse.Status, httpResponse)\n\t\t\treturn\n\t\t}\n\n\t\t// We'll only allow the access token flow in this middleware. If the user has\n\t\t// a refresh token, they should go through a different flow. For example, exchanging\n\t\t// their refresh token for a new access token.\n\t\tif claims.TokenType != enums.JWTTokenTypeAccess {\n\t\t\thttpResponse := response.ErrTokenTypeInvalid\n\t\t\t// serverErr := logging.NewServerError(nil).BindClientErr(httpResponse)\n\t\t\t// c.Error(serverErr)\n\t\t\tc.AbortWithStatusJSON(httpResponse.Status, httpResponse)\n\t\t\treturn\n\t\t}\n\n\t\t// We'll disallow invalid tokens, too.\n\t\tif !jwtToken.Valid {\n\t\t\tfmt.Println(\"here it is\")\n\t\t\thttpResponse := response.ErrTokenInvalid\n\t\t\tc.AbortWithStatusJSON(httpResponse.Status, httpResponse)\n\t\t\treturn\n\t\t}\n\n\t\t// lastly we'll check to see if the token is in the whitelist\n\t\terr = app.Facades.Token.BindByID(token, claims.ID)\n\t\tif err != nil {\n\t\t\thttpResponse := response.ErrTokenNotFound\n\t\t\t// serverErr := logging.NewServerError(err).BindClientErr(httpResponse)\n\t\t\t// c.Error(serverErr)\n\t\t\tc.AbortWithStatusJSON(httpResponse.Status, httpResponse)\n\t\t\treturn\n\t\t}\n\n\t\t// store user in the handler dependencies\n\t\tapp.Current.User = user\n\n\t\t// store the token so we can revoke tokens related to the session\n\t\tapp.Current.Token = token\n\n\t\tc.Next()\n\t}\n}", "func (m JwtMiddleware) Check(next http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\tc, err := r.Cookie(\"token\")\n\t\tif err != nil {\n\t\t\tif errors.Is(err, http.ErrNoCookie) {\n\t\t\t\t// If the cookie is not set, return an unauthorized status\n\t\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t\tlog.Printf(\"[%s]: no cookie, request non authorized\", http.StatusText(http.StatusUnauthorized))\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// For any other type of error, return a bad request status\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tapiError := common.APIError{\n\t\t\t\tError: \"invalid_client\",\n\t\t\t\tErrorDescription: fmt.Sprintf(\"%s\", \"Cannot retrieve 
authorization\"),\n\t\t\t}\n\t\t\tjson, _ := json.Marshal(apiError)\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t_, _ = w.Write(json)\n\t\t\tlog.Printf(\"[%s]: cannot retrieve cookie token\", http.StatusText(http.StatusUnauthorized))\n\t\t\treturn\n\t\t}\n\t\tlogger.Logger.Debugf(\"Checking for token [%s]\", c.Value)\n\t\tif valid, err := m.JwtService.Verify(\"Bearer \" + c.Value); err != nil || !valid {\n\t\t\tif errors.Is(err, jwt.ErrSignature) {\n\t\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t} else {\n\t\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tnext.ServeHTTP(w, r)\n\t}\n\treturn http.HandlerFunc(fn)\n}", "func (t *token) IsExpired() bool {\n\tif t == nil {\n\t\treturn true\n\t}\n\treturn t.Expired()\n}", "func (err *ValidationError) IsExpired() bool { return err.exp }", "func (h *ValidationHelper) ValidateExpiresAt(exp *Time) error {\n\t// 'exp' claim is not set. ignore.\n\tif exp == nil {\n\t\treturn nil\n\t}\n\n\t// Expiration has passed\n\tif h.After(exp.Time) {\n\t\tdelta := h.now().Sub(exp.Time)\n\t\treturn &TokenExpiredError{At: h.now(), ExpiredBy: delta}\n\t}\n\n\t// Expiration has not passed\n\treturn nil\n}", "func (i *info) expired(now int64) bool {\n\treturn i.TTLStamp <= now\n}", "func (t *cachedToken) usable() bool {\n\treturn t.token != \"\" || time.Now().Add(30*time.Second).Before(t.expiry)\n}", "func TestExpiration(t *testing.T) {\n\tr := NewRegistrar()\n\tr.Add(session)\n\ttime.Sleep(expireDuration)\n\tif r.Validate(user) {\n\t\tt.Error(\"The token has expired, but the user was still validated!\")\n\t}\n\tt.Log(\"The token expired, and the user was succesfully reported Invalid.\")\n}", "func InvalidToken(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"text/html\")\n\tw.WriteHeader(http.StatusForbidden)\n\tfmt.Fprint(w, `Your token <strong>expired</strong>, click <a href=\"javascript:void(0)\" onclick=\"location.replace(document.referrer)\">here</a> to try again.`)\n}", "func Check() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tvar code int\n\t\tvar token string\n\n\t\tcode = e.SUCCESS\n\t\trToken := c.Request.Header[\"Authorization\"]\n\n\t\tif len(rToken) < 1 {\n\t\t\tcode = e.ERROR_MISSING_TOKEN\n\t\t} else {\n\t\t\ttoken = rToken[0]\n\t\t\tsplitToken := strings.Split(token, \"Bearer\")\n\t\t\ttoken = strings.TrimSpace(splitToken[1])\n\n\t\t\tclaims, err := util.ParseToken(token)\n\t\t\tif err != nil {\n\t\t\t\tcode = e.ERROR_AUTH_CHECK_TOKEN_FAIL\n\t\t\t} else {\n\t\t\t\tif time.Now().Unix() > claims.ExpiresAt {\n\t\t\t\t\tcode = e.ERROR_AUTH_CHECK_TOKEN_TIMEOUT\n\t\t\t\t} else {\n\t\t\t\t\tc.Set(\"id_user\", claims.ID)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif code != e.SUCCESS {\n\t\t\tc.JSON(http.StatusUnauthorized, gin.H{\n\t\t\t\t\"code\": code,\n\t\t\t\t\"msg\": e.GetMsg(code),\n\t\t\t})\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\n\t\tc.Next()\n\n\t}\n}", "func (c ClaimsWithLeeway) Valid() error {\n\tvErr := new(jwt.ValidationError)\n\tnow := jwt.TimeFunc().Unix()\n\n\t// The claims below are optional, by default, so if they are set to the\n\t// default value in Go, let's not fail the verification for them.\n\tif c.VerifyExpiresAt(now, false) == false {\n\t\tdelta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))\n\t\tvErr.Inner = fmt.Errorf(\"token is expired by %v\", delta)\n\t\tlog.Println(\"token is expired by \" + delta.String())\n\t\tvErr.Errors |= jwt.ValidationErrorExpired\n\t}\n\n\tif c.VerifyIssuedAt(now, false) == false {\n\t\tvErr.Inner = fmt.Errorf(\"Token 
used before issued\")\n\t\tlog.Println(\"Token used before issued\")\n\t\tvErr.Errors |= jwt.ValidationErrorIssuedAt\n\t}\n\n\tif c.VerifyNotBefore(now, false) == false {\n\t\tvErr.Inner = fmt.Errorf(\"token is not valid yet\")\n\t\tlog.Println(\"token is not valid yet\")\n\t\tvErr.Errors |= jwt.ValidationErrorNotValidYet\n\t}\n\n\tif vErr.Errors == 0 {\n\t\treturn nil\n\t}\n\n\treturn vErr\n}", "func checkToken(request *Request) (bool, error) {\n\tif request.MethodName == \"auth\" {\n\t\treturn true, nil\n\t} else {\n\t\tif valid, err := model.CheckToken(request.Token); valid && err == nil {\n\t\t\treturn true, nil\n\t\t} else {\n\t\t\treturn false, err\n\t\t}\n\t}\n}", "func tokenAuthRequired() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tif sessions.Default(c).Get(\"userid\") == nil {\n\t\t\tc.Redirect(302, \"/login\")\n\t\t} else {\n\t\t\tc.Next()\n\t\t}\n\t}\n}", "func (token Token) IsValid() bool {\n\treturn len(token.AccessToken) > 0 && time.Now().UTC().Before(token.ExpiresOn)\n}", "func IsValid(req *http.Request) bool {\n\t// check if token exists in cookie\n\tcookie, err := req.Cookie(\"_token\")\n\tif err != nil {\n\t\treturn false\n\t}\n\t// validate it and allow or redirect request\n\ttoken := cookie.Value\n\treturn jwt.Passes(token)\n}", "func (jwtAuth *JWTAuth) TokenValid(r *http.Request) error {\n\ttokenStr := jwtAuth.ExtractToken(r)\n\ttoken, err := verifyToken(tokenStr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, ok := token.Claims.(jwt.Claims); !ok && !token.Valid {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (con *Controller) RefreshHandler(tokenCollection *mongo.Collection) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\ttokenString := c.Query(\"access_token\")\n\t\tif tokenString == \"\" {\n\t\t\tsendFailedResponse(c, http.StatusBadRequest, \"Access token is required\")\n\t\t\treturn\n\t\t}\n\n\t\tobjectID, err := primitive.ObjectIDFromHex(c.Query(\"user\"))\n\t\tif err != nil {\n\t\t\tlog.Println(err.Error())\n\t\t\tsendFailedResponse(c, http.StatusUnprocessableEntity, \"invalid user id\")\n\t\t\treturn\n\t\t}\n\n\t\tcurrentDate := primitive.NewDateTimeFromTime(time.Now().UTC())\n\t\texistingToken := tokenCollection.FindOne(c, bson.M{\n\t\t\t\"access_token\": tokenString,\n\t\t\t\"logged_out\": false,\n\t\t\t\"expires_at\": bson.M{\n\t\t\t\t\"$lte\": currentDate,\n\t\t\t},\n\t\t})\n\t\tif existingToken.Err() != mongo.ErrNoDocuments {\n\t\t\tsendFailedResponse(c, http.StatusUnprocessableEntity, \"access token rejected\")\n\t\t\treturn\n\t\t}\n\n\t\tclaims := &Claims{}\n\t\ttoken, err := jwt.ParseWithClaims(tokenString, claims, func(token *jwt.Token) (interface{}, error) {\n\t\t\treturn secretKey, nil\n\t\t})\n\n\t\tif err != nil {\n\t\t\tlog.Println(err.Error())\n\t\t\tsendFailedResponse(c, http.StatusInternalServerError, \"something went wrong\")\n\t\t\treturn\n\t\t}\n\n\t\tif !token.Valid {\n\t\t\tsendFailedResponse(c, http.StatusBadRequest, \"invalid token\")\n\t\t\treturn\n\t\t}\n\n\t\tclaims.ExpiresAt = time.Now().Add(6 * time.Hour).Unix()\n\t\tnewToken, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(secretKey)\n\n\t\tif err != nil {\n\t\t\tlog.Println(err.Error())\n\t\t\tsendFailedResponse(c, http.StatusInternalServerError, \"something went wrong\")\n\t\t\treturn\n\t\t}\n\n\t\taccessToken := model.AccessToken{\n\t\t\tUserID: objectID,\n\t\t\tAccessToken: newToken,\n\t\t\tLoggedOut: false,\n\t\t\tRevoked: false,\n\t\t\tExpiresAt: primitive.NewDateTimeFromTime(time.Now().Add(6 * time.Hour).UTC()),\n\t\t\tCreatedAt: 
primitive.NewDateTimeFromTime(time.Now().UTC()),\n\t\t\tUpdatedAt: primitive.NewDateTimeFromTime(time.Now().UTC()),\n\t\t}\n\t\t_, err = tokenCollection.InsertOne(c, accessToken)\n\t\tif err != nil {\n\t\t\tsendFailedResponse(c, http.StatusInternalServerError, \"something went wrong\")\n\t\t\tlog.Println(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\terr = con.Redis.Set(c, claims.Email, newToken, 6*time.Hour).Err()\n\t\tif err != nil {\n\t\t\tlog.Println(\"redis error: \", err.Error())\n\t\t}\n\n\t\tsendSuccessResponse(c, http.StatusOK, gin.H{\n\t\t\t\"access_token\": newToken,\n\t\t})\n\t}\n}", "func CheckTheValidityOfTheTokenFromHTTPHeader(w http.ResponseWriter, r *http.Request) (writer http.ResponseWriter, newToken string, err error) {\n err = createError(011)\n for _, cookie := range r.Cookies() {\n if cookie.Name == \"Token\" {\n var token string\n token, err = CheckTheValidityOfTheToken(cookie.Value)\n //fmt.Println(\"T\", token, err)\n writer = SetCookieToken(w, token)\n newToken = token\n }\n }\n //fmt.Println(err)\n return\n}", "func refreshTokenHandler(w http.ResponseWriter, r *http.Request) {\n\n\t// TODO: Use your own methods to verify an existing user is\n\t// able to refresh their token and then give them a new one\n\n\tif response, err := bjwt.Generate(123456); err != nil {\n\t\tresultErrorJSON(w, http.StatusInternalServerError, err.Error())\n\t} else {\n\t\tresultResponseJSON(w, http.StatusOK, response)\n\t}\n}", "func (r *oauthProxy) expirationHandler(w http.ResponseWriter, req *http.Request) {\n\tctx, span, _ := r.traceSpan(req.Context(), \"expiration handler\")\n\tif span != nil {\n\t\tdefer span.End()\n\t}\n\n\tuser, err := r.getIdentity(req)\n\tif err != nil || user.isExpired() {\n\t\tr.errorResponse(w, req.WithContext(ctx), \"\", http.StatusUnauthorized, nil)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", jsonMime)\n\tw.WriteHeader(http.StatusOK)\n}", "func TokenValid(r *http.Request) error {\n\ttoken, err := VerifyToken(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, ok := token.Claims.(jwt.Claims); !ok && !token.Valid {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (i *Info) expired(now int64) bool {\n\treturn i.TTLStamp <= now\n}", "func checkJWT(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"content-type\", \"application/json\")\n\t\terr := auth.TokenValid(r)\n\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(`{\"status\":\"error\",\"error\":true,\"msg\":%s}`, \"Unathorized\"), 401)\n\t\t\treturn\n\t\t}\n\n\t\tnext.ServeHTTP(w, r)\n\n\t})\n}", "func (m *Manager) expiresFromToken(accessToken string) (int64, error) {\n\ttokenParts := strings.Split(accessToken, \".\")\n\tif len(tokenParts) != 3 {\n\t\treturn 0, fmt.Errorf(\"access token is of invalid format\")\n\t}\n\n\tdecoded, err := base64.RawURLEncoding.DecodeString(tokenParts[1])\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar payload tokenPayload\n\terr = json.Unmarshal(decoded, &payload)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\te := payload.Expires\n\tif e == 0 {\n\t\treturn 0, fmt.Errorf(\"no expire time found in access token\")\n\t}\n\treturn e, nil\n}", "func ValidateToken(accessToken string) (userID string, err error) {\n\n if accessToken == \"\" {\n return \"\", errors.New(\"invalid accessToken\")\n }\n\n db, err := bolt.Open(DB_NAME, 0600, nil)\n if err != nil {\n panic(err)\n }\n defer db.Close()\n\n err = db.View(func(tx *bolt.Tx) error {\n b := tx.Bucket([]byte(TOKEN_BUCKET))\n\n v := 
b.Get([]byte(accessToken))\n if v == nil {\n return errors.New(\"invalid accessToken\")\n }\n\n var token Token\n if err := json.Unmarshal(v, &token); err != nil {\n return errors.New(\"invalid accessToken (failed parsing JSON from database)\")\n }\n\n //TODO: validate TTL of token\n\n userID = token.UserID\n\n return nil\n })\n\n if err != nil {\n userID = \"\"\n }\n\n return userID, err\n}", "func RequiresAuth(token string) (bool, error) {\n\texists, err := TokenExists(token)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif exists {\n\t\tres, err := r.Table(\"tokens\").Filter(map[string]interface{}{\"Token\": token}).Run(session)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tvar transformToken modelToken.JWT\n\t\tres.One(&transformToken)\n\t\tres.Close()\n\n\t\tresu, err := r.Table(\"users\").Filter(map[string]interface{}{\"Name\": transformToken.Audience}).Run(session)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tvar user modelUser.User\n\t\tresu.One(&user)\n\t\tresu.Close()\n\n\t\tif transformToken.Expires <= time.Now().Unix() {\n\t\t\treturn true, nil\n\t\t}\n\t\tif transformToken.Expires > time.Now().Unix() {\n\t\t\tRevokeToken(&user)\n\t\t\treturn false, errors.New(\"Token has expired and been revoked.\")\n\t\t}\n\t}\n\treturn false, errors.New(\"The Token Does Not Exist\")\n}", "func (j *Service) IsExpired(claims Claims) bool {\n\treturn !claims.VerifyExpiresAt(time.Now().Unix(), true)\n}", "func ValidateToken(tok *oauth2.Token) error {\n\tif util.TimeIsZero(tok.Expiry) {\n\t\treturn fmt.Errorf(\"Token has no expiration!\")\n\t}\n\tnow := time.Now()\n\tif now.After(tok.Expiry) {\n\t\t// This case is covered by tok.Valid(), but we want to provide a\n\t\t// better error message.\n\t\treturn fmt.Errorf(\"Token is expired! 
Expiry: %s; time is now %s.\", tok.Expiry, now)\n\t}\n\tif !tok.Valid() {\n\t\treturn fmt.Errorf(\"Token is invalid!\")\n\t}\n\treturn nil\n}", "func (t *TokenClaims) Valid() error {\n\tif t.IsMaster() {\n\t\treturn nil\n\t}\n\n\tif !t.Expiry.After(time.Now()) {\n\t\treturn ErrTokenExpired\n\t}\n\treturn nil\n}", "func TestTokenExpiracy(t *testing.T) {\n\tdb.InitDB()\n\tvar router *gin.Engine = routes.SetupRouter()\n\n\tos.Setenv(\"TOKEN_VALIDITY_MINUTES\", \"0\")\n\n\tvar user models.UserCreate = utils.CreateUser(\"Tom\", \"qwerty1234\", t, router)\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\ttime.Sleep(1 * time.Second)\n\n\tvar url string = \"/v1/user/\" + strconv.Itoa(user.ID)\n\tvar bearer = \"Bearer \" + user.Token\n\trecord := httptest.NewRecorder()\n\trequest, _ := http.NewRequest(\"GET\", url, nil)\n\trequest.Header.Add(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", bearer)\n\n\trouter.ServeHTTP(record, request)\n\n\tvar message Message\n\terr := json.Unmarshal([]byte(record.Body.String()), &message)\n\tif err != nil {\n\t\tlog.Fatal(\"Bad output: \", err.Error())\n\t\tt.Fail()\n\t}\n\n\tassert.Equal(t, record.Code, 401)\n\tassert.Equal(t, message.Message, \"Token expired.\")\n\n\tos.Setenv(\"TOKEN_VALIDITY_MINUTES\", \"15\")\n\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\n\tutils.CleanUser(user.ID, user.Token, t, router)\n\tdb.CloseDB()\n}", "func (m *GinMiddleware) CheckAuthToken() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tginWrap := ginutil.NewGinWrap(c, m.log)\n\t\t// get context\n\t\tctx := ginWrap.AppCtx\n\t\tif ctx == nil {\n\t\t\treturn\n\t\t}\n\t\theaderAuth := c.GetHeader(ginutil.HeaderAuthorization)\n\t\theaderAuth = strings.TrimPrefix(headerAuth, \"Bearer \")\n\n\t\tvar claims, err = m.IAuth.CheckJwtToken(headerAuth)\n\t\tif err != nil {\n\t\t\tif errors.As(err, &jwt2.ValidationError{}) {\n\t\t\t\tvalidateErr := err.(*jwt2.ValidationError)\n\t\t\t\tif errors.Is(validateErr.Inner, jwt.ErrIatTime) {\n\t\t\t\t\tginWrap.AbortWithErr(errs.NewInvalidToken().WithMsg(\"token format error\"))\n\t\t\t\t\treturn\n\t\t\t\t} else if errors.Is(validateErr.Inner, jwt.ErrTimeExp) {\n\t\t\t\t\tc.AbortWithStatusJSON(200, com_model.NewRetFromErr(errs.NewInvalidToken().WithMsg(\"token expired\")))\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tc.AbortWithStatusJSON(200, com_model.NewRetFromErr(errs.NewInvalidToken().WithMsg(\"token error\")))\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tc.AbortWithStatusJSON(200, com_model.NewRetFromErr(errs.NewInvalidToken().WithMsg(\"unknown token error\")))\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\thas, err := m.IAuth.HasUser(ctx, claims.UserId)\n\t\t\tif err != nil {\n\t\t\t\tginWrap.AbortWithErr(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !has {\n\t\t\t\tginWrap.AbortWithAppErr(errs.NewInvalidToken())\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// set user id to ctx\n\t\t\tvalues := context2.MustGetAppValues(ctx)\n\t\t\tvalues.UserId = claims.UserId // set user id\n\t\t\tc.Next()\n\t\t\treturn\n\t\t}\n\t}\n}", "func (dc *DelegatedCredential) isExpired(start, now time.Time) bool {\n\tend := start.Add(dc.cred.validTime)\n\treturn !now.Before(end)\n}", "func (a *Service) ValidateJweToken(token string) (map[string]interface{}, *error_utils.ApiError) {\n\n\t// parse token string\n\tclaims, err := a.parseTokenString(token)\n\tif err != nil {\n\t\treturn nil, error_utils.NewUnauthorizedError(err.Error())\n\t}\n\n\t// validate dates\n\tif claims[\"orig_iat\"] == nil {\n\t\treturn nil, 
error_utils.NewUnauthorizedError(\"Orig Iat is missing\")\n\t}\n\n\t// try convert to float64\n\tif _, ok := claims[\"orig_iat\"].(float64); !ok {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Orig Iat must be float64 format\")\n\t}\n\n\t// get value and validate\n\torigIat := int64(claims[\"orig_iat\"].(float64))\n\tif origIat < a.timeFunc().Add(-a.maxRefresh).Unix() {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Token is expired\")\n\t}\n\n\t// check if exp exists in map\n\tif claims[\"exp\"] == nil {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Exp is missing\")\n\t}\n\n\t// try convert to float 64\n\tif _, ok := claims[\"exp\"].(float64); !ok {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Exp must be float64 format\")\n\t}\n\n\t// get value and validate\n\texp := int64(claims[\"exp\"].(float64))\n\tif exp < a.timeFunc().Unix(){\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Token is expired\")\n\t}\n\t// validate dates\n\n\t// validate issuer\n\t// check if iss exists in map\n\tif claims[\"iss\"] == nil {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Iss is missing\")\n\t}\n\n\t// try convert to string\n\tif _, ok := claims[\"iss\"].(string); !ok {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Iss must be string format\")\n\t}\n\n\t// get value and validate\n\tissuer := claims[\"iss\"]\n\tif issuer != a.issuer{\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Invalid issuer\")\n\t}\n\t// validate issuer\n\n\treturn claims, nil\n}", "func (t *tokenCache) validateAccessToken(token xoauth2.Token) bool {\n\tif token.AccessToken == \"\" {\n\t\treturn false\n\t}\n\tif !token.Expiry.IsZero() && t.clock.Now().After(token.Expiry.Round(0).Add(-expiryDelta)) {\n\t\treturn false\n\t}\n\treturn true\n}", "func (c *Client) HasValidToken() bool {\n\treturn c.lastRefresh != nil && c.lastRefresh.Add(c.expiresIn).After(time.Now())\n}", "func ParseAuthTokenMiddleware(s *Setup) func(next http.Handler) http.Handler {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\ttoken, _ := request.ParseFromRequest(r, request.AuthorizationHeaderExtractor,\n\t\t\t\tfunc(token *jwt.Token) (interface{}, error) {\n\t\t\t\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\t\t\t\treturn nil, fmt.Errorf(\"unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t\t\t\t}\n\t\t\t\t\treturn s.TokenSigningSecret, nil\n\t\t\t\t})\n\t\t\t// error from ParseFromRequest is ignored because returns errors for expired\n\t\t\t// requests and other cases that have nothing to do with parsing.\n\t\t\tif token != nil {\n\t\t\t\tclaimMap, _ := token.Claims.(jwt.MapClaims)\n\t\t\t\tisInBlacklist, _ := s.AuthService.IsInBlacklist(r.Header.Get(\"Authorization\"))\n\t\t\t\tswitch {\n\t\t\t\tcase isInBlacklist:\n\t\t\t\t\t// has logged out token\n\t\t\t\t\tbreak\n\t\t\t\tcase token.Valid:\n\t\t\t\t\t// if valid and not expired\n\t\t\t\t\tusername := claimMap[\"sub\"]\n\t\t\t\t\tr.Header.Set(\"authorized_username\", username.(string))\n\t\t\t\t\tr.Header.Del(\"authorized_username_expired\")\n\t\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\t\treturn\n\t\t\t\tcase claimMap.VerifyExpiresAt(time.Now().Add(-s.TokenRefreshLifetime).Unix(), true):\n\t\t\t\t\t// if expired but still refreshable\n\t\t\t\t\tusername := claimMap[\"sub\"]\n\t\t\t\t\tr.Header.Set(\"authorized_username_expired\", username.(string))\n\t\t\t\t\tr.Header.Del(\"authorized_username\")\n\t\t\t\t\tnext.ServeHTTP(w, 
r)\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t\t// if too expired\n\t\t\t\t\ts.Logger.Printf(\"access with expired token\")\n\t\t\t\t}\n\t\t\t}\n\t\t\t// if not accepted\n\t\t\tr.Header.Set(\"authorized_username\", \"---HerUsername25Letters--\")\n\t\t\tr.Header.Del(\"authorized_username_expired\")\n\t\t\tnext.ServeHTTP(w, r)\n\t\t})\n\t}\n}", "func (v value) expired(c *Cache) bool{\n return time.Since(v.time)>c.expire\n}", "func ValidateJwtMiddlewear(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\n\t\tauthToken := r.Header.Get(\"Authorization\")\n\t\tsplitToekn := strings.Split(authToken, \"Bearer\")\n\t\tif len(splitToekn) != 2 {\n\t\t\tutils.RespondError(w, 401, \"Invalid Token\")\n\t\t\treturn\n\t\t}\n\t\ttokenSring := strings.TrimSpace(splitToekn[1])\n\n\t\ttoken, err := jwt.ParseWithClaims(tokenSring, &CustomClaims{}, func(token *jwt.Token) (interface{}, error) {\n\t\t\treturn []byte([]byte(os.Getenv(\"JWT_SECRATE\"))), nil\n\t\t})\n\n\t\tif claims, ok := token.Claims.(*CustomClaims); ok && token.Valid {\n\t\t\t//todo: add logic to check token xpired or not\n\t\t\tfmt.Printf(\"%v %v\", claims.Token, claims.StandardClaims.ExpiresAt)\n\t\t} else {\n\t\t\t//todo : add some changes for catching error\n\t\t\tfmt.Println(err)\n\t\t}\n\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func (p *UserPermissions) Expired(ttl time.Duration, now time.Time) bool {\n\treturn !now.Before(p.UpdatedAt.Add(ttl))\n}", "func (m *manager) Validate(r *http.Request) error {\n\ttokenString, err := getToken(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttoken, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"unexpected signing method was used in JWT token making it invalid: %v\", token.Header[\"alg\"])\n\t\t}\n\n\t\treturn m.secret, nil\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%s:%v\", \"invalid JWT token\", err)\n\t}\n\n\tif token == nil {\n\t\treturn fmt.Errorf(\"%s\", \"invalid JWT token\")\n\t}\n\n\tif !token.Valid {\n\t\treturn fmt.Errorf(\"%s\", \"invalid JWT token\")\n\t}\n\n\tfor i := range m.options {\n\t\topt, ok := m.options[i].(*option)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"error in type assertion in jwt token\")\n\t\t}\n\n\t\tswitch opt.optionType {\n\t\tcase optLifeSpan: // do nothing, this option is for the client side\n\t\tcase optEnforceExpiry: // if enforce is set, claims must have expiry\n\t\t\tclaims, ok := token.Claims.(jwt.MapClaims)\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"error in type assertion in jwt claims\")\n\t\t\t}\n\n\t\t\tif _, ok := claims[exp]; !ok {\n\t\t\t\treturn fmt.Errorf(\"all claims must have expiry in their claims\")\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"invalid option type\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s *Server) authTokenVerifier(auth JWTVerifier) func(next http.Handler) http.Handler {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tauthHeader := getHeader(r, headerAuthorization)\n\n\t\t\tif authHeader == \"\" {\n\t\t\t\ts.logger.Print(\"no authorization header found\")\n\t\t\t\thttp.Error(w, \"missing authorization header\", http.StatusUnauthorized)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tsplitAuthHeader := strings.Split(authHeader, \" \")\n\t\t\tif len(splitAuthHeader) != 2 {\n\t\t\t\ts.logger.Printf(\"authorzation header value invalid: %s\", 
splitAuthHeader)\n\t\t\t\thttp.Error(w, \"improperly formatted authorization header\", http.StatusUnauthorized)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tuserID, err := auth.VerifyJWT(r.Context(), splitAuthHeader[1])\n\t\t\tif err != nil {\n\t\t\t\ts.logger.Print(err)\n\t\t\t\thttp.Error(w, \"failed to verify authentication token\", http.StatusForbidden)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif userID != getAccountID(r).String() {\n\t\t\t\thttp.Error(w, \"not authorized for given account\", http.StatusForbidden)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tnext.ServeHTTP(w, r)\n\t\t})\n\t}\n}", "func (p *UserPendingPermissions) Expired(ttl time.Duration, now time.Time) bool {\n\treturn !now.Before(p.UpdatedAt.Add(ttl))\n}", "func (s *server) CheckToken(ctx context.Context, in *pb.LogRequest) (*pb.LogResponse, error) {\n\tlog.Printf(\"Received: %v\", \"Check token\")\n\tis, err := CheckToken(in.Email, in.Token)\n\tif err != nil {\n\t\treturn &pb.LogResponse{Sucess: false}, nil\n\t}\n\treturn &pb.LogResponse{Sucess: is}, nil\n}", "func (s *StaticProvider) IsExpired() bool {\n\treturn false\n}", "func isExpired(timestamp interface{}) bool {\n\tif validity, ok := timestamp.(float64); ok {\n\t\ttm := time.Unix(int64(validity), 0)\n\t\tremainder := tm.Sub(time.Now())\n\t\tif remainder > 0 {\n\t\t\treturn true\n\t\t}\n\t} else {\n\t\tglog.Error(\"Error casting timestamp to string. This may be due to a invalid token\")\n\t\textension.Exit(extension.ErrorExitCode)\n\t}\n\treturn false\n}", "func (c *Claim) Valid() error {\n\tvErr := new(jwt.ValidationError)\n\tnow := jwt.TimeFunc().Unix()\n\n\tif c.VerifyExpiresAt(now, true) == false {\n\t\tdelta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))\n\t\tvErr.Inner = fmt.Errorf(\"token is expired by %v\", delta)\n\t\tvErr.Errors |= jwt.ValidationErrorExpired\n\t}\n\n\tif c.VerifyIssuedAt(now, false) == false {\n\t\tvErr.Inner = fmt.Errorf(\"Token used before issued\")\n\t\tvErr.Errors |= jwt.ValidationErrorIssuedAt\n\t}\n\n\tif c.VerifyNotBefore(now, false) == false {\n\t\tvErr.Inner = fmt.Errorf(\"token is not valid yet\")\n\t\tvErr.Errors |= jwt.ValidationErrorNotValidYet\n\t}\n\n\tif vErr.Errors == 0 {\n\t\treturn nil\n\t}\n\n\treturn vErr\n}", "func (o *OAuth2Config) IsExpired() bool {\n\treturn o.Token == nil || !o.Token.Valid()\n}", "func (config *Config) refreshIAMTokenIfNeeded() error {\n\tlog(\"Validating IAM token locally\")\n\tif claims, err := parseJWTToken(config.Context.IAMToken()); err == nil {\n\t\tif err := claims.Valid(); err != nil {\n\t\t\tlog(\"The IAM token is expired, attempting to refresh\")\n\t\t\tif updatedToken, err := config.Context.RefreshIAMToken(); err == nil {\n\t\t\t\tlogf(\"Updated IAM token successfully, new token is: %s\", updatedToken)\n\t\t\t\tlog(\"Sleeping for a moment to allow token issue time to become valid\")\n\t\t\t\tconfig.AccessToken = updatedToken\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t} else {\n\t\t\t\tlog(\"Failed to refresh IAM token\")\n\t\t\t\treturn errors.New(\"Login token is expired. 
Please update tokens using 'ibmcloud login' and try again.\")\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c *CSRFStore) verifyToken(headerToken string) error {\n\tc.RLock()\n\tdefer c.RUnlock()\n\n\t// check if token is initialized\n\tif c.token == nil || len(c.token.Value) == 0 {\n\t\treturn errors.New(\"token not initialized\")\n\t}\n\n\ta, err := base64.RawURLEncoding.DecodeString(headerToken)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// check if token values are same, using a constant time comparison\n\tif subtle.ConstantTimeCompare(a, c.token.Value) != 1 {\n\t\treturn errors.New(\"invalid token\")\n\t}\n\n\t// make sure token is still valid\n\tif c.expired() {\n\t\treturn errors.New(\"token has expired\")\n\t}\n\n\treturn nil\n}", "func IsTokenValid(token string, tokenExpireDurationDiff time.Duration) bool {\n\tif token == \"\" {\n\t\treturn false\n\t}\n\n\tparser := jwt.NewParser(jwt.WithLeeway(tokenExpireDurationDiff))\n\n\tvar claims jwt.RegisteredClaims\n\n\t_, _, err := parser.ParseUnverified(token, &claims)\n\treturn err == nil\n}", "func GetExpiredTime() time.Duration {\n\tday := viper.GetInt(\"token.expired\")\n\treturn time.Hour * 24 * time.Duration(day)\n}", "func (s *RedisSystem) RedeemToken(token string) bool {\n\tif s.currentToken == \"\" || token > s.currentToken {\n\t\ts.currentToken = token\n\t}\n\ttokenValid := token >= s.currentToken\n\tif !tokenValid {\n\t\tlogInfo(\"invalid token: %s is not greater or equal to %s\", token, s.currentToken)\n\t}\n\treturn tokenValid\n}", "func (mid *Middleware) AccessControl(next http.HandlerFunc) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tauth := strings.Fields(r.Header.Get(\"Authorization\"))\n\t\tif len(auth) > 1 && auth[0] == \"Bearer\" {\n\t\t\t// Found token\n\n\t\t\t// Check that token is not in redis\n\t\t\tloggedOut, _ := mid.RedisClient.ExistsKey(\"blacklist:\" + auth[1])\n\n\t\t\tif loggedOut {\n\t\t\t\tw.WriteHeader(401)\n\t\t\t\t// TODO: Change this to return a JSON object\n\t\t\t\tfmt.Fprintf(w, \"Invalid token\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tclaims, err := VerifyJWT(auth[1])\n\n\t\t\tif err != nil {\n\t\t\t\tw.WriteHeader(401)\n\t\t\t\t// TODO: Change this to return a JSON object\n\t\t\t\tfmt.Fprintf(w, \"Invalid token\")\n\t\t\t} else {\n\t\t\t\tuserID := claims[\"userId\"].(string)\n\t\t\t\texp := claims[\"exp\"].(float64)\n\t\t\t\texpS := strconv.FormatFloat(exp, 'E', -1, 64)\n\t\t\t\tr.Header.Add(\"uid\", userID)\n\t\t\t\tr.Header.Add(\"exp\", expS)\n\t\t\t\tr.Header.Add(\"token\", auth[1])\n\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t}\n\t\t} else {\n\t\t\tw.WriteHeader(401)\n\t\t\t// TODO: Change this to return a JSON object\n\t\t\tfmt.Fprintf(w, \"Invalid token\")\n\t\t}\n\t})\n}", "func (p *RepoPermissions) Expired(ttl time.Duration, now time.Time) bool {\n\treturn !now.Before(p.UpdatedAt.Add(ttl))\n}", "func (t *Token) IsExpired() bool {\n\tif t.Expiry.IsZero() {\n\t\treturn false\n\t}\n\treturn t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow())\n}", "func restrictedHandler(w http.ResponseWriter, r *http.Request) {\n\t// Get token from request\n\ttoken, err := request.ParseFromRequest(r, request.OAuth2Extractor, func(token *jwt.Token) (interface{}, error) {\n\t\t// since we only use the one private key to sign the tokens,\n\t\t// we also only use its public counter part to verify\n\t\treturn verifyKey, nil\n\t}, request.WithClaims(&CustomClaimsExample{}))\n\n\t// If the token is missing or invalid, return error\n\tif err != nil 
{\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\tfmt.Fprintln(w, \"Invalid token:\", err)\n\t\treturn\n\t}\n\n\t// Token is valid\n\tfmt.Fprintln(w, \"Welcome,\", token.Claims.(*CustomClaimsExample).Name)\n}", "func TokenRefresherMiddleware(next echo.HandlerFunc) echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\t\t// If the user is not authenticated (no user token data in the context), don't do anything.\n\t\tif c.Get(\"user\") == nil {\n\t\t\treturn next(c)\n\t\t}\n\t\t// Gets user token from the context.\n\t\tu := c.Get(\"user\").(*jwt.Token)\n\n\t\tclaims := u.Claims.(*Claims)\n\n\t\t// We ensure that a new token is not issued until enough time has elapsed\n\t\t// In this case, a new token will only be issued if the old token is within\n\t\t// 15 mins of expiry.\n\t\tif time.Unix(claims.ExpiresAt, 0).Sub(time.Now()) < 15*time.Minute {\n\t\t\t// Gets the refresh token from the cookie.\n\t\t\trc, err := c.Cookie(refreshTokenCookieName)\n\t\t\tif err == nil && rc != nil {\n\t\t\t\t// Parses token and checks if it valid.\n\t\t\t\ttkn, err := jwt.ParseWithClaims(rc.Value, claims, func(token *jwt.Token) (interface{}, error) {\n\t\t\t\t\treturn []byte(GetRefreshJWTSecret()), nil\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\tif err == jwt.ErrSignatureInvalid {\n\t\t\t\t\t\tc.Response().Writer.WriteHeader(http.StatusUnauthorized)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif tkn != nil && tkn.Valid {\n\t\t\t\t\t// If everything is good, update tokens.\n\t\t\t\t\t_ = GenerateTokensAndSetCookies(&user.User{\n\t\t\t\t\t\tName: claims.Name,\n\t\t\t\t\t}, c)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn next(c)\n\t}\n}", "func (tm *IAMTokenManager) saveToken(tokenInfo *IAMTokenInfo) {\n\taccessToken := tokenInfo.AccessToken\n\n\tclaims := jwt.StandardClaims{}\n\tif token, _ := jwt.ParseWithClaims(accessToken, &claims, nil); token != nil {\n\t\ttimeToLive := claims.ExpiresAt - claims.IssuedAt\n\t\texpireTime := claims.ExpiresAt\n\t\tfractionOfTimeToLive := 0.8\n\t\ttimeForNewToken := expireTime - (timeToLive * int64(1.0-fractionOfTimeToLive))\n\t\ttm.timeForNewToken = timeForNewToken\n\t}\n\n\ttm.tokenInfo = tokenInfo\n}", "func IsFixedTokenVerified(config *envcfg.Envcfg, auth string) error {\n\t// Bearer token as RFC 6750 standard\n\tif strings.Split(auth, \" \")[0] != TYPE || strings.Split(auth, \" \")[1] != config.GetKey(\"jwt.fixedtoken\") {\n\t\treturn errors.New(\"Invalid token\")\n\t}\n\n\treturn nil\n}", "func (r *record) isExpired(now time.Time) bool {\n\tif r.Expires == 0 {\n\t\treturn false\n\t}\n\texpiryDateUTC := time.Unix(r.Expires, 0).UTC()\n\treturn now.UTC().After(expiryDateUTC)\n}", "func TestTokenTTL(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\tdeleteAll(t)\n\tadminClient := getPachClient(t, admin)\n\n\t// Create repo (so alice has something to list)\n\trepo := tu.UniqueString(\"TestTokenTTL\")\n\trequire.NoError(t, adminClient.CreateRepo(repo))\n\n\t// Create auth token for alice\n\talice := tu.UniqueString(\"alice\")\n\tresp, err := adminClient.GetAuthToken(adminClient.Ctx(), &auth.GetAuthTokenRequest{\n\t\tSubject: alice,\n\t\tTTL: 5, // seconds\n\t})\n\trequire.NoError(t, err)\n\taliceClient := adminClient.WithCtx(context.Background())\n\taliceClient.SetAuthToken(resp.Token)\n\n\t// alice's token is valid, but expires quickly\n\trepos, err := aliceClient.ListRepo()\n\trequire.NoError(t, err)\n\trequire.ElementsEqualUnderFn(t, []string{repo}, repos, RepoInfoToName)\n\trequire.NoError(t, backoff.Retry(func() error 
{\n\t\trepos, err = aliceClient.ListRepo()\n\t\tif err == nil {\n\t\t\treturn errors.New(\"alice still has access to ListRepo\")\n\t\t}\n\t\trequire.True(t, auth.IsErrBadToken(err), err.Error())\n\t\trequire.Equal(t, 0, len(repos))\n\t\treturn nil\n\t}, backoff.NewTestingBackOff()))\n}", "func TestTokenRefreshLimit(t *testing.T) {\n\tdb.InitDB()\n\tvar router *gin.Engine = routes.SetupRouter()\n\n\tos.Setenv(\"TOKEN_LIMIT_HOURS\", \"0\")\n\n\tvar user models.UserCreate = utils.CreateUser(\"Tom\", \"qwerty1234\", t, router)\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\ttime.Sleep(1 * time.Second)\n\n\tvar url string = \"/v1/refresh/token\"\n\tvar bearer = \"Bearer \" + user.Token\n\trecord := httptest.NewRecorder()\n\trequest, _ := http.NewRequest(\"POST\", url, nil)\n\trequest.Header.Add(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", bearer)\n\n\trouter.ServeHTTP(record, request)\n\n\tvar message Message\n\terr := json.Unmarshal([]byte(record.Body.String()), &message)\n\tif err != nil {\n\t\tlog.Fatal(\"Bad output: \", err.Error())\n\t\tt.Fail()\n\t}\n\n\tassert.Equal(t, record.Code, 401)\n\tassert.Equal(t, message.Message, \"Token has expired and cannot be refreshed, please reconnect\")\n\n\tos.Setenv(\"TOKEN_LIMIT_HOURS\", \"24\")\n\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\n\tutils.CleanUser(user.ID, user.Token, t, router)\n\tdb.CloseDB()\n}", "func ValidateToken(tokenString string, w http.ResponseWriter) (Claims, error) {\n\tclaims := Claims{}\n\tjwtKey := []byte(config.Configuration.TokenPrivateKey)\n\n\t// The token string is parsed, decoded and stored into the given Claims struct\n\ttoken, err := jwt.ParseWithClaims(tokenString, &claims,\n\t\tfunc(token *jwt.Token) (interface{}, error) {\n\t\t\treturn jwtKey, nil\n\t\t})\n\n\t// Check if the token has expired according to the expiry time fixed during the sign in\n\tif !token.Valid {\n\t\terr = ExpiredToken\n\t\tMakeErrorResponse(w, http.StatusUnauthorized, err.Error())\n\t\tlog.Println(err.Error())\n\t\treturn claims, err\n\t}\n\n\t// Check if the token has been signed with the private key of the api gateway\n\tif err != nil {\n\t\tif err == jwt.ErrSignatureInvalid {\n\t\t\t// If the token is expired or has not been signed according to the api gateway key, an Unauthorization code\n\t\t\t// is returned in both cases, but a different message is provided to the client.\n\t\t\tMakeErrorResponse(w, http.StatusUnauthorized, \"Wrong credentials\")\n\t\t\tlog.Println(\"Wrong credentials\")\n\t\t\treturn claims, err\n\t\t}\n\n\t\tMakeErrorResponse(w, http.StatusBadRequest, \"Malformed token\")\n\t\tlog.Println(\"Malformed token\")\n\t\treturn claims, err\n\t}\n\n\treturn claims, nil\n\n}", "func (a *ACLToken) IsExpired(t time.Time) bool {\n\n\t// Check the token has an expiration time before potentially modifying the\n\t// supplied time. This allows us to avoid extra work, if it isn't needed.\n\tif !a.HasExpirationTime() {\n\t\treturn false\n\t}\n\n\t// Check and ensure the time location is set to UTC. 
This is vital for\n\t// consistency with multi-region global tokens.\n\tif t.Location() != time.UTC {\n\t\tt = t.UTC()\n\t}\n\n\treturn a.ExpirationTime.Before(t) || t.IsZero()\n}", "func HasExpired(dev *schemas.Developer) bool {\n\t// null time or before now\n\treturn dev.Expiration.Equal(time.Time{}) || dev.Expiration.Before(time.Now())\n}", "func CheckJwt(next http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\terr := tokenMiddleware.CheckJWT(w, r)\n\t\tif err != nil {\n\t\t\t//if theres a token validation error then return and dont execute the next handler\n\t\t\treturn\n\t\t} else {\n\t\t\t//token is fine, move to next handler\n\t\t\tnext.ServeHTTP(w, r)\n\t\t}\n\t}\n}", "func refreshToken(ctx iris.Context, u *customClaims) error {\n\tt := time.Now()\n\tu.ExpiresAt = t.Add(expireDelay).Unix()\n\tu.IssuedAt = t.Unix()\n\ttokenString, err := getTokenString(u)\n\tif err != nil {\n\t\treturn err\n\t}\n\tctx.Header(\"Authorization\", \"Bearer \"+tokenString)\n\tctx.Header(\"Access-Control-Expose-Headers\", \"Authorization\")\n\treturn nil\n}" ]
[ "0.7342315", "0.730334", "0.7238402", "0.69408375", "0.6927644", "0.68055797", "0.67880195", "0.67240477", "0.669694", "0.66420776", "0.65723866", "0.6530594", "0.6482227", "0.64696825", "0.6413906", "0.6362124", "0.63578933", "0.6346317", "0.62503326", "0.62242067", "0.62214416", "0.62175035", "0.6216455", "0.62006813", "0.61275244", "0.60996556", "0.6082391", "0.6071554", "0.6054", "0.6050812", "0.60502726", "0.6024296", "0.6020085", "0.60094565", "0.59983546", "0.5997627", "0.5992801", "0.5985159", "0.59246254", "0.59209204", "0.5920673", "0.5915496", "0.59142154", "0.5905943", "0.5893946", "0.5887053", "0.5865581", "0.5863301", "0.5862148", "0.58566004", "0.583915", "0.5835688", "0.58201015", "0.58174497", "0.58161795", "0.58064616", "0.5802601", "0.5792403", "0.57806224", "0.57671475", "0.5753274", "0.57517225", "0.57493114", "0.573703", "0.5720785", "0.57072854", "0.5700663", "0.5693964", "0.5673525", "0.56663585", "0.56541795", "0.56527746", "0.5649226", "0.56480575", "0.56330436", "0.5623507", "0.56185377", "0.56141883", "0.56122565", "0.561216", "0.56112665", "0.5602781", "0.55929476", "0.55918014", "0.55828613", "0.5572705", "0.5572365", "0.557225", "0.5560635", "0.55594844", "0.5554808", "0.5552172", "0.5545566", "0.55336773", "0.5522767", "0.5520245", "0.55192065", "0.5498964", "0.5494532", "0.5493131", "0.54752314" ]
0.0
-1
InitDB removes existing db file and creates a new DB and table for IP
func InitDB() {
	os.Remove("./threat_analyser.db")

	var err error
	db, err = sql.Open("sqlite3", "./threat_analyser.db")
	if err != nil {
		log.Fatal(err)
	}

	createCmd := `
	create table ip (ip_address TEXT PRIMARY KEY, uuid TEXT, created_at DATETIME, updated_at DATETIME, response_code TEXT);
	`
	_, err = db.Exec(createCmd)
	if err != nil {
		log.Fatal("Error creating DB table", err)
		return
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (i *API) InitDB(purge bool) error {\n\tif purge {\n\t\ti.purgeDB()\n\t}\n\treturn i.openDB()\n}", "func InitDatabase(dbName *string, dst ...interface{}) {\n\tlog.Info().Msgf(\"Loading database %v\", *dbName)\n\tvar err error\n\tdbFile = sqlite.Open(fmt.Sprintf(\"%v.db\", *dbName))\n\tdatastore, err = gorm.Open(dbFile, &gorm.Config{\n\t\tDisableForeignKeyConstraintWhenMigrating: true,\n\t})\n\tif err != nil {\n\t\tpanic(\"failed to connect database\")\n\t}\n\n\t// Migrate the schema\n\terr = datastore.AutoMigrate(dst...)\n\tif err != nil {\n\t\tlog.Fatal().Err(err).Msg(\"Migration failed! Please check the logs!\")\n\t}\n}", "func InitDB(dbfile string) error {\n\tif _, err := os.Stat(dbfile); os.IsNotExist(err) {\n\t\terr := createDB(dbfile)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"unable to create db\")\n\t\t}\n\t} else if err != nil {\n\t\treturn errors.Wrap(err, \"unexpected error looking for sqlite3 db file\")\n\t} else {\n\t\tlog.Printf(\"%s found\", dbfile)\n\t}\n\n\treturn nil\n}", "func (env *Env) InitDB(filename string) {\n\t// sqlite3 database\n\tdb, err := gorm.Open(\"sqlite3\", filename)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// setup schema\n\tdb.AutoMigrate(&WineInfo{})\n\n\tenv.db = db\n}", "func InitDBFile(config Config) {\n\tif !FileExists(config.StorePath) {\n\t\terr := os.MkdirAll(config.StorePath, 0755)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tif !FileExists(path.Join(config.StorePath, DefaultDBName)) {\n\t\terr := ioutil.WriteFile(path.Join(config.StorePath, DefaultDBName), []byte(\"\"), 0755)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}", "func InitDb(appConfig *AppConfig) {\n\tlog.Info(\"Initialize database connection\")\n\tDbs = fmt.Sprintf(\"host=%s port=%s user=%s password=%s dbname=%s sslmode=%s\",\n\t\tappConfig.Db.Host,\n\t\tappConfig.Db.Port,\n\t\tappConfig.Db.User,\n\t\tappConfig.Db.Password,\n\t\tappConfig.Db.DbName,\n\t\tappConfig.Db.SSLMode,\n\t)\n\tlog.Info(\"Successfully initialize database connection\")\n\tdb := GetDB()\n\tlog.Info(\"Start table migrations\")\n\tdb.AutoMigrate(\n\t\t&Session{},\n\t)\n\tlog.Info(\"Table migrations achieved\")\n}", "func InitDB(setting *domain.GlobalConfig) {\n\tsource := \"\"\n\tswitch setting.DBType {\n\tcase domain.SQLITE3:\n\t\tlogrus.Info(\"InitDB has done when new client, skip.\")\n\t\treturn\n\tcase domain.MYSQL:\n\t\tsource = fmt.Sprintf(\"%s:%s@tcp(%s:%s)/\",\n\t\t\tsetting.DBUser, setting.DBPassword, setting.DBHost, setting.DBPort)\n\tdefault:\n\t\tsource = fmt.Sprintf(\"%s:%s@tcp(%s:%s)/\",\n\t\t\tsetting.DBUser, setting.DBPassword, setting.DBHost, setting.DBPort)\n\t}\n\n\tdb, err := sql.Open(setting.DBType, source)\n\tif err != nil {\n\t\tlogrus.Fatalf(\"connection to db error: %s\", err)\n\t}\n\tdefer db.Close()\n\n\tsql := \"CREATE DATABASE IF NOT EXISTS \" + setting.DBName + \";\"\n\t_, err = db.Exec(sql)\n\tif err != nil {\n\t\tlogrus.Fatalf(\"create db %s error: %v\", setting.DBName, err)\n\t}\n}", "func InitDb(conf config.Config, reset bool) error {\n\tif !IsOpen() {\n\t\tif err := openAdapter(conf); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn adp.CreateDb(reset)\n}", "func (p *Pool) initDB() error {\n\t// Create and open the database.\n\tdb, err := database.OpenDB(p.cfg.DBFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp.db = db\n\terr = database.CreateBuckets(p.db)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Check if the pool mode changed since the last run.\n\tvar switchMode bool\n\terr = db.View(func(tx *bolt.Tx) error 
{\n\t\tpbkt := tx.Bucket(database.PoolBkt)\n\t\tif pbkt == nil {\n\t\t\treturn err\n\t\t}\n\n\t\tv := pbkt.Get(database.SoloPool)\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tspMode := binary.LittleEndian.Uint32(v) == 1\n\t\tif p.cfg.SoloPool != spMode {\n\t\t\tswitchMode = true\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// If the pool mode changed, backup the current database and purge all data\n\t// for a clean slate with the updated pool mode.\n\tif switchMode {\n\t\tpLog.Info(\"Pool mode changed, backing up database before purge.\")\n\t\terr := database.Backup(p.db)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = database.Purge(p.db)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// If the pool mode did not change, upgrade the database if there is a\n\t// pending upgrade.\n\tif !switchMode {\n\t\terr = database.Upgrade(p.db)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func InitDB() (*gorm.DB, error) {\n\tdb, err := gorm.Open(\"sqlite3\", \"./url.db\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdb.LogMode(true)\n\tmodels.Migrate(db)\n\treturn db, err\n}", "func (db *Postgres) initDB() error {\n\t// Create the schema\n\t// @afiune Can we rename this library?\n\tif err := migrator.Migrate(db.URI, db.SchemaPath); err != nil {\n\t\treturn errors.Wrapf(err, \"Unable to create database schema. [path:%s]\", db.SchemaPath)\n\t}\n\n\t// Add the tables to the database mappings\n\tdb.AddTableWithName(deployment{}, \"deployment\").SetKeys(true, \"id\")\n\tdb.AddTableWithName(supervisor{}, \"supervisor\").SetKeys(true, \"id\")\n\tdb.AddTableWithName(serviceGroup{}, \"service_group\").SetKeys(true, \"id\")\n\tdb.AddTableWithName(service{}, \"service\").SetKeys(true, \"id\")\n\n\t//return db.CreateTablesIfNotExists() // I don't think we can ensure the foreign keys\n\treturn nil\n}", "func InitDB() {\n\tvar err error\n\tvar dsn = os.Getenv(\"REVELAPP_DBUSER\") +\n\t\t\":\" + os.Getenv(\"REVELAPP_DBPASSWD\") +\n\t\t\"@\" + os.Getenv(\"REVELAPP_DBHOSTNAME\") +\n\t\t\"/\" + os.Getenv(\"REVELAPP_DBNAME\") +\n\t\t\"?parseTime=true&loc=Asia%2FTokyo\"\n\t// open db\n\tGdb, err = gorm.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\tprintln(\"FATAL\", err)\n\t\tpanic(err)\n\t}\n\tautoMigrate()\n\t// unique index if need\n\t//Gdb.Model(&models.User{}).AddUniqueIndex(\"idx_user_name\", \"name\")\n}", "func (link *Link) InitDB(exec sqlx.Execer, dbName string) (errExec error) {\n\t_, errExec = exec.Exec(`CREATE DATABASE IF NOT EXISTS ` + dbName)\n\tif errExec != nil {\n\t\treturn errExec\n\t}\n\n\t_, errExec = exec.Exec(`USE ` + dbName)\n\treturn errExec\n}", "func initdb() {\n\tdir, _ := os.Getwd()\n\tdatabasepath := filepath.Join(dir, \"tasks.db\")\n\tdbrepository.InitDatabase(databasepath)\n}", "func (sql *SqlConnection) InitDB() error {\n\n\tvar err error\n\n\t// open a db connection //\n\tsql.Db, err = gorm.Open(\"sqlite3\", \"/var/tmp/tennis.db\")\n\tif err != nil {\n\t\tfmt.Println(\"Failed to connect database : \", err.Error())\n\t}\n\tsql.Db.LogMode(true)\n\n\treturn err\n}", "func InitDB(path string) error {\n\tvar err error\n\tdb, err = bolt.Open(path, 0600, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = db.Update(func(tx *bolt.Tx) error {\n\t\tif _, err := tx.CreateBucketIfNotExists([]byte(\"dailymeetings\")); err != nil {\n\t\t\treturn fmt.Errorf(\"dbutils: create bucket: %s\", err)\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn db.Update(func(tx *bolt.Tx) error {\n\t\tif 
_, err := tx.CreateBucketIfNotExists([]byte(\"predefinedreplies\")); err != nil {\n\t\t\treturn fmt.Errorf(\"dbutils: create bucket: %s\", err)\n\t\t}\n\t\treturn nil\n\t})\n}", "func InitDb(drop bool) {\n\tdb, err := dbOpen()\n\tdefer db.Close()\n\tif drop {\n\t\tstatement, err := db.Prepare(\"DROP TABLE IF EXISTS mail\")\n\t\tdefer statement.Close()\n\t\tcheckError(err)\n\t\tstatement.Exec()\n\t}\n\tstatement, err := db.Prepare(\n\t\t\"CREATE TABLE IF NOT EXISTS mail (id INTEGER PRIMARY KEY, sender TEXT, receiver TEXT, subject TEXT, text TEXT, html TEXT)\",\n\t)\n\tdefer statement.Close()\n\tcheckError(err)\n\tstatement.Exec()\n}", "func InitDB(dbPath string) error {\n\tvar err error\n\n\tdb, err = bolt.Open(\n\t\tdbPath, 0600, &bolt.Options{Timeout: 1 * time.Second})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn db.Update(func(tx *bolt.Tx) error {\n\t\t_, err := tx.CreateBucketIfNotExists(taskBucket)\n\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"create bucket: %s\", err)\n\t\t}\n\t\treturn nil\n\t})\n}", "func InitDB(ctx *cli.Context) error {\n\tchainID := ctx.String(\"chainID\")\n\tregDb := ConnectToDB(ctx.String(\"dataport\"), ctx.String(\"passwd\"), ctx.Int(\"database\"))\n\tif Exists(regDb, \"chainConfig\") {\n\t\tresult := Get(regDb, \"chainConfig\")\n\t\tchainConfig := new(Identity)\n\t\tif err := json.Unmarshal([]byte(result), &chainConfig); err != nil {\n\t\t\tutils.Fatalf(\"Failed to initialise database: %v\", err)\n\t\t}\n\t\tif chainConfig.ID == chainID {\n\t\t\tfmt.Println(\"Database has been initialised by chainID\", chainID, \"sometimes before\")\n\t\t} else {\n\t\t\tutils.Fatalf(\"Database has been initialised by chainID \" + chainConfig.ID)\n\t\t}\n\t} else {\n\t\terr := Set(regDb, \"chainConfig\", &Identity{\n\t\t\tName: \"\",\n\t\t\tID: chainID,\n\t\t\tHashky: \"\",\n\t\t\tExtInfo: \"\",\n\t\t})\n\t\tif err != nil {\n\t\t\tutils.Fatalf(\"Failed to initialise database: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}", "func (db *DB) Init() (dbCreated bool, err error) {\n\tdbCreated = false\n\n\t// Ensure root exists\n\terr = os.MkdirAll(db.Root, 0755)\n\tif err != nil {\n\t\treturn dbCreated, err\n\t}\n\n\t// Ensure database exists\n\t_, err = os.Stat(db.DBPath)\n\tif err == nil {\n\t\treturn dbCreated, nil // exists\n\t}\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn dbCreated, err // unexpected error\n\t}\n\n\t// Database does not exist - create\n\tfh, err := os.Create(db.DBPath)\n\tfh.Close()\n\tif err != nil {\n\t\treturn dbCreated, err\n\t}\n\tdbCreated = true\n\n\t// Ensure configfile exists\n\t_, err = os.Stat(db.ConfigPath)\n\tif err == nil {\n\t\t_, err := db.readConfig()\n\t\tif err != nil {\n\t\t\tif err != ErrNotFound {\n\t\t\t\treturn dbCreated, err\n\t\t\t}\n\t\t}\n\t\terr = db.appendConfigString(db.configPlaceholder())\n\t\tif err != nil {\n\t\t\treturn dbCreated, err\n\t\t}\n\t} else {\n\t\t// Create a placeholder config file for domain\n\t\terr = db.writeConfigString(db.configPlaceholder())\n\t\tif err != nil {\n\t\t\treturn dbCreated, err\n\t\t}\n\t}\n\n\treturn dbCreated, nil\n}", "func (d *DB) Init(c *Controller, dbFile string) error {\n\td.c = c\n\tdb, err := sql.Open(\"sqlite3\", dbFile)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\td.db = db\n\t//defer d.db.Close()\n\n\t_, err = d.db.Exec(`\n create table if not exists articles(\n\t\t\tid integer not null primary key,\n\t\t\tfeed text,\n\t\t\ttitle text,\n\t\t\tcontent text,\n\t\t\tlink text,\n\t\t\tread bool,\n\t\t\tdisplay_name string,\n\t\t\tdeleted bool,\n\t\t\tpublished 
DATETIME\n\t\t);`)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func InitDatabase(dbPath string, dbFile string, force bool) {\n\tif dbPath == \"\" {\n\t\tDbPath = defaultDbPath\n\t} else {\n\t\tDbPath = dbPath\n\t}\n\tif dbFile == \"\" {\n\t\tDbFile = defaultDbFile\n\t} else {\n\t\tDbFile = dbFile\n\t}\n\n\texisted, err := pathutil.DirExists(DbPath)\n\tcheckError(err)\n\tif existed {\n\t\tif force {\n\t\t\tos.RemoveAll(DbPath)\n\t\t\tlog.Info(\"Remove old dbPath and recreate: %s\", DbPath)\n\t\t\tos.MkdirAll(DbPath, os.ModePerm)\n\t\t} else {\n\t\t\tlog.Info(\"Database directory (%s) existed. Nothing happended. Use --force to reinit\", DbPath)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tos.MkdirAll(DbPath, os.ModePerm)\n\t}\n}", "func InitDB(config *defs.Config) error {\n\t// Connect as the superuser.\n\tvar db, err = sql.Open(\"postgres\",\n\t\t\"user=postgres dbname=postgres sslmode=disable\")\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = db.Ping()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar statements = [][]string{\n\t\t{\"Dropping old database and roles...\",\n\t\t\t\"DROP DATABASE IF EXISTS \" + config.DatabaseName},\n\t\t{\"\", \"DROP ROLE IF EXISTS \" + config.DatabaseUserName},\n\t\t{\"Creating new database and roles...\",\n\t\t\t\"CREATE ROLE \" + config.DatabaseUserName + \" WITH LOGIN\"},\n\t\t{\"\", \"CREATE DATABASE \" + config.DatabaseName +\n\t\t\t\" WITH OWNER \" + config.DatabaseUserName},\n\t}\n\tfor _, s := range statements {\n\t\tif s[0] != \"\" {\n\t\t\tlog.Println(s[0])\n\t\t}\n\t\t_, err = db.Exec(s[1])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func InitDb() {\n\tdbConnection.MustExec(schema)\n}", "func initializeDB(path string) error {\n\tstore, err := buntdb.Open(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer store.Close()\n\n\terr = store.Update(func(tx *buntdb.Tx) error {\n\t\t// set schema version\n\t\ttx.Set(keySchemaVersion, strconv.Itoa(latestDbSchema), nil)\n\t\ttx.Set(keyCloakSecret, utils.GenerateSecretKey(), nil)\n\t\treturn nil\n\t})\n\n\treturn err\n}", "func InitDB(dbPath string) *gorm.DB {\n\tdb, err := gorm.Open(\"sqlite3\", dbPath)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tdb.AutoMigrate(&GPSDataSet{})\n\n\treturn db\n}", "func initDB(db *sql.DB) error {\n\t_, err := db.Exec(`CREATE TABLE devices (uid varchar(1000), timestamp timestamp, version varchar(1000), data varchar(5000), PRIMARY KEY (uid, timestamp));`)\n\treturn err\n}", "func (db *DbCtxt) InitDatabase() error {\n\tvar models []interface{}\n\tmodels = append(models,\n\t\t&Hotel{},\n\t\t&Room{},\n\t\t&RatePlan{},\n\t)\n\tfor _, model := range models {\n\t\terr := db.client.AutoMigrate(model)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func InitDB() (*gorm.DB, error) {\n\t// attempt to open a new connection to the db\n\tglog.Info(\"Opening a new connection to the db...\")\n\tconnStr := fmt.Sprintf(\n\t\t\"%s:%s@(%s)/%s?charset=utf8&parseTime=True&loc=Local\", \n\t\tconfig.DbUsername, config.DbPassword, config.DbHostName, config.DbName,\n\t)\n\tdb, err := gorm.Open(config.DbDriver, connStr);\n\tif err != nil {\n\t\treturn db, err\n\t}\n\treturn db, err\n}", "func (c *PostgresClient) InitDB(models []interface{}) (*gorm.DB, error) {\n\tc.LogConfig()\n\terr := c.Connect()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.DB.LogMode(c.LogMode)\n\tc.CreateDBExtensions()\n\tc.Migrate(models)\n\treturn c.DB, nil\n}", "func InitDB(path string) error {\n\tdb, err := sql.Open(\"sqlite3\", 
path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer db.Close()\n\tfor _, tableSQL := range SQLCreateTables() {\n\t\tif _, err := db.Exec(tableSQL); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Use Write Ahead Logging which improves SQLite concurrency.\n\t// Requires SQLite >= 3.7.0\n\tif _, err := db.Exec(\"PRAGMA journal_mode = WAL\"); err != nil {\n\t\treturn err\n\t}\n\n\t// Check if the WAL mode was set correctly\n\tvar journalMode string\n\tif err = db.QueryRow(\"PRAGMA journal_mode\").Scan(&journalMode); err != nil {\n\t\tlog.Fatalf(\"Unable to determine sqlite3 journal_mode: %v\", err)\n\t}\n\tif journalMode != \"wal\" {\n\t\tlog.Fatal(\"SQLite Write Ahead Logging (introducted in v3.7.0) is required. See http://perkeep.org/issue/114\")\n\t}\n\n\t_, err = db.Exec(fmt.Sprintf(`REPLACE INTO meta VALUES ('version', '%d')`, SchemaVersion()))\n\treturn err\n}", "func Init(dbpath string) {\n\tdatabase.db, err = sql.Open(\"sqlite3\", dbpath+\"?loc=auto&parseTime=true\")\n\t// database.db, err = sql.Open(\"mysql\", \"Username:Password@tcp(Host:Port)/standardnotes?parseTime=true\")\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif database.db == nil {\n\t\tlog.Fatal(\"db nil\")\n\t}\n\tdatabase.createTables()\n}", "func InitDb() *gorm.DB {\n\t// Openning file\n\tdb, err := gorm.Open(\"sqlite3\", \"./data.db\")\n\t// Display SQL queries\n\tdb.LogMode(true)\n\n\t// Error\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t// Creating the table\n\tif !db.HasTable(&Users{}) {\n\t\tdb.CreateTable(&Users{})\n\t\tdb.Set(\"gorm:table_options\", \"ENGINE=InnoDB\").CreateTable(&Users{})\n\t}\n\n\treturn db\n}", "func initDB(dbFile string) (*sql.DB, error) {\n\n\tDB, err := sql.Open(\"sqlite3\", dbFile)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error opening Database connection: %v\", err)\n\t\treturn nil, err\n\t}\n\treturn DB, nil\n}", "func InitDB() *DB {\n\treturn &DB{\n\t\tActiveReq: InitActReq(),\n\t\tInactiveReq: &InactiveReq{},\n\t}\n}", "func DBInit() {\n\t// Mode = \"PRODUCTION\"\n\t// if Mode == \"PRODUCTION\" {\n\t// \tDatabaseURL = \"test.sqlite3\"\n\t// \tDatabaseName = \"sqlite3\"\n\t// } else if Mode == \"DEPLOY\" {\n\tDatabaseURL = os.Getenv(\"DATABASE_URL\")\n\tDatabaseName = \"postgres\"\n\t// }\n\n\tdb, err := gorm.Open(DatabaseName, DatabaseURL)\n\tif err != nil {\n\t\tpanic(\"We can't open database!(dbInit)\")\n\t}\n\t//残りのモデルはまだ入れてない。\n\tdb.AutoMigrate(&model.Post{})\n\tdb.AutoMigrate(&model.User{})\n\tdb.AutoMigrate(&model.Room{})\n\tdefer db.Close()\n}", "func InitDatabase(db *sql.DB) {\n\tcreateLinksTableSQL := `CREATE TABLE IF NOT EXISTS links (\n\t\t\"id\" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n\t\t\"url\" TEXT,\n\t\t\"created_at\" TEXT\n\t);`\n\n\tstatement, err := db.Prepare(createLinksTableSQL)\n\tif err != nil {\n\t\tlog.Printf(\"Error creating links table: %v\\n\", err)\n\t}\n\tstatement.Exec()\n}", "func initDB(databaseFile string) {\n\tvar err error\n\tdb, err = sql.Open(\"sqlite3\", databaseFile)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(\"Initalized connection with \" + databaseFile + \"!\")\n}", "func DBInit(file string) *PSQLService {\n\tcfg := config.NewConfig(file)\n\ts := cfg.Service\n\tpsql := fmt.Sprintf(psqlInfo, s.Host, s.Port, s.User, s.Password, s.Name)\n\tdb, err := sql.Open(driverName, psql)\n\tif err != nil {\n\t\tlog.Printf(\"Error opening SQL db: %s\", err.Error())\n\t}\n\terr = db.Ping()\n\tif err != nil {\n\t\tlog.Printf(\"Error pingng SQL db: %s\", err.Error())\n\t}\n\treturn &PSQLService{\n\t\tDB: db,\n\t}\n}", "func (r 
*DarwinTimetable) initDB() error {\n\n\tbuckets := []string{\n\t\t\"Meta\",\n\t\t\"DarwinAssoc\",\n\t\t\"DarwinJourney\"}\n\n\treturn r.db.Update(func(tx *bolt.Tx) error {\n\n\t\tfor _, n := range buckets {\n\t\t\tvar nb []byte = []byte(n)\n\t\t\tif bucket := tx.Bucket(nb); bucket == nil {\n\t\t\t\tif _, err := tx.CreateBucket(nb); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n}", "func InitDB() error {\n\tvar clientPath string = path.Join(dbPath, \"clients\")\n\tvar serverPath string = path.Join(dbPath, \"server\")\n\tvar serverInterfacePath string = path.Join(serverPath, \"interfaces.json\")\n\tvar serverKeyPairPath string = path.Join(serverPath, \"keypair.json\")\n\tvar globalSettingPath string = path.Join(serverPath, \"global_settings.json\")\n\tvar userPath string = path.Join(serverPath, \"users.json\")\n\n\t// create directories if they do not exist\n\tif _, err := os.Stat(clientPath); os.IsNotExist(err) {\n\t\tos.MkdirAll(clientPath, os.ModePerm)\n\t}\n\tif _, err := os.Stat(serverPath); os.IsNotExist(err) {\n\t\tos.MkdirAll(serverPath, os.ModePerm)\n\t}\n\n\t// server's interface\n\tif _, err := os.Stat(serverInterfacePath); os.IsNotExist(err) {\n\t\tdb, err := DBConn()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tserverInterface := new(model.ServerInterface)\n\t\tserverInterface.Addresses = []string{defaultServerAddress}\n\t\tserverInterface.ListenPort = defaultServerPort\n\t\tserverInterface.UpdatedAt = time.Now().UTC()\n\t\tdb.Write(\"server\", \"interfaces\", serverInterface)\n\t}\n\n\t// server's key pair\n\tif _, err := os.Stat(serverKeyPairPath); os.IsNotExist(err) {\n\t\tdb, err := DBConn()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tkey, err := wgtypes.GeneratePrivateKey()\n\t\tif err != nil {\n\t\t\treturn scribble.ErrMissingCollection\n\t\t}\n\t\tserverKeyPair := new(model.ServerKeypair)\n\t\tserverKeyPair.PrivateKey = key.String()\n\t\tserverKeyPair.PublicKey = key.PublicKey().String()\n\t\tserverKeyPair.UpdatedAt = time.Now().UTC()\n\t\tdb.Write(\"server\", \"keypair\", serverKeyPair)\n\t}\n\n\t// global settings\n\tif _, err := os.Stat(globalSettingPath); os.IsNotExist(err) {\n\t\tdb, err := DBConn()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpublicInterface, err := GetPublicIP()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tglobalSetting := new(model.GlobalSetting)\n\t\tglobalSetting.EndpointAddress = publicInterface.IPAddress\n\t\tglobalSetting.DNSServers = []string{defaultDNS}\n\t\tglobalSetting.MTU = defaultMTU\n\t\tglobalSetting.PersistentKeepalive = defaultPersistentKeepalive\n\t\tglobalSetting.ConfigFilePath = defaultConfigFilePath\n\t\tglobalSetting.UpdatedAt = time.Now().UTC()\n\t\tdb.Write(\"server\", \"global_settings\", globalSetting)\n\t}\n\n\t// user info\n\tif _, err := os.Stat(userPath); os.IsNotExist(err) {\n\t\tdb, err := DBConn()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tuser := new(model.User)\n\t\tuser.Username = getCredVar(username_env_var, defaultUsername)\n\t\tuser.Password = getCredVar(password_env_var, defaultPassword)\n\t\tdb.Write(\"server\", \"users\", user)\n\t}\n\n\treturn nil\n}", "func (d *DB) InitDB() {\n\tdb, err := gorm.Open(\"mysql\", \"root@/users?charset=utf8&parseTime=True&loc=Local\")\n\tif err != nil {\n\t\tpanic(\"failed to connect to the database :(\")\n\t}\n\t// defer db.Close()\n\n\tdb.AutoMigrate(&User{})\n\n\td.db = db\n\td.CodeMap = make(map[string]CodeItem)\n\td.CodeMap[\"\"] = CodeItem{Code: -1}\n}", "func InitDB() {\n\tvar err 
error\n\tdb, err = sql.Open(\"sqlite3\", \"../../resources/imgtag.db\")\n\tif err != nil {\n\t\tlog.Fatal(\"can't open db\", err)\n\t}\n}", "func (st *Store) initDB() error {\n\n\tvar err error\n\n\tver, err := st.schemaVersion()\n\tif err != nil {\n\t\treturn err\n\t}\n\tswitch ver {\n\tcase 0:\n\t\t// starting from scratch\n\t\tschema := `\nCREATE TABLE url (\n\tid INTEGER PRIMARY KEY,\n\turl TEXT NOT NULL,\n\thash TEXT NOT NULL,\n\tpage_id INTEGER NOT NULL,\n\tcreated TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,\n\tFOREIGN KEY(page_id) REFERENCES page(id)\n);\n\nCREATE TABLE page (\n\tid INTEGER PRIMARY KEY,\n\tcanonical_url TEXT NOT NULL,\n\ttitle TEXT NOT NULL,\n\tcreated TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL\n);\n\nCREATE TABLE warning (\n\tid INTEGER PRIMARY KEY,\n\tpage_id INTEGER NOT NULL,\n\tkind TEXT NOT NULL,\n\tquant INT NOT NULL,\n\tcreated TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,\n\tFOREIGN KEY(page_id) REFERENCES page(id)\n);\n\n\nCREATE TABLE version (\n\tver INTEGER NOT NULL );\n\nINSERT INTO version (ver) VALUES (1);\n`\n\t\t//\t\t`CREATE INDEX article_tag_artid ON article_tag(article_id)`,\n\t\t//\t\t`CREATE INDEX article_url_artid ON article_url(article_id)`,\n\n\t\t_, err = st.db.Exec(schema)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbreak\n\tcase 1: // all good. this is what we're expecting\n\t\tbreak\n\tdefault:\n\t\treturn fmt.Errorf(\"Bad db schema version (expected 1, got %d)\", ver)\n\t}\n\n\treturn nil\n}", "func createNewDB(log *logrus.Entry, cnf *Config) error {\n\tvar err error\n\n\tpsqlInfo := fmt.Sprintf(\"host=%s port=%d user=%s password=%s dbname=%s sslmode=disable\",\n\t\tcnf.DBHost, cnf.DBPort, cnf.DBUser, cnf.DBPassword, cnf.DBName)\n\n\tdb, err = sql.Open(\"postgres\", psqlInfo)\n\n\tif err != nil {\n\t\tlog.WithError(err).Fatalf(\"Failed to connect to db\")\n\t}\n\n\t//try to ping the db\n\terr = db.Ping()\n\tif err != nil {\n\t\tlog.WithError(err).Fatalf(\"Failed to ping db\")\n\t}\n\n\tif err = helpers.MigrateDB(db, cnf.SQLMigrationDir); err != nil {\n\t\treturn err\n\t}\n\n\tboil.SetDB(db)\n\n\treturn nil\n}", "func (src *DataSrc) InitDB(conf *viper.Viper) error {\n\tdbEngine := conf.GetString(\"db.engine\")\n\tdbHost := conf.GetString(\"db.host\")\n\tdbPort := conf.GetString(\"db.port\")\n\tdbAddr := dbHost + \":\" + dbPort\n\tdbName := conf.GetString(\"db.name\")\n\tdbUser := conf.GetString(\"db.user\")\n\tdbPassword := conf.GetString(\"db.password\")\n\n\tvar errdb error\n\tvar db *sqlx.DB\n\tif dbEngine == \"postgres\" {\n\t\tdb, errdb = sqlx.Connect(\"postgres\", \"host=\"+dbHost+\" port=\"+dbPort+\" user=\"+dbUser+\" password=\"+dbPassword+\" dbname=\"+dbName+\" sslmode=disable\")\n\t\tif errdb != nil {\n\t\t\tfmt.Println(\"Error connectiing to DB \", errdb)\n\t\t\treturn errdb\n\t\t}\n\t\tsrc.DB = db\n\n\t} else if dbEngine == \"mysql\" {\n\t\tdb, errdb = sqlx.Connect(\"mysql\", dbUser+\":\"+dbPassword+\"@\"+dbAddr+\"/\"+dbName+\"?charset=utf8&parseTime=True&loc=Local\")\n\t\tif errdb != nil {\n\t\t\tfmt.Println(\"Error connectiing to DB \", errdb)\n\t\t\treturn errdb\n\t\t}\n\t\tsrc.DB = db\n\n\t} else if dbEngine == \"sqlite\" {\n\t\tdb, errdb = sqlx.Connect(\"sqlite3\", dbName)\n\t\tif errdb != nil {\n\t\t\tfmt.Println(\"Error connecting to DB \", errdb)\n\t\t\treturn errdb\n\t\t}\n\t\tsrc.DB = db\n\t}\n\treturn nil\n\n}", "func InitDB() *gorm.DB {\n\tconf := envloader.LoadConfig()\n\tdb, err := gorm.Open(\"sqlite3\", \"db/\"+conf.ENV+\".db\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn db\n}", "func 
InitDb() {\n\tconfig, err := dbConfig()\n\tif err != nil {\n\t\tglog.Fatalln(err)\n\t}\n\tpsqlInfo := fmt.Sprintf(\"host=%s port=%s user=%s \"+\n\t\t\"password=%s dbname=%s sslmode=disable\",\n\t\tconfig[dbhost], config[dbport],\n\t\tconfig[dbuser], config[dbpass], config[dbname])\n\n\tDb, err = sql.Open(\"postgres\", psqlInfo)\n\tif err != nil {\n\t\tglog.Fatalln(err)\n\t}\n\terr = Db.Ping()\n\tif err != nil {\n\t\tglog.Fatalln(err)\n\t}\n\tglog.Infoln(\"Successfully connected to Database!\")\n\t// Create table in database if not present\n\tcreateTable()\n}", "func dbInit(dbc co.DbConnectionRequest) {\n\tdb, err := sql.Open(\"mysql\", dbc.User+\":\"+dbc.Pwd+\"@tcp(\"+dbc.Server+\":\"+dbc.Port+\")/\")\n\tif err != nil {\n\t\tpanic(err.Error()) // proper error handling instead of panic in your app\n\t}\n\tfor _, stmt := range organizationsSchema {\n\t\tfmt.Println(stmt)\n\t\t_, err := db.Exec(stmt)\n\t\tif err != nil {\n\t\t\tpanic(err.Error()) // proper error handling instead of panic in your app\n\t\t}\n\t}\n\tdb.Close()\n\treturn\n}", "func (c *Client) InitDb(db *rds.DBInstance, password, dbname string) error {\n\tport := strconv.FormatInt(*db.Endpoint.Port, 10)\n\thost := *db.Endpoint.Address\n\trdsEngine := *db.Engine\n\tuser := *db.MasterUsername\n\n\tvar engine string\n\n\tif v, ok := engineType[rdsEngine]; ok {\n\t\tengine = v\n\t}\n\n\tvar args string\n\n\tif e, ok := engineConnection[rdsEngine]; ok {\n\t\targs = fmt.Sprintf(e,\n\t\t\thost, port, user, password, dbname)\n\t}\n\n\tvar err error\n\n\tc.DB, err = sql.Open(engine, args)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Couldn't open connection to database\")\n\t\treturn err\n\t}\n\n\terr = c.DB.Ping()\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Couldn't ping database\")\n\t\treturn err\n\t}\n\treturn nil\n}", "func initDb(db *sql.DB) error {\n\n\t_, err := db.Exec(`CREATE TABLE IF NOT EXISTS resource_metadata (\n\t\t\tid TEXT NOT NULL,\n\t\t\ttype TEXT NOT NULL,\n\t\t\tcreated_at TIMESTAMP NOT NULL,\n\t\t\tupdated_at TIMESTAMP NOT NULL,\n\t\t\tdeleted_at TIMESTAMP,\n\t\t\tparams JSONB NOT NULL,\n\t\t\tdata JSONB NOT NULL,\n\t\t\tPRIMARY KEY (id)\n\t)`)\n\tif err != nil {\n\t\tlog.Println(\"Unable to create resource_metadata table.\")\n\t\treturn fmt.Errorf(\"create resource_metadata table: %w\", err)\n\t}\n\n\treturn nil\n}", "func DbInit() (*sql.DB, error) {\n\tvar db *sql.DB\n\tvar err error\n\tenv := EnvVars()\n\tparams := fmt.Sprintf(\"postgres://%s@%s/%s?sslmode=disable\",\n\t\tenv.DbUser, env.DbHost, env.Db)\n\tdb, err = sql.Open(\"postgres\", params)\n\treturn db, err\n}", "func (db *EdDb) initDbSchema() (err error) {\n\n\t// First, the persistent parts of the database (main.), then the\n\t// ephemeral parts (mem.)\n\t_, err = db.dbConn.Exec(`\n\n CREATE TABLE IF NOT EXISTS main.person (\n id INTEGER PRIMARY KEY,\n name TEXT NOT NULL,\n numUpdates INTEGER DEFAULT 0\n );\n\n CREATE TABLE mem.sessionActivity (\n id INTEGER PRIMARY KEY,\n personId INTEGER NOT NULL,\n dateTime DATETIME DEFAULT CURRENT_TIMESTAMP\n );\n `)\n\treturn\n}", "func InitDB(host string, dbname string, user string, password string) (*sql.DB, error) {\n\tdb, err := sql.Open(\n\t\t\"postgres\", fmt.Sprintf(\"host=%s user=%s password=%s dbname=%s sslmode=disable\", host, user, password, dbname))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = db.Ping(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn db, nil\n}", "func InitDB(cfg abcconfig.DBConfig) error {\n\t// No username provided is a signal to skip database usage\n\tif 
len(cfg.User) == 0 {\n\t\treturn nil\n\t}\n\n\tconnStr, err := abcdatabase.GetConnStr(cfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tDB, err = sql.Open(cfg.DB, connStr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp := DB.Ping()\n\tif p != nil {\n\t\treturn p\n\t}\n\n\treturn nil\n}", "func Init(CDB config.DatabaseConfig) (db *DataBase, err error) {\n\n\t// for local launch\n\tif os.Getenv(CDB.URL) == \"\" {\n\t\tos.Setenv(CDB.URL, \"user=db_forum_user password=db_forum_password dbname=db_forum sslmode=disable\")\n\t}\n\n\tvar database *sql.DB\n\tif database, err = sql.Open(CDB.DriverName, os.Getenv(CDB.URL)); err != nil {\n\t\tutils.PrintDebug(\"database/Init cant open:\" + err.Error())\n\t\treturn\n\t}\n\n\tdb = &DataBase{\n\t\tDb: database,\n\t}\n\tdb.Db.SetMaxOpenConns(CDB.MaxOpenConns)\n\n\tif err = db.Db.Ping(); err != nil {\n\t\tutils.PrintDebug(\"database/Init cant access:\" + err.Error())\n\t\treturn\n\t}\n\tutils.PrintDebug(\"database/Init open\")\n\tif err = db.CreateTables(); err != nil {\n\t\treturn\n\t}\n\treturn\n}", "func DBInit() *gorm.DB {\n\t//db, err := gorm.Open(\"mysql\", \"root:@tcp(128.199.211.144:3306)/godb?charset=utf8&parseTime=True&loc=Local\")\n\tdb, err := gorm.Open(\"mysql\",\"root:orion2402@tcp(localhost:3306)/popfren?charset=utf8&parseTime=True&loc=Local\")\n\tif err != nil {\n\t\tpanic(\"failed to connect to database\")\n\t}\n\n\tdb.AutoMigrate(structs.Person{})\n\treturn db\n}", "func InitDB(path string) error {\n\tif err := checkDBReadyForInit(path); err != nil {\n\t\treturn err\n\t}\n\n\tif err := initializeDB(path); err != nil {\n\t\treturn fmt.Errorf(\"Could not save datastore: %w\", err)\n\t}\n\treturn nil\n}", "func initSqlDB(sqlFName string) error {\n\tcmd := exec.Command(\"mysql\")\n\tfName, err := filepath.Abs(sqlFName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcmd.Stdin = strings.NewReader(fmt.Sprintf(\"source %s\", fName))\n\tstdoutStderr, err := cmd.CombinedOutput()\n\tlog.Printf(\"%s\\n\", stdoutStderr)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func InitDb(host string, user string, port int, sslmode string, dbName string, password string) (interfaces.Database, error) {\n\tconnStr := fmt.Sprintf(\n\t\t\"host=%s user=%s port=%d sslmode=%s dbname=%s\",\n\t\thost, user, port, sslmode, dbName,\n\t)\n\tif password != \"\" {\n\t\tconnStr += fmt.Sprintf(\" password=%s\", password)\n\t}\n\tdb, err := sql.Open(\"postgres\", connStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdb.SetMaxIdleConns(5)\n\tdb.SetMaxOpenConns(10)\n\n\tdbmap := &gorp.DbMap{\n\t\tDb: db,\n\t\tDialect: gorp.PostgresDialect{},\n\t\tTypeConverter: util.TypeConverter{},\n\t}\n\n\tdbmap.AddTableWithName(Game{}, \"games\").SetKeys(true, \"ID\")\n\tdbmap.AddTableWithName(Player{}, \"players\").SetKeys(true, \"ID\")\n\tdbmap.AddTableWithName(EncryptedPlayer{}, \"encrypted_players\")\n\tdbmap.AddTableWithName(Clan{}, \"clans\").SetKeys(true, \"ID\")\n\tdbmap.AddTableWithName(Membership{}, \"memberships\").SetKeys(true, \"ID\")\n\tdbmap.AddTableWithName(Hook{}, \"hooks\").SetKeys(true, \"ID\")\n\n\t// dbmap.TraceOn(\"[gorp]\", log.New(os.Stdout, \"KHAN:\", log.Lmicroseconds))\n\treturn egorp.New(dbmap, dbName), nil\n}", "func InitDB() {\n\tdatabase, err := sql.Open(\"mysql\", \"jiraiya:Shivi<323@tcp(database-1.caqh2nel7qhl.us-east-2.rds.amazonaws.com:3306)/cumul\")\n\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\tDB = database\n}", "func InitDB() {\n\tvar err error\n\tvar connectionString = fmt.Sprintf(\"%v:%v@%v/%v\", dbUser, dbPassword, dbHost, 
dbName)\n\tlog.Println(\"Connection String: \" + connectionString)\n\tdb, err = sql.Open(\"mysql\", connectionString)\n\tdbmap = &gorp.DbMap{Db: db, Dialect: gorp.MySQLDialect{Engine: \"InnoDB\", Encoding: \"UTF8\"}}\n\tdbmap.TraceOn(\"[gorp]\", log.New(os.Stdout, \"myapp:\", log.Lmicroseconds))\n\tif err != nil {\n\t\tlog.Println(\"Failed to connect to database: \")\n\t\tlog.Panic(err)\n\t} else {\n\t\terr = db.Ping()\n\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to ping database: \")\n\t\t\tlog.Panic(err)\n\t\t} else {\n\t\t\tlog.Println(\"Database connected.\")\n\t\t}\n\t}\n\n\t_ = dbmap.AddTableWithName(Article{}, \"flat_articles\").SetKeys(false, \"ID\")\n\tdbmap.CreateTablesIfNotExists()\n}", "func InitDB() {\n\tconnStr := \"user=osama dbname=hackernews password=ibnjunaid \"\n\t// Use root:dbpass@tcp(172.17.0.2)/hackernews, if you're using Windows.\n\tdb, err := sql.Open(\"postgres\", connStr)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tDb = db\n\n}", "func InitDb(){\r\n\tconnectionURL:=os.Getenv(\"CONNECTION_URL\")\r\n\tvar err error\r\n\tDBConn, err = gorm.Open(\"postgres\",connectionURL)\r\n\tif err!= nil{\r\n\t\tpanic(\"failed to connect to db\")\r\n\t}\r\n\tfmt.Println(\"db is connected lets go.........\")\r\n\tDBConn.AutoMigrate(&models.GoItems{})\r\n\tfmt.Println(\"db has been migrated\")\r\n}", "func dbInit() {\n\t//User Input\n\tusernm := creds.UserName\n\tpass := creds.Password\n\tDBName := creds.DBName\n\tlit.Debug(\"Hit dbInit \" + DBName)\n\tlog.Println(usernm + \":\" + pass + \"@tcp(127.0.0.1:3306)/\")\n\n\tdb, err := sql.Open(\"mysql\", usernm+\":\"+pass+\"@tcp(127.0.0.1:3306)/\")\n\terr = db.Ping() //Need to ping to generate connection and trigger err\n\tif err != nil {\n\t\tlit.Error(\"Error in Init Log-in\")\n\t\tcreds = getCreds()\n\t\tfile, _ := json.MarshalIndent(creds, \"\", \"\\t\")\n\t\t_ = ioutil.WriteFile(\"configs/creds.json\", file, 0644)\n\t} else {\n\t\tlit.Debug(\"Attempt DB Creation\")\n\t\t_, err = db.Exec(\"CREATE DATABASE \" + DBName)\n\t\tif err != nil {\n\t\t\tlog.Println(err.Error())\n\t\t} else {\n\t\t\tlog.Println(\"Database Created:\", \"\\\"\"+DBName+\"\\\"\")\n\t\t}\n\t\tdb.Exec(\"USE \" + DBName)\n\t\tstmt, err := db.Prepare(\"CREATE TABLE `employee` (`id` int(6) unsigned NOT NULL AUTO_INCREMENT,`name` varchar(30) NOT NULL,`city` varchar(30) NOT NULL,PRIMARY KEY (`id`));\")\n\t\tif err != nil {\n\t\t\tlog.Println(err.Error())\n\t\t} else {\n\t\t\t_, err = stmt.Exec()\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err.Error())\n\t\t\t} else {\n\t\t\t\tlog.Println(\"Table Created\", \"\\\"\"+\"employees\"+\"\\\"\")\n\t\t\t}\n\t\t}\n\t}\n}", "func InitDb() *gorm.DB {\n\tdb := openConnection()\n\n\tmodels.RunMigrations(db)\n\treturn db\n}", "func initDb(username, password, endpoint, port, database string) (*sql.DB, error) {\n\t// Create url for connection\n\turl := fmt.Sprintf(\"%s:%s@tcp(%s:%s)/%s?parseTime=true\", username, password, endpoint, port, database)\n\n\t// Open connection to SQL DB\n\tdb, err := sql.Open(\"mysql\", url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Test database connection\n\terr = db.Ping()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn db, err\n}", "func initDb() *gorp.DbMap {\n\n\tdb, err := sql.Open(\"postgres\", os.Getenv(\"DATABASE_URL_CPFC\"))\n\n\tif err != nil {\n\t\tH.CheckError(err)\n\t}\n\n\tdbmap := &gorp.DbMap{Db: db, Dialect: gorp.PostgresDialect{}}\n\treturn dbmap\n}", "func (db database) Init() error {\n\tscript := `CREATE TABLE IF NOT EXISTS txs (\n\t\thash VARCHAR NOT NULL 
PRIMARY KEY,\n\t\tstatus SMALLINT,\n\t\tcreated_time BIGINT,\n\t\tselector VARCHAR(255),\n\t\ttxid VARCHAR,\n\t\ttxindex BIGINT,\n\t\tamount VARCHAR(100),\n\t\tpayload VARCHAR,\n\t\tphash VARCHAR,\n\t\tto_address VARCHAR,\n\t\tnonce VARCHAR,\n\t\tnhash VARCHAR,\n\t\tgpubkey VARCHAR,\n\t\tghash VARCHAR,\n\t\tversion VARCHAR\n\t);\nCREATE TABLE IF NOT EXISTS gateways (\n\t\tgateway_address VARCHAR NOT NULL PRIMARY KEY,\n\t\tstatus SMALLINT,\n\t\tcreated_time BIGINT,\n\t\tselector VARCHAR(255),\n\t\tpayload VARCHAR,\n\t\tphash VARCHAR,\n\t\tto_address VARCHAR,\n\t\tnonce VARCHAR,\n\t\tnhash VARCHAR,\n\t\tgpubkey VARCHAR,\n\t\tghash VARCHAR,\n\t\tversion VARCHAR\n);\n`\n\t_, err := db.db.Exec(script)\n\treturn err\n}", "func DBInit() *gorm.DB {\n\te := godotenv.Load() //Load .env file\n\tif e != nil {\n\t\tfmt.Print(e)\n\t}\n\n\thost := os.Getenv(\"DB_HOST\")\n\tport := os.Getenv(\"DB_PORT\")\n\tpassword := os.Getenv(\"DB_PASSWORD\")\n\tdbUser := os.Getenv(\"DB_USER\")\n\tdbName := os.Getenv(\"DB_NAME\")\n\tdbURI := fmt.Sprintf(\"%s:%s@tcp(%s:%s)/%s?charset=utf8&parseTime=True&loc=%s\", dbUser, password, host, port, dbName, \"Asia%2FJakarta\")\n\n\tdb, err := gorm.Open(\"mysql\", dbURI)\n\tif err != nil {\n\t\tlog.Panicf(\"failed to connect to database with err : %s \", err)\n\t}\n\tdb.DB().SetConnMaxLifetime(time.Minute * 5)\n\tdb.DB().SetMaxIdleConns(0)\n\tdb.DB().SetMaxOpenConns(5)\n\n\tdb.LogMode(true)\n\n\tdB = db\n\tdb.AutoMigrate(\n\t\t&domain.Transaction{},\n\t\t&domain.TransactionDetail{},\n\t\t&domain.Cart{},\n\t\t&domain.CartDetail{},\n\t\t&domain.Product{},\n\t\t&domain.StatusCode{},\n\t)\n\treturn dB\n}", "func InitDB() *DB {\n\tdb, err := gorm.Open(dbEngine, dbName)\n\tif err != nil {\n\t\tlog.Fatal(\"failed to initialize database: \", err.Error())\n\t}\n\n\tdb.AutoMigrate(&inventory.Stock{}, &inventory.StockIn{}, &inventory.StockOut{}, &inventory.StockValue{}, &inventory.SaleReport{})\n\n\treturn &DB{db}\n}", "func InitializeDb() {\n\tdbPort, err := strconv.Atoi(os.Getenv(\"DB_PORT\"))\n\tif err != nil {\n\t\tlog.Fatal(\"Database port is not valid\")\n\t}\n\n\tdbConnString := fmt.Sprintf(\n\t\t\"host=%s port=%d user=%s password=%s dbname=%s sslmode=disable\",\n\t\tos.Getenv(\"DB_HOST\"),\n\t\tdbPort,\n\t\tos.Getenv(\"DB_USER\"),\n\t\tos.Getenv(\"DB_PASS\"),\n\t\tos.Getenv(\"DB_NAME\"),\n\t)\n\n\tDB, err = sql.Open(\"postgres\", dbConnString)\n\tif err != nil {\n\t\tlog.Fatal(\"Could not connect to db- \", err)\n\t}\n}", "func initDB() {\n\tvar err error\n\tpsqlInfo := fmt.Sprintf(\"host=%s port=%d user=%s \"+\n\t\t\"password=%s dbname=%s sslmode=disable\",\n\t\thost, port, user, password, dbname)\n\tdb, err = sql.Open(\"postgres\", psqlInfo)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n}", "func InitDB() *gorm.DB {\n\tdbConf := GetPostgresConfig()\n\tdbSpec := fmt.Sprintf(\n\t\t\"host=%s port=%s dbname=%s user=%s password=%s sslmode=%s\",\n\t\tdbConf.Host, dbConf.Port, dbConf.DB, dbConf.User, dbConf.Password, dbConf.SSLMode,\n\t)\n\tconn, err := gorm.Open(\"postgres\", dbSpec)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to connect to database with config: %s, err: %s\", dbSpec, err))\n\t}\n\tdb = conn\n\tmigrate() // apply database migrations\n\tif os.Getenv(\"DEBUG\") == \"true\" {\n\t\tdb.LogMode(true)\n\t}\n\treturn db\n}", "func (uts *UnapprovedTransactions) InitDB() error {\n\treturn uts.DB.CreateTable(uts.getTableName(), \"VARBINARY(100)\", \"LONGBLOB\")\n}", "func InitializeDB(dbfile string) error {\n\tCloseDB()\n\tdb, err := prepareDB(dbfile)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\tadb = &agendaDB{\n\t\tdb: db,\n\t\tfile: dbfile,\n\t\tqueue: startExecQueue(db),\n\t}\n\treturn nil\n}", "func InitDatabase() *Database {\n\t// eg. \"postgres://postgres:postgres@localhost/postgres?sslmode=disable\"\n\t// TODO: enable SSL on DB\n\tconn, err := sql.Open(\"postgres\", os.Getenv(\"PG_CONNECTION_STRING\"))\n\tif err != nil {\n\t\tlog.Fatal(err) // kill server if we can't use DB on startup\n\t}\n\treturn &Database{\n\t\tconn: conn,\n\t}\n}", "func (bdm *MySQLDBManager) InitDatabase() error {\n\n\tbdm.OpenConnection()\n\n\tdefer bdm.CloseConnection()\n\n\tbc, err := bdm.GetBlockchainObject()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = bc.InitDB()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttxs, err := bdm.GetTransactionsObject()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = txs.InitDB()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tutx, err := bdm.GetUnapprovedTransactionsObject()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = utx.InitDB()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tuos, err := bdm.GetUnspentOutputsObject()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = uos.InitDB()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tns, err := bdm.GetNodesObject()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ns.InitDB()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdr, err := bdm.GetDataReferencesObject()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = dr.InitDB()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (fact FileFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, types.ValueReadWriter, tree.NodeStore, error) {\n\tsingletonLock.Lock()\n\tdefer singletonLock.Unlock()\n\n\tif s, ok := singletons[urlObj.Path]; ok {\n\t\treturn s.ddb, s.vrw, s.ns, nil\n\t}\n\n\tpath, err := url.PathUnescape(urlObj.Path)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tpath = filepath.FromSlash(path)\n\tpath = urlObj.Host + path\n\n\terr = validateDir(path)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tvar useJournal bool\n\tif params != nil {\n\t\t_, useJournal = params[ChunkJournalParam]\n\t}\n\n\tvar newGenSt *nbs.NomsBlockStore\n\tq := nbs.NewUnlimitedMemQuotaProvider()\n\tif useJournal && chunkJournalFeatureFlag {\n\t\tnewGenSt, err = nbs.NewLocalJournalingStore(ctx, nbf.VersionString(), path, q)\n\t} else {\n\t\tnewGenSt, err = nbs.NewLocalStore(ctx, nbf.VersionString(), path, defaultMemTableSize, q)\n\t}\n\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\toldgenPath := filepath.Join(path, \"oldgen\")\n\terr = validateDir(oldgenPath)\n\tif err != nil {\n\t\tif !errors.Is(err, os.ErrNotExist) {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\n\t\terr = os.Mkdir(oldgenPath, os.ModePerm)\n\t\tif err != nil && !errors.Is(err, os.ErrExist) {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t}\n\n\toldGenSt, err := nbs.NewLocalStore(ctx, newGenSt.Version(), oldgenPath, defaultMemTableSize, q)\n\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tst := nbs.NewGenerationalCS(oldGenSt, newGenSt)\n\t// metrics?\n\n\tvrw := types.NewValueStore(st)\n\tns := tree.NewNodeStore(st)\n\tddb := datas.NewTypesDatabase(vrw, ns)\n\n\tsingletons[urlObj.Path] = singletonDB{\n\t\tddb: ddb,\n\t\tvrw: vrw,\n\t\tns: ns,\n\t}\n\n\treturn ddb, vrw, ns, nil\n}", "func InitDB(driver, connectionstring string) error {\n\tdb, err := gorm.Open(driver, connectionstring)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\tsetDB(db)\n\treturn nil\n}", "func InitDB() {\n\tvar err error\n\tdatabase.DBConn, err = gorm.Open(sqlite.Open(\"books.db\"), &gorm.Config{})\n\tif err != nil {\n\t\tpanic(\"failed to connect database\")\n\t}\n\tfmt.Println(\"Database successfully connected\")\n\tdatabase.DBConn.AutoMigrate(&book.Book{})\n}", "func InitDB() {\n\tvar err error\n\tdb, err = gorm.Open(sqlite.Open(\"./test.db\"), &gorm.Config{})\n\tif err != nil {\n\t\tpanic(\"failed to connect database\")\n\t}\n\treturn\n}", "func initAppDB() {\n\n\t// Init config data\n\tdbConf := GetDBConfig()\n\tdbConf.IsAppDB = true\n\n\tdbPoolApp, err := initSocketConnectionPool(dbConf)\n\tif err != nil {\n\t\tlog.Println(\"initial dbConnApp fail : \", err.Error())\n\t} else {\n\t\tlog.Println(\"initial dbConnApp successful\")\n\t\tdbConf.Conn = dbPoolApp\n\t\tdbConf.InitSuccess = true\n\t}\n\n\tdbConf.Err = err\n\n\t// Keep instance\n\tdbAppConf = dbConf\n}", "func InitDB() *sql.DB {\n\t__db, err := sql.Open(\"sqlite3\", \"../realdb.db\")\n\tif err != nil { panic(err) }\n if __db == nil { panic(\"db nil\") }\n\n\treturn __db\n}", "func InitDatabase() *Server {\n\tvar err error\n\n\tconnString := getConnString()\n\n\tlog.Printf(\"Setting connection to db with configuration: %s \\n\", connString)\n\n\tserver := &Server{}\n\tserver.db, err = sql.Open(\"sqlserver\", connString)\n\tif err != nil {\n\t\tlog.Fatal(\"Error opening connection: \", err.Error())\n\t}\n\n\tserver.db.SetConnMaxLifetime(time.Minute * 4)\n\n\treturn server\n}", "func (this *DBHandler) Init() {\n\tvar derr error\n\tthis.db, derr = sql.Open(\"sqlite3\", DB_FILE_NAME)\n\tif derr != nil {\n\t\tfmt.Println(derr)\n\t}\n\tthis.createNewTable(TABLE_WPA)\n\tthis.createNewTable(TABLE_WORDLISTS)\n\tthis.createNewTable(TABLE_RUNS)\n}", "func InitDB() *gorm.DB {\n\tdsn := \"host=\" + DBConfig.Host + \" user=\" + DBConfig.User + \" password=\" + DBConfig.Password + \" dbname=\" +\n\t\tDBConfig.Name + \" port=\" + DBConfig.Port + \" sslmode=disable TimeZone=Asia/Shanghai\"\n\tdb, err := gorm.Open(postgres.Open(dsn), &gorm.Config{\n\t\tDisableForeignKeyConstraintWhenMigrating: true,\n\t\tLogger: logger.Default.LogMode(logger.Info),\n\t})\n\tif err != nil {\n\t\tfmt.Println(\"db err: \", err)\n\t}\n\tsqlDB, err := db.DB()\n\t// SetMaxIdleConns 设置空闲连接池中连接的最大数量\n\tsqlDB.SetMaxIdleConns(10)\n\n\t// SetMaxOpenConns 设置打开数据库连接的最大数量。\n\tsqlDB.SetMaxOpenConns(100)\n\n\t// SetConnMaxLifeti\t1qme 设置了连接可复用的最大时间。\n\tsqlDB.SetConnMaxLifetime(time.Hour)\n\t//db.LogMode(true)\n\tD = db\n\tSqlDB = sqlDB\n\treturn D\n}", "func initDB(dbName string) (dbType, error) {\n\th, err := sql.Open(dbDriver, dbName)\n\treturn dbType{handle: h}, err\n}", "func Init() {\n\tvar err error\n\tdb, err = sql.Open(\"sqlite3\", \"db-data/app.db\")\n\tif err != nil {\n\t\tlog.Fatal(\"failed to open db: \", err)\n\t}\n\n\tif _, err := db.Exec(createStmt); err != nil {\n\t\tlog.Fatal(\"failed to initialize db: \", err)\n\t}\n}", "func InitDB() *sql.DB {\n\tdb, err := sql.Open(\"sqlite3\", \"./angebote.db\")\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tif db == nil {\n\t\tpanic(\"db nil\")\n\t}\n\n\treturn db\n}", "func InitializeDB() *Database {\n\tconfig := new(dbConfig)\n\tconfigFile, err := ioutil.ReadFile(\"config.yaml\")\n\terr = yaml.Unmarshal(configFile, &config)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcredentials := fmt.Sprintf(\"%s:%s@/%s?charset=utf8&parseTime=True&loc=Local\", config.Database.DatabaseUser, config.Database.DatabasePassword, config.Database.Database)\n\tdialect := 
config.Database.DatabaseType\n\n\tdb, err := gorm.Open(dialect, credentials)\n\tif err != nil {\n\t\tlog.Fatal().Msgf(\"Failed to connect to Database. Reason: %v\\n\", err)\n\t}\n\tlog.Info().Msg(\"Successfully connected to qBot Database.\")\n\n\tdb.DB().SetConnMaxLifetime(time.Second * 100)\n\tdb.DB().SetMaxIdleConns(50)\n\tdb.DB().SetMaxOpenConns(200)\n\n\t//db.DropTableIfExists(models.User{}, models.Question{}, models.Answer{}) // Temp\n\t//db.DropTable(\"user_questions\", \"question_answers\", \"user_answers\") // Temp\n\tif err := db.AutoMigrate(models.User{}, models.Question{}, models.Answer{}).Error; err != nil {\n\t\tlog.Fatal().Msgf(\"Unable to migrate database. \\nReason: %v\", err)\n\t}\n\tlog.Info().Msg(\"Database migration successful.\")\n\treturn &Database{db}\n}", "func InitDb(dbConfig Config) (*sql.DB, error) {\n\tconnectionURL := newSQLServerConnectionURL(\n\t\tdbConfig.User, dbConfig.Pass, dbConfig.Host, dbConfig.Name, dbConfig.Port)\n\tcreateTableQuery := sqlServerTableCreationQuery\n\n\tlog.Debugf(\"Establishing connection with '%s'. Connection string: '%q'\", dbDriverName,\n\t\tstrings.Replace(connectionURL.String(), connectionURL.User.String() + \"@\", \"***:***@\", 1))\n\n\tdb, err := sql.Open(dbDriverName, connectionURL.String())\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"while establishing connection to '%s'\", dbDriverName)\n\t}\n\n\tlog.Debug(\"Testing connection\")\n\tif err := db.Ping(); err != nil {\n\t\treturn nil, errors.Wrap(err, \"while testing DB connection\")\n\t}\n\n\tq := strings.Replace(createTableQuery, \"{name}\", SanitizeSQLArg(dbConfig.DbOrdersTableName), -1)\n\tlog.Debugf(\"Ensuring table exists. Running query: '%q'.\", q)\n\tif _, err := db.Exec(q); err != nil {\n\t\treturn nil, errors.Wrap(err, \"while initiating DB table\")\n\t}\n\n\treturn db, nil\n}", "func InitDatabase(dsn string) error {\n\tfmt.Println(\"Init db connection\")\n\t// config := mysql.NewConfig()\n\t// config.User = username\n\t// config.Passwd = password\n\t// config.Net = protocol\n\t// config.Addr = host\n\t// config.DBName = database\n\t// config.Params = map[string]string{\n\t// \t\"charset\": charset,\n\t// \t\"parseTime\": \"True\",\n\t// }\n\tdb, err := gorm.Open(\"sqlite3\", dsn)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn err\n\t}\n\tDbConn = db\n\treturn nil\n}", "func initDb() *gorp.DbMap {\n\tdb, err := sql.Open(\"mysql\", \"root:123@tcp(db:3306)/FLINT\")\n\tcheckErr(err, \"sql.Open failed\")\n\n\t// construct a gorp DbMap\n\tdbmap := &gorp.DbMap{Db: db, Dialect: gorp.SqliteDialect{}}\n\n\treturn dbmap\n}", "func InitDB(init bool) {\n\tif init {\n\t\tdb.Exec(`CREATE TABLE IF NOT EXISTS kv (\n\t\t\tk TEXT PRIMARY KEY,\n\t\t\tv TEXT\n\t\t)`)\n\t\tdb.Exec(`CREATE TABLE IF NOT EXISTS datasets (\n\t\t\tid INTEGER PRIMARY KEY ASC,\n\t\t\tname TEXT,\n\t\t\t-- 'data' or 'computed'\n\t\t\ttype TEXT,\n\t\t\tdata_type TEXT,\n\t\t\tmetadata TEXT DEFAULT '',\n\t\t\t-- only set if computed\n\t\t\thash TEXT,\n\t\t\tdone INTEGER DEFAULT 1\n\t\t)`)\n\t\tdb.Exec(`CREATE TABLE IF NOT EXISTS annotate_datasets (\n\t\t\tid INTEGER PRIMARY KEY ASC,\n\t\t\tdataset_id INTEGER REFERENCES datasets(id),\n\t\t\tinputs TEXT,\n\t\t\ttool TEXT,\n\t\t\tparams TEXT\n\t\t)`)\n\t\tdb.Exec(`CREATE TABLE IF NOT EXISTS pytorch_archs (\n\t\t\tid TEXT PRIMARY KEY,\n\t\t\tparams TEXT\n\t\t)`)\n\t\tdb.Exec(`CREATE TABLE IF NOT EXISTS pytorch_components (\n\t\t\tid TEXT PRIMARY KEY,\n\t\t\tparams TEXT\n\t\t)`)\n\t\tdb.Exec(`CREATE TABLE IF NOT EXISTS exec_nodes (\n\t\t\tid INTEGER 
PRIMARY KEY ASC,\n\t\t\tname TEXT,\n\t\t\top TEXT,\n\t\t\tparams TEXT,\n\t\t\tparents TEXT,\n\t\t\tworkspace TEXT\n\t\t)`)\n\t\tdb.Exec(`CREATE TABLE IF NOT EXISTS exec_ds_refs (\n\t\t\tnode_id INTEGER,\n\t\t\tdataset_id INTEGER,\n\t\t\tUNIQUE(node_id, dataset_id)\n\t\t)`)\n\t\tdb.Exec(`CREATE TABLE IF NOT EXISTS workspaces (\n\t\t\tname TEXT PRIMARY KEY\n\t\t)`)\n\t\tdb.Exec(`CREATE TABLE IF NOT EXISTS ws_datasets (\n\t\t\tdataset_id INTEGER,\n\t\t\tworkspace TEXT,\n\t\t\tUNIQUE(dataset_id, workspace)\n\t\t)`)\n\t\tdb.Exec(`CREATE TABLE IF NOT EXISTS jobs (\n\t\t\tid INTEGER PRIMARY KEY ASC,\n\t\t\tname TEXT,\n\t\t\t-- e.g. 'execnode'\n\t\t\ttype TEXT,\n\t\t\t-- how to process the job output and render the job\n\t\t\top TEXT,\n\t\t\tmetadata TEXT,\n\t\t\tstart_time TIMESTAMP,\n\t\t\tstate TEXT DEFAULT '',\n\t\t\tdone INTEGER DEFAULT 0,\n\t\t\terror TEXT DEFAULT ''\n\t\t)`)\n\n\t\t// add missing pytorch components\n\t\tcomponentPath := \"python/skyhook/pytorch/components/\"\n\t\tfiles, err := ioutil.ReadDir(componentPath)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfor _, fi := range files {\n\t\t\tif !strings.HasSuffix(fi.Name(), \".json\") {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tid := strings.Split(fi.Name(), \".json\")[0]\n\t\t\tbytes, err := ioutil.ReadFile(filepath.Join(componentPath, fi.Name()))\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tdb.Exec(\"INSERT OR REPLACE INTO pytorch_components (id, params) VALUES (?, ?)\", id, string(bytes))\n\t\t}\n\n\t\t// add missing pytorch archs\n\t\tarchPath := \"exec_ops/pytorch/archs/\"\n\t\tfiles, err = ioutil.ReadDir(archPath)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfor _, fi := range files {\n\t\t\tif !strings.HasSuffix(fi.Name(), \".json\") {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tid := strings.Split(fi.Name(), \".json\")[0]\n\t\t\tbytes, err := ioutil.ReadFile(filepath.Join(archPath, fi.Name()))\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tdb.Exec(\"INSERT OR REPLACE INTO pytorch_archs (id, params) VALUES (?, ?)\", id, string(bytes))\n\t\t}\n\n\t\t// add default workspace if it doesn't exist\n\t\tvar count int\n\t\tdb.QueryRow(\"SELECT COUNT(*) FROM workspaces WHERE name = ?\", \"default\").Scan(&count)\n\t\tif count == 0 {\n\t\t\tdb.Exec(\"INSERT INTO workspaces (name) VALUES (?)\", \"default\")\n\t\t}\n\t}\n\n\t// now run some database cleanup steps\n\n\t// mark jobs that are still running as error\n\tdb.Exec(\"UPDATE jobs SET error = 'terminated', done = 1 WHERE done = 0\")\n\n\t// delete temporary datasetsTODO\n}", "func DBInit(conStr string) {\n\tif db == nil {\n\t\tvar err error\n\t\tdbConnection, err := gorm.Open(\"mysql\", conStr+\"?charset=utf8&parseTime=True&loc=Local\")\n\t\t// db connection will be closed if there's no request for a while\n\t\t// which would cause 500 error when a new request comes.\n\t\t// diable pool here to avoid this problem.\n\t\tdbConnection.DB().SetMaxIdleConns(0)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err.Error(),\n\t\t\t}).Fatal(\"Faile to create db connection pool\")\n\t\t} else {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"message\": dbConnection.GetErrors(),\n\t\t\t\t\"db\": conStr,\n\t\t\t}).Info(\"connected to mysql\")\n\t\t}\n\t\tdb = &DB{dbConnection}\n\t}\n\tdb.dbConnect.SetLogger(log.StandardLogger())\n\t// db.Debug message will be logged be logrug with Info level\n\tdb.dbConnect.Debug().AutoMigrate(&Todo{})\n}", "func CreateDB(config *defs.Config) error {\n\tvar db, err = sql.Open(\"postgres\", 
\"user=\"+config.DatabaseUserName+\n\t\t\" dbname=\"+config.DatabaseName+\" sslmode=disable\")\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = db.Ping()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar statements = [][]string{\n\t\t{\"Creating table users...\", `CREATE TABLE users (\n\t\t\tid serial PRIMARY KEY,\n\t\t\temail text NOT NULL,\n\t\t\tname text,\n\t\t\trole text NOT NULL,\n\t\t\ttoken text,\n\t\t\tcreation_date timestamp WITH TIME ZONE NOT NULL\n\t\t\t\t\t\t\t\tDEFAULT CURRENT_TIMESTAMP,\n\t\t\tlastlog timestamp WITH TIME ZONE)`},\n\t\t{\"Creating table photos...\", `CREATE TABLE photos (\n\t\t\tid \tserial PRIMARY KEY,\n\t\t\tfilename\t\ttext NOT NULL,\n\t\t\tmimetype\t\ttext NOT NULL,\n\t\t\tsize\t\t\tinteger NOT NULL,\n\t\t\tcreation_date timestamp WITH TIME ZONE NOT NULL\n\t\t\t\t\t\t\t\tDEFAULT CURRENT_TIMESTAMP,\n\t\t\tauthor_id \tinteger NOT NULL REFERENCES users(id),\n\t\t\tcaption\t\t\ttext NOT NULL DEFAULT '',\n\t\t\timage\t\t\tbytea NOT NULL,\n\t\t\tthumbnail\t\tbytea NOT NULL,\n\t\t\tbig_thumbnail\tbytea NOT NULL)`},\n\t\t{\"Creating table albums...\", `CREATE TABLE albums (\n\t\t\tname text PRIMARY KEY,\n\t\t\tcover_image_id\tinteger REFERENCES photos(id))`},\n\t\t{\"Creating table photo_albums...\", `CREATE TABLE photo_albums (\n\t\t\tphoto_id\tinteger REFERENCES photos(id) ON DELETE CASCADE NOT NULL,\n\t\t\talbum_name \ttext REFERENCES albums(name) ON DELETE CASCADE\n\t\t\t\t\t\t\tON UPDATE CASCADE NOT NULL,\n\t\t\tUNIQUE (photo_id, album_name))`},\n\t}\n\tfor _, s := range statements {\n\t\tif s[0] != \"\" {\n\t\t\tlog.Println(s[0])\n\t\t}\n\t\t_, err = db.Exec(s[1])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Add the default admins listed in the config file to the users table.\n\tlog.Println(\"Inserting default admins...\")\n\tfor _, admin := range config.DefaultAdmins {\n\t\t_, err = db.Exec(`INSERT INTO users (email, role) VALUES ($1, $2)`,\n\t\t\tadmin, \"Admin\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tlog.Println(\"Database setup complete!\")\n\treturn nil\n}", "func (p *DatabaseHandler) init(s *Server) error {\n\tdb, err := sql.Open(\"sqlite3\", s.srcDir+\"/database.db\")\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\treturn StringError{\"ERROR: Some of databases weren't opened!\"}\n\t}\n\tp.db = db\n\n\tp.createTable()\n\treturn nil\n}", "func initDBase(filepath string) (*bolt.DB, error) {\n\tdb, err := bolt.Open(filepath, 0777, nil)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"initDBase:Open\")\n\t}\n\tfor _, bucket := range []string{\"serial\", \"times\", \"temps\"} {\n\t\terr = db.Update(func(tx *bolt.Tx) error {\n\t\t\t_, err := tx.CreateBucketIfNotExists([]byte(bucket))\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"initDBase:CreateBucket\")\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"initDBase:Update\")\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn db, nil\n}", "func DbInit() {\n\tsqlStatement := `\n\t\tCREATE TABLE benutzer (\n\t\t\tfdNummer VARCHAR(256) PRIMARY KEY, \n\t\t\tVorname VARCHAR(256) NOT NULL, \n\t\t\tNachname VARCHAR(256) NULL,\n\t\t\tAge TINYINT NULL,\n\t\t\tStudiengang VARCHAR(256) NULL,\n\t\t\tSemester TINYINT NULL\n\t\t\t);\n\n\t\tCREATE TABLE nachrichten (\n\t\t\tNachrichtID INTEGER PRIMARY KEY AUTOINCREMENT,\n\t\t\tfdNummer VARCHAR(256) NOT NULL,\n\t\t\tGroupID INTEGER NOT NULL,\n\t\t\tmessage VARCHAR(256) NOT NULL,\n\t\t\tgesendeteUhrzeit DEFAULT CURRENT_TIMESTAMP\n\t\t\t);\n\n\t\tCREATE 
TABLE chatgroup (\n\t\t\tGroupID INTEGER PRIMARY KEY,\n\t\t\tGroupName VARCHAR(256) NOT NULL\n\t\t\t);\n\t`\n\n\tCreate(sqlStatement)\n}" ]
[ "0.70847756", "0.7069799", "0.7032143", "0.70092875", "0.6844531", "0.684309", "0.6831744", "0.68244797", "0.68110096", "0.67675555", "0.6757469", "0.67567056", "0.674744", "0.6733488", "0.67181665", "0.6714626", "0.6713455", "0.6654968", "0.6650416", "0.6645018", "0.6631553", "0.66214526", "0.6620517", "0.6615508", "0.6613205", "0.65968645", "0.659301", "0.6588243", "0.65851206", "0.6582108", "0.6562235", "0.65410703", "0.6530827", "0.65303946", "0.6525932", "0.651277", "0.65117645", "0.6493667", "0.64933366", "0.6483696", "0.6474171", "0.6470889", "0.6464638", "0.6442037", "0.64339364", "0.6433546", "0.6429557", "0.64251703", "0.64237523", "0.6419213", "0.6418589", "0.64092535", "0.64080346", "0.6406236", "0.6405172", "0.6403764", "0.640067", "0.6394968", "0.63908213", "0.63820934", "0.6370486", "0.63702315", "0.6370204", "0.63651055", "0.6362537", "0.6357253", "0.6355348", "0.6352987", "0.63223046", "0.6320942", "0.6313236", "0.63118935", "0.6308227", "0.63028854", "0.6299385", "0.62971896", "0.6294327", "0.6292665", "0.6288372", "0.6284805", "0.6283576", "0.6282414", "0.62819505", "0.62797976", "0.62667435", "0.62637514", "0.62632835", "0.62619215", "0.6259256", "0.6257397", "0.62560385", "0.625344", "0.62521243", "0.6248342", "0.624595", "0.623377", "0.62089705", "0.6206665", "0.62061995", "0.6196934" ]
0.7566985
0
SaveIp writes a model.IP to the database or updates it if it already exists
func (sqlDb *SqliteDB) SaveIp(ip *model.IP) error { upsert, err := db.Prepare("INSERT OR REPLACE INTO ip (ip_address, uuid, created_at, updated_at, response_code) VALUES (?, ?, ?, ?, ?)") if err != nil { return errors.New(fmt.Sprint("ERROR preparing db insert statement: ", err.Error())) } defer upsert.Close() _, err = upsert.Exec(ip.IPAddress, ip.UUID, ip.CreatedAt, ip.UpdatedAt, ip.ResponseCode) if err != nil { return errors.New(fmt.Sprint("ERROR executing DB insert: ", err.Error())) } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func SaveIPAddr(ipaddr string, amount int, ref db.DBClient) {\n\tjst, _ := time.LoadLocation(\"Asia/Tokyo\")\n\tnow := time.Now().In(jst).Format(\"2006-01-02 15:04:05 -0700 MST\")\n\tuser := User{\n\t\tIPAddr: ipaddr,\n\t\tTime: now,\n\t\tAmount: amount,\n\t}\n\terr := ref.Push(user)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n}", "func (test *Test) CreateOrUpdateIP(projectName string, ip models.IP) error {\n\treturn nil\n}", "func UpdateIP(username, ip string) {\n\torm := get_DBFront()\n\tt := make(map[string]interface{})\n\tt[\"lastip\"] = ip\n\t_, err := orm.SetTable(\"user\").Where(\"username=?\", username).Update(t)\n\tif !check_err(err) {\n\t\tLog(Log_Struct{\"error\", \"DB_Error_Line_309\", err})\n\t}\n}", "func InsertIP(db *sql.DB, ip string, name string) {\n\tstmt, err := db.Prepare(\"INSERT INTO raspberrypis (ip, name) VALUES (?, ?)\")\n\tcheckErr(err)\n\t\n\tstmt.Exec(ip, name)\n}", "func (w *IPWriter) WriteIP(ip *IP) error {\n\trootUint := (*w).uintTree[0]\n\tipTree, err := w.MakeIPTree(ip, rootUint, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\trootRst, err := w.readRecordset(0)\n\tif err != nil {\n\t\tif err != apperror.ErrNotFound {\n\t\t\treturn err\n\t\t}\n\t\trootRst = &Recordset{\n\t\t\tValue: ipTree[0].Value,\n\t\t}\n\t\terr = w.writeRecordset(rootRst, true)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\terr = w.bypass(ipTree[0], rootRst, w.writeIfNotFound)\n\treturn err\n}", "func (o *NetworkingProjectNetadpCreate) SetIp(v []string) {\n\to.Ip = v\n}", "func (n *hostOnlyNetwork) SaveIPv4(vbox VBoxManager) error {\n\tif n.IPv4.IP != nil && n.IPv4.Mask != nil {\n\t\tif err := vbox.vbm(\"hostonlyif\", \"ipconfig\", n.Name, \"--ip\", n.IPv4.IP.String(), \"--netmask\", net.IP(n.IPv4.Mask).String()); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (n *hostOnlyNetwork) Save(vbox VBoxManager) error {\n\tif err := n.SaveIPv4(vbox); err != nil {\n\t\treturn err\n\t}\n\n\tif n.DHCP {\n\t\tvbox.vbm(\"hostonlyif\", \"ipconfig\", n.Name, \"--dhcp\") // not implemented as of VirtualBox 4.3\n\t}\n\n\treturn nil\n}", "func CreateIP(userID string, ip string, lastUsed time.Time) (IP, error) {\n\tipStruct := IP{\n\t\tUserID: userID,\n\t\tIP: ip,\n\t\tLastUsed: lastUsed,\n\t}\n\t_, err := db.Exec(\"INSERT INTO web_ips(user_id, ip, last_used) VALUES (?, ?, ?)\", userID, ip, lastUsed)\n\tif err != nil {\n\t\treturn IP{}, err\n\t}\n\n\treturn ipStruct, nil\n}", "func (o *WafEventNetwork) SetIp(v string) {\n\to.Ip = &v\n}", "func (fs *FakeSession) SetIP(oid string, value string) *FakeSession {\n\treturn fs.Set(oid, gosnmp.IPAddress, value)\n}", "func CreateInstanceIP(\n\tctx context.Context,\n\ttx *sql.Tx,\n\trequest *models.CreateInstanceIPRequest) error {\n\tmodel := request.InstanceIP\n\t// Prepare statement for inserting data\n\tstmt, err := tx.Prepare(insertInstanceIPQuery)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"preparing create statement failed\")\n\t}\n\tdefer stmt.Close()\n\tlog.WithFields(log.Fields{\n\t\t\"model\": model,\n\t\t\"query\": insertInstanceIPQuery,\n\t}).Debug(\"create query\")\n\t_, err = stmt.ExecContext(ctx, 
string(model.GetUUID()),\n\t\tstring(model.GetSubnetUUID()),\n\t\tbool(model.GetServiceInstanceIP()),\n\t\tbool(model.GetServiceHealthCheckIP()),\n\t\tint(model.GetSecondaryIPTrackingIP().GetIPPrefixLen()),\n\t\tstring(model.GetSecondaryIPTrackingIP().GetIPPrefix()),\n\t\tcommon.MustJSON(model.GetPerms2().GetShare()),\n\t\tint(model.GetPerms2().GetOwnerAccess()),\n\t\tstring(model.GetPerms2().GetOwner()),\n\t\tint(model.GetPerms2().GetGlobalAccess()),\n\t\tstring(model.GetParentUUID()),\n\t\tstring(model.GetParentType()),\n\t\tbool(model.GetInstanceIPSecondary()),\n\t\tstring(model.GetInstanceIPMode()),\n\t\tbool(model.GetInstanceIPLocalIP()),\n\t\tstring(model.GetInstanceIPFamily()),\n\t\tstring(model.GetInstanceIPAddress()),\n\t\tbool(model.GetIDPerms().GetUserVisible()),\n\t\tint(model.GetIDPerms().GetPermissions().GetOwnerAccess()),\n\t\tstring(model.GetIDPerms().GetPermissions().GetOwner()),\n\t\tint(model.GetIDPerms().GetPermissions().GetOtherAccess()),\n\t\tint(model.GetIDPerms().GetPermissions().GetGroupAccess()),\n\t\tstring(model.GetIDPerms().GetPermissions().GetGroup()),\n\t\tstring(model.GetIDPerms().GetLastModified()),\n\t\tbool(model.GetIDPerms().GetEnable()),\n\t\tstring(model.GetIDPerms().GetDescription()),\n\t\tstring(model.GetIDPerms().GetCreator()),\n\t\tstring(model.GetIDPerms().GetCreated()),\n\t\tcommon.MustJSON(model.GetFQName()),\n\t\tstring(model.GetDisplayName()),\n\t\tcommon.MustJSON(model.GetAnnotations().GetKeyValuePair()))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"create failed\")\n\t}\n\n\tstmtNetworkIpamRef, err := tx.Prepare(insertInstanceIPNetworkIpamQuery)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"preparing NetworkIpamRefs create statement failed\")\n\t}\n\tdefer stmtNetworkIpamRef.Close()\n\tfor _, ref := range model.NetworkIpamRefs {\n\n\t\t_, err = stmtNetworkIpamRef.ExecContext(ctx, model.UUID, ref.UUID)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"NetworkIpamRefs create failed\")\n\t\t}\n\t}\n\n\tstmtVirtualNetworkRef, err := tx.Prepare(insertInstanceIPVirtualNetworkQuery)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"preparing VirtualNetworkRefs create statement failed\")\n\t}\n\tdefer stmtVirtualNetworkRef.Close()\n\tfor _, ref := range model.VirtualNetworkRefs {\n\n\t\t_, err = stmtVirtualNetworkRef.ExecContext(ctx, model.UUID, ref.UUID)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"VirtualNetworkRefs create failed\")\n\t\t}\n\t}\n\n\tstmtVirtualMachineInterfaceRef, err := tx.Prepare(insertInstanceIPVirtualMachineInterfaceQuery)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"preparing VirtualMachineInterfaceRefs create statement failed\")\n\t}\n\tdefer stmtVirtualMachineInterfaceRef.Close()\n\tfor _, ref := range model.VirtualMachineInterfaceRefs {\n\n\t\t_, err = stmtVirtualMachineInterfaceRef.ExecContext(ctx, model.UUID, ref.UUID)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"VirtualMachineInterfaceRefs create failed\")\n\t\t}\n\t}\n\n\tstmtPhysicalRouterRef, err := tx.Prepare(insertInstanceIPPhysicalRouterQuery)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"preparing PhysicalRouterRefs create statement failed\")\n\t}\n\tdefer stmtPhysicalRouterRef.Close()\n\tfor _, ref := range model.PhysicalRouterRefs {\n\n\t\t_, err = stmtPhysicalRouterRef.ExecContext(ctx, model.UUID, ref.UUID)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"PhysicalRouterRefs create failed\")\n\t\t}\n\t}\n\n\tstmtVirtualRouterRef, err := tx.Prepare(insertInstanceIPVirtualRouterQuery)\n\tif err != nil {\n\t\treturn 
errors.Wrap(err, \"preparing VirtualRouterRefs create statement failed\")\n\t}\n\tdefer stmtVirtualRouterRef.Close()\n\tfor _, ref := range model.VirtualRouterRefs {\n\n\t\t_, err = stmtVirtualRouterRef.ExecContext(ctx, model.UUID, ref.UUID)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"VirtualRouterRefs create failed\")\n\t\t}\n\t}\n\n\tmetaData := &common.MetaData{\n\t\tUUID: model.UUID,\n\t\tType: \"instance_ip\",\n\t\tFQName: model.FQName,\n\t}\n\terr = common.CreateMetaData(tx, metaData)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = common.CreateSharing(tx, \"instance_ip\", model.UUID, model.GetPerms2().GetShare())\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.WithFields(log.Fields{\n\t\t\"model\": model,\n\t}).Debug(\"created\")\n\treturn nil\n}", "func (s *IpAddressUpdate) SetIp(v string) *IpAddressUpdate {\n\ts.Ip = &v\n\treturn s\n}", "func (s *IpAddressUpdate) SetIp(v string) *IpAddressUpdate {\n\ts.Ip = &v\n\treturn s\n}", "func (u *GameServerUpsertOne) UpdateIPAddress() *GameServerUpsertOne {\n\treturn u.Update(func(s *GameServerUpsert) {\n\t\ts.UpdateIPAddress()\n\t})\n}", "func (m *ServicePrincipalRiskDetection) SetIpAddress(value *string)() {\n err := m.GetBackingStore().Set(\"ipAddress\", value)\n if err != nil {\n panic(err)\n }\n}", "func (m *BgpConfiguration) SetIpAddress(value *string)() {\n err := m.GetBackingStore().Set(\"ipAddress\", value)\n if err != nil {\n panic(err)\n }\n}", "func UpdateInstanceIP(\n\tctx context.Context,\n\ttx *sql.Tx,\n\trequest *models.UpdateInstanceIPRequest,\n) error {\n\t//TODO\n\treturn nil\n}", "func (u *GameServerUpsertBulk) UpdateIPAddress() *GameServerUpsertBulk {\n\treturn u.Update(func(s *GameServerUpsert) {\n\t\ts.UpdateIPAddress()\n\t})\n}", "func writeIP(b *uio.Lexer, ip net.IP) {\n\tvar zeros [net.IPv4len]byte\n\tif ip == nil {\n\t\tb.WriteBytes(zeros[:])\n\t} else {\n\t\t// Converting IP to 4 byte format\n\t\tip = ip.To4()\n\t\tb.WriteBytes(ip[:net.IPv4len])\n\t}\n}", "func (ua *UserAddress) Save(ctx context.Context) error {\n\tif ua.Exists() {\n\t\treturn ua.Update(ctx)\n\t}\n\n\treturn ua.Insert(ctx)\n}", "func (s *IpAddressResponse) SetIp(v string) *IpAddressResponse {\n\ts.Ip = &v\n\treturn s\n}", "func (s *IpAddressResponse) SetIp(v string) *IpAddressResponse {\n\ts.Ip = &v\n\treturn s\n}", "func (o *NetworkLoadBalancerForwardingRuleTarget) SetIp(v string) {\n\n\to.Ip = &v\n\n}", "func (u *GameServerUpsert) UpdateIPAddress() *GameServerUpsert {\n\tu.SetExcluded(gameserver.FieldIPAddress)\n\treturn u\n}", "func (ipset *IPSet) Save() error {\n\tstdout, err := ipset.run(\"save\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tipset.Sets = parseIPSetSave(ipset, stdout)\n\treturn nil\n}", "func saveClusterIP(current, overlay *unstructured.Unstructured) error {\n\t// Save the value of spec.clusterIP set by the cluster\n\tif clusterIP, found, err := unstructured.NestedString(current.Object, \"spec\",\n\t\t\"clusterIP\"); err != nil {\n\t\treturn err\n\t} else if found {\n\t\tif err := unstructured.SetNestedField(overlay.Object, clusterIP, \"spec\",\n\t\t\t\"clusterIP\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (s *IpAddressRequest) SetIp(v string) *IpAddressRequest {\n\ts.Ip = &v\n\treturn s\n}", "func (s *IpAddressRequest) SetIp(v string) *IpAddressRequest {\n\ts.Ip = &v\n\treturn s\n}", "func SetIP(ips string) {\n\tmutex.Lock()\n\tdefer mutex.Unlock()\n\tvar nip net.IP\n\tif nip = net.ParseIP(ips); nip == nil {\n\t\tlog.Println(\"ip\", ips, \"is illegal format\")\n\t\treturn\n\t}\n\tif 
nat.IsGlobalIP(nip) != \"\" {\n\t\tip = ips\n\t}\n}", "func (s *TargetAddress) SetIp(v string) *TargetAddress {\n\ts.Ip = &v\n\treturn s\n}", "func (s *TargetAddress) SetIp(v string) *TargetAddress {\n\ts.Ip = &v\n\treturn s\n}", "func (m *Myself) setIP(ips string) {\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\tvar ip net.IP\n\tif ip = net.ParseIP(ips); ip == nil {\n\t\tlog.Println(\"ip\", ips, \"is illegal format\")\n\t\treturn\n\t}\n\tif nat.IsGlobalIP(ip) != \"\" {\n\t\tm.ip = ips\n\t}\n}", "func (o *GetLTENetworkIDSubscribersV2Params) SetIP(ip *string) {\n\to.IP = ip\n}", "func (mt *mockTokenBuilder) SetIPAddress(ip string) {\n\t//TODO some mocking\n}", "func (test *Test) CreateOrUpdateIPs(projectName string, ip []models.IP) error {\n\treturn nil\n}", "func (o *NetworkingProjectNetadpCreate) HasIp() bool {\n\tif o != nil && o.Ip != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (m *AuditLogMutation) SetIPAddress(s string) {\n\tm.ip_address = &s\n}", "func (gsc *GameServerCreate) SetIPAddress(s string) *GameServerCreate {\n\tgsc.mutation.SetIPAddress(s)\n\treturn gsc\n}", "func (gsuo *GameServerUpdateOne) SetIPAddress(s string) *GameServerUpdateOne {\n\tgsuo.mutation.SetIPAddress(s)\n\treturn gsuo\n}", "func (in *ActionUserSessionIndexInput) SetIpAddr(value string) *ActionUserSessionIndexInput {\n\tin.IpAddr = value\n\n\tif in._selectedParameters == nil {\n\t\tin._selectedParameters = make(map[string]interface{})\n\t}\n\n\tin._selectedParameters[\"IpAddr\"] = nil\n\treturn in\n}", "func (pk *PublicKey) SaveToDB() error {\n\treturn model.SetPublicKey(string(pk.ID), pk)\n}", "func (w *IPWriter) Write(ips string) error {\n\tip, err := NewIP(ips)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn w.WriteIP(ip)\n}", "func (p *Client) CreateIP(ctx context.Context, namespace, clusterName, projectID, facility, metro string) (net.IP, error) {\n\tfailOnApprovalRequired := true\n\treq := metal.IPReservationRequestInput{\n\t\tType: \"public_ipv4\",\n\t\tQuantity: 1,\n\t\tFacility: &facility,\n\t\tMetro: &metro,\n\t\tFailOnApprovalRequired: &failOnApprovalRequired,\n\t\tTags: []string{generateElasticIPIdentifier(clusterName)},\n\t}\n\n\tapiRequest := p.IPAddressesApi.RequestIPReservation(ctx, projectID)\n\tr, resp, err := apiRequest.RequestIPReservationRequest(metal.RequestIPReservationRequest{\n\t\tIPReservationRequestInput: &req,\n\t}).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode == http.StatusUnprocessableEntity {\n\t\treturn nil, ErrElasticIPQuotaExceeded\n\t}\n\n\trawIP := r.IPReservation.GetAddress()\n\tip := net.ParseIP(rawIP)\n\tif ip == nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse IP: %s, %w\", rawIP, ErrInvalidIP)\n\t}\n\treturn ip, nil\n}", "func (h Hostingv4) CreateIP(region hosting.Region, version hosting.IPVersion) (hosting.IPAddress, error) {\n\tif version != hosting.IPv4 && version != hosting.IPv6 {\n\t\treturn hosting.IPAddress{}, errors.New(\"Bad IP version\")\n\t}\n\n\tregionID, err := strconv.Atoi(region.ID)\n\tif err != nil {\n\t\treturn hosting.IPAddress{}, internalParseError(\"Region\", \"ID\")\n\t}\n\n\tvar ip iPAddressv4\n\tvar response = Operation{}\n\terr = h.Send(\"hosting.iface.create\", []interface{}{\n\t\tmap[string]interface{}{\n\t\t\t\"datacenter_id\": regionID,\n\t\t\t\"ip_version\": int(version),\n\t\t\t\"bandwidth\": hosting.DefaultBandwidth,\n\t\t}}, &response)\n\tif err != nil {\n\t\treturn hosting.IPAddress{}, err\n\t}\n\tif err = 
h.waitForOp(response); err != nil {\n\t\treturn hosting.IPAddress{}, err\n\t}\n\n\tif err = h.Send(\"hosting.ip.info\", []interface{}{response.IPID}, &ip); err != nil {\n\t\treturn hosting.IPAddress{}, err\n\t}\n\n\treturn toIPAddress(ip), nil\n}", "func (o ResolverEndpointIpAddressOutput) Ip() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ResolverEndpointIpAddress) *string { return v.Ip }).(pulumi.StringPtrOutput)\n}", "func (db *ipDBManager) releaseIP(ip string) {\n\tif iface, ok := db.ips[ip]; ok {\n\t\tdelete(db.interfaces, iface)\n\t}\n\tdb.ips[ip] = \"\"\n}", "func (rb *ShardsRecordBuilder) Ip(ip string) *ShardsRecordBuilder {\n\trb.v.Ip = ip\n\treturn rb\n}", "func (au *AddressUpdate) Save(ctx context.Context) (int, error) {\n\tvar (\n\t\terr error\n\t\taffected int\n\t)\n\tif len(au.hooks) == 0 {\n\t\taffected, err = au.sqlSave(ctx)\n\t} else {\n\t\tvar mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {\n\t\t\tmutation, ok := m.(*AddressMutation)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"unexpected mutation type %T\", m)\n\t\t\t}\n\t\t\tau.mutation = mutation\n\t\t\taffected, err = au.sqlSave(ctx)\n\t\t\tmutation.done = true\n\t\t\treturn affected, err\n\t\t})\n\t\tfor i := len(au.hooks) - 1; i >= 0; i-- {\n\t\t\tmut = au.hooks[i](mut)\n\t\t}\n\t\tif _, err := mut.Mutate(ctx, au.mutation); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\treturn affected, err\n}", "func (dquo *DNSBLQueryUpdateOne) SetIPAddress(i *IP) *DNSBLQueryUpdateOne {\n\treturn dquo.SetIPAddressID(i.ID)\n}", "func IP(v string) predicate.IP {\n\treturn predicate.IP(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldIP), v))\n\t})\n}", "func (gsu *GameServerUpdate) SetIPAddress(s string) *GameServerUpdate {\n\tgsu.mutation.SetIPAddress(s)\n\treturn gsu\n}", "func Test_DeviceService_AddOrUpdate_NotValidIP(t *testing.T) {\n\ts := DeviceService{}\n\t_, err := s.AddOrUpdate(\"122.55.45.999\", 37777)\n\tassert.Error(t, err)\n}", "func (p IPPacket) SetIP(t IPType, ip net.IP) error {\n\tswitch p.Version() {\n\tcase 4:\n\t\t{\n\t\t\tif len(p) < IPv4PacketHeadLen {\n\t\t\t\treturn ErrTooShort\n\t\t\t}\n\t\t\tif len(ip) < net.IPv4len {\n\t\t\t\treturn ErrBadFormat\n\t\t\t}\n\t\t\tif t == SourceIP {\n\t\t\t\tcopy(p[12:16], ip[len(ip)-net.IPv4len:])\n\t\t\t} else {\n\t\t\t\tcopy(p[16:20], ip[len(ip)-net.IPv4len:])\n\t\t\t}\n\t\t\tp.GenerateChecksum()\n\t\t\treturn nil\n\t\t}\n\tcase 6:\n\t\t{\n\t\t\tif len(p) < IPv6PacketHeadLen {\n\t\t\t\treturn ErrTooShort\n\t\t\t}\n\t\t\tif len(ip) < net.IPv6len {\n\t\t\t\treturn ErrBadFormat\n\t\t\t}\n\n\t\t\tif t == SourceIP {\n\t\t\t\tcopy(p[8:24], ip[len(ip)-net.IPv6len:])\n\t\t\t} else {\n\t\t\t\tcopy(p[24:40], ip[len(ip)-net.IPv6len:])\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\tdefault:\n\t\t{\n\t\t\treturn ErrIPPacketBadVersion\n\t\t}\n\t}\n}", "func (u *GameServerUpsertBulk) SetIPAddress(v string) *GameServerUpsertBulk {\n\treturn u.Update(func(s *GameServerUpsert) {\n\t\ts.SetIPAddress(v)\n\t})\n}", "func provideIP(w http.ResponseWriter, req *http.Request) {\n\tresult, error := evaluateIPAddress(req)\n\tif error != nil {\n\t\tresult.Error = error.Error()\n\t}\n\tgo result.fetchGeoAndPersist()\n\n\twriteReponse(w, req, ipTemplate, result)\n}", "func (m *UserSimulationEventInfo) SetIpAddress(value *string)() {\n m.ipAddress = value\n}", "func (isrtu *IPStaticRoutingTableUpdate) Save(ctx context.Context) (int, error) {\n\tvar (\n\t\terr error\n\t\taffected int\n\t)\n\tif len(isrtu.hooks) == 0 {\n\t\tif err = isrtu.check(); err != nil 
{\n\t\t\treturn 0, err\n\t\t}\n\t\taffected, err = isrtu.sqlSave(ctx)\n\t} else {\n\t\tvar mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {\n\t\t\tmutation, ok := m.(*IPStaticRoutingTableMutation)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"unexpected mutation type %T\", m)\n\t\t\t}\n\t\t\tif err = isrtu.check(); err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tisrtu.mutation = mutation\n\t\t\taffected, err = isrtu.sqlSave(ctx)\n\t\t\tmutation.done = true\n\t\t\treturn affected, err\n\t\t})\n\t\tfor i := len(isrtu.hooks) - 1; i >= 0; i-- {\n\t\t\tmut = isrtu.hooks[i](mut)\n\t\t}\n\t\tif _, err := mut.Mutate(ctx, isrtu.mutation); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\treturn affected, err\n}", "func toIPAddress(iip iPAddressv4) (ip hosting.IPAddress) {\n\tip.ID = strconv.Itoa(iip.ID)\n\tip.IP = iip.IP\n\tip.RegionID = strconv.Itoa(iip.RegionID)\n\tip.State = iip.State\n\tip.VM = strconv.Itoa(iip.VM)\n\n\tif iip.Version == 6 {\n\t\tip.Version = hosting.IPv6\n\t} else {\n\t\tip.Version = hosting.IPv4\n\t}\n\n\treturn\n}", "func (o *WafEventNetwork) HasIp() bool {\n\tif o != nil && o.Ip != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (n *NetIf) SetIP(ip string) error {\n\tn.IP.IP = net.ParseIP(ip)\n\tif n.IP.IP == nil && ip != \"\" {\n\t\treturn fmt.Errorf(\"failed to parse Netif, bad IP: %s\", ip)\n\t}\n\n\treturn nil\n}", "func (auo *AddressUpdateOne) Save(ctx context.Context) (*Address, error) {\n\tvar (\n\t\terr error\n\t\tnode *Address\n\t)\n\tif len(auo.hooks) == 0 {\n\t\tnode, err = auo.sqlSave(ctx)\n\t} else {\n\t\tvar mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {\n\t\t\tmutation, ok := m.(*AddressMutation)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"unexpected mutation type %T\", m)\n\t\t\t}\n\t\t\tauo.mutation = mutation\n\t\t\tnode, err = auo.sqlSave(ctx)\n\t\t\tmutation.done = true\n\t\t\treturn node, err\n\t\t})\n\t\tfor i := len(auo.hooks) - 1; i >= 0; i-- {\n\t\t\tmut = auo.hooks[i](mut)\n\t\t}\n\t\tif _, err := mut.Mutate(ctx, auo.mutation); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn node, err\n}", "func AddIPblock() {\n\tvar ipaddress string\n\tvar hostname string\n\tipaddress, hostname = AskIPaddressToUser()\n\tctrl := ReturnipField(ipaddress, hostname)\n\tif ctrl {\n\t\tWriteHostFile(LinesHost)\n\t\tfmt.Println(\"\\nSuccess !\")\n\t\tLastViewoftheFile()\n\t}\n}", "func (bc *BouncerCreate) SetIPAddress(s string) *BouncerCreate {\n\tbc.mutation.SetIPAddress(s)\n\treturn bc\n}", "func Save(gw Gateway) (*mgo.ChangeInfo, error) {\n\treturn collection().UpsertId(gw.MAC, gw)\n}", "func (inst *PasswordResetN) Save(ctx context.Context, onlyFields ...string) error {\n\tif inst.passwordResetModel == nil {\n\t\treturn query.ErrModelNotSet\n\t}\n\n\tid, _, err := inst.passwordResetModel.SaveOrUpdate(ctx, *inst, onlyFields...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinst.Id = null.IntFrom(id)\n\treturn nil\n}", "func setLastTo(t *testing.T, network *net.IPNet, ip net.IP) {\n\tif err := ReleaseIP(network, ip); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tret, err := RequestIP(network, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tassertIPEquals(t, ip, ret)\n\n\tif err := ReleaseIP(network, ip); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (u *GameServerUpsertOne) SetIPAddress(v string) *GameServerUpsertOne {\n\treturn u.Update(func(s *GameServerUpsert) {\n\t\ts.SetIPAddress(v)\n\t})\n}", "func (b *Bridge) setIP() error {\n\tif b.IP == nil {\n\t\treturn 
nil\n\t}\n\tcmd := exec.Command(\"ifconfig\", b.Device, b.IP.String(), \"netmask\", fmt.Sprintf(\"0x%s\", b.Netmask.String()))\n\tfmt.Printf(\"cmd: %s\\n\", strings.Join(cmd.Args, \" \"))\n\treturn cmd.Run()\n}", "func (s *CreateUserEndpoint) saveToDB(user *User) (int, error) {\n\t// implementation removed\n\treturn 0, nil\n}", "func (dqu *DNSBLQueryUpdate) SetIPAddress(i *IP) *DNSBLQueryUpdate {\n\treturn dqu.SetIPAddressID(i.ID)\n}", "func (sqlDb *SqliteDB) GetIp(ipAddr string) (*model.IP, error) {\n\trow := db.QueryRow(\"SELECT * FROM ip WHERE ip_address = ?\", ipAddr)\n\tip := model.IP{}\n\n\terr := row.Scan(&ip.IPAddress, &ip.UUID, &ip.CreatedAt, &ip.UpdatedAt, &ip.ResponseCode)\n\tif err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn nil, nil\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\n\t}\n\treturn &ip, nil\n\n}", "func (u *GameServerUpsert) SetIPAddress(v string) *GameServerUpsert {\n\tu.Set(gameserver.FieldIPAddress, v)\n\treturn u\n}", "func (assign AddressAssignment) EqualIP(b AddressAssignment) bool {\n\tif assign.PoolID != b.PoolID {\n\t\treturn false\n\t} else if assign.IPAddr == b.IPAddr {\n\t\treturn false\n\t} else if assign.Port != b.Port {\n\t\treturn false\n\t} else if assign.ServiceID != b.ServiceID {\n\t\treturn false\n\t} else if assign.EndpointName != b.EndpointName {\n\t\treturn false\n\t}\n\treturn true\n}", "func (f *freeClientPool) saveToDb() {\n\tnow := f.clock.Now()\n\tstorage := freeClientPoolStorage{\n\t\tLogOffset: uint64(f.logOffset(now)),\n\t\tList: make([]*freeClientPoolEntry, len(f.addressMap)),\n\t}\n\ti := 0\n\tfor _, e := range f.addressMap {\n\t\tif e.connected {\n\t\t\tf.calcLogUsage(e, now)\n\t\t}\n\t\tstorage.List[i] = e\n\t\ti++\n\t}\n\tenc, err := rlp.EncodeToBytes(storage)\n\tif err != nil {\n\t\tlog.Error(\"Failed to encode client list\", \"err\", err)\n\t} else {\n\t\tf.db.Put([]byte(\"freeClientPool\"), enc)\n\t}\n}", "func (cc *CloudComb) CreateIP(params string) ([]string, []string, error) {\n\tif params == \"\" {\n\t\treturn nil, nil, errors.New(\"Params is missed\")\n\t}\n\tparams = PurifyParams(params)\n\n\tbody := bytes.NewBufferString(params)\n\n\t// do rest request\n\tresult, _, err := cc.doRESTRequest(\"POST\", \"/api/v1/ips\", \"\", nil, body)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// create response messages\n\n\ttype ip struct {\n\t\tId string `json:\"id\"`\n\t\tIp string `json:\"ip\"`\n\t\tStatus string `json:\"status\"`\n\t\tIpType string `json:\"type\"`\n\t\tServiceId string `json:\"service_id\"`\n\t\tServiceName string `json:\"service_name\"`\n\t\tServiceType string `json:\"service_type\"`\n\t\tCreateAt string `json:\"create_at\"`\n\t\tUpdateAt string `json:\"update_at\"`\n\t}\n\n\ttype createIpRes struct {\n\t\tTotal uint `json:\"total\"`\n\t\tIps []ip `json:\"ips\"`\n\t}\n\tvar res createIpRes\n\n\t// parse json\n\tif err := json.NewDecoder(strings.NewReader(result)).Decode(&res); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar ids []string\n\tvar ips []string\n\tfor _, v := range res.Ips {\n\t\tids = append(ids, v.Id)\n\t\tips = append(ips, v.Ip)\n\t}\n\n\treturn ids, ips, nil\n}", "func (r *Repository) Save(url string, toAddr, ccAddr []string, content string, status int) error {\n\treturn nil\n}", "func (in *ActionUserSessionIndexInput) SetApiIpAddr(value string) *ActionUserSessionIndexInput {\n\tin.ApiIpAddr = value\n\n\tif in._selectedParameters == nil {\n\t\tin._selectedParameters = make(map[string]interface{})\n\t}\n\n\tin._selectedParameters[\"ApiIpAddr\"] = nil\n\treturn in\n}", "func 
NewIP(value net.IP) (ip IP) {\n\treturn IP{IP: value}\n}", "func IsIP(val interface{}) bool {\n\treturn isMatch(ip, val)\n}", "func (o *NetworkingProjectNetadpCreate) GetIp() []string {\n\tif o == nil || o.Ip == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn o.Ip\n}", "func (isrtuo *IPStaticRoutingTableUpdateOne) Save(ctx context.Context) (*IPStaticRoutingTable, error) {\n\tvar (\n\t\terr error\n\t\tnode *IPStaticRoutingTable\n\t)\n\tif len(isrtuo.hooks) == 0 {\n\t\tif err = isrtuo.check(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tnode, err = isrtuo.sqlSave(ctx)\n\t} else {\n\t\tvar mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {\n\t\t\tmutation, ok := m.(*IPStaticRoutingTableMutation)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"unexpected mutation type %T\", m)\n\t\t\t}\n\t\t\tif err = isrtuo.check(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tisrtuo.mutation = mutation\n\t\t\tnode, err = isrtuo.sqlSave(ctx)\n\t\t\tmutation.done = true\n\t\t\treturn node, err\n\t\t})\n\t\tfor i := len(isrtuo.hooks) - 1; i >= 0; i-- {\n\t\t\tmut = isrtuo.hooks[i](mut)\n\t\t}\n\t\tif _, err := mut.Mutate(ctx, isrtuo.mutation); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn node, err\n}", "func (h *HostInfo) SetIP(ip net.IP) {\n\tif ip == nil {\n\t\treturn\n\t}\n\th.ip = ip\n\th.targetWithPort = ip.String() + \":\" + h.port\n}", "func (eptu *EquipmentPortTypeUpdate) Save(ctx context.Context) (int, error) {\n\tif eptu.update_time == nil {\n\t\tv := equipmentporttype.UpdateDefaultUpdateTime()\n\t\teptu.update_time = &v\n\t}\n\treturn eptu.sqlSave(ctx)\n}", "func CheckIp(ip string) int {\n return 0\n\n}", "func (o *IppoolPoolMember) SetIpType(v string) {\n\to.IpType = &v\n}", "func (s *SCIONBoxController) HBCheckIP(slas *models.SCIONLabAS, ip string, ia IA,\n\tr *http.Request) (bool, error) {\n\tvar needGen = false\n\tif utility.IPCompare(ip, slas.PublicIP) != 0 {\n\t\t// The IP address of the Box has changed update the DB\n\t\tif err := s.HBChangedIP(slas, ip); err != nil {\n\t\t\treturn false, fmt.Errorf(\"error updating the Box Connectons with changed IP: %v\",\n\t\t\t\terr)\n\t\t}\n\t\tneedGen = true\n\t\treturn needGen, nil\n\t} else {\n\t\t// Update the database using the list of received Neighbors\n\t\tif err := s.updateDBConnections(slas, ia.Connections); err != nil {\n\t\t\treturn needGen, fmt.Errorf(\"error updating the Box Connectons: %v\",\n\t\t\t\terr)\n\t\t}\n\t}\n\treturn needGen, nil\n}", "func (d *DB) checkip(ip string) (iptype uint32, ipnum uint128.Uint128, ipindex uint32) {\n\tiptype = 0\n\tipnum = uint128.From64(0)\n\tipnumtmp := uint128.From64(0)\n\tipindex = 0\n\tipaddress := net.ParseIP(ip)\n\n\tif ipaddress != nil {\n\t\tv4 := ipaddress.To4()\n\n\t\tif v4 != nil {\n\t\t\tiptype = 4\n\t\t\tipnum = uint128.From64(uint64(binary.BigEndian.Uint32(v4)))\n\t\t} else {\n\t\t\tv6 := ipaddress.To16()\n\n\t\t\tif v6 != nil {\n\t\t\t\tiptype = 6\n\t\t\t\treverseBytes(v6)\n\t\t\t\tipnum = uint128.FromBytes(v6)\n\n\t\t\t\tif ipnum.Cmp(from_v4mapped) >= 0 && ipnum.Cmp(to_v4mapped) <= 0 {\n\t\t\t\t\t// ipv4-mapped ipv6 should treat as ipv4 and read ipv4 data section\n\t\t\t\t\tiptype = 4\n\t\t\t\t\tipnum = ipnum.Sub(from_v4mapped)\n\t\t\t\t} else if ipnum.Cmp(from_6to4) >= 0 && ipnum.Cmp(to_6to4) <= 0 {\n\t\t\t\t\t// 6to4 so need to remap to ipv4\n\t\t\t\t\tiptype = 4\n\t\t\t\t\tipnum = ipnum.Rsh(80)\n\t\t\t\t\tipnum = ipnum.And(last_32bits)\n\t\t\t\t} else if ipnum.Cmp(from_teredo) >= 0 && ipnum.Cmp(to_teredo) <= 0 
{\n\t\t\t\t\t// Teredo so need to remap to ipv4\n\t\t\t\t\tiptype = 4\n\t\t\t\t\tipnum = uint128.Uint128{^ipnum.Lo, ^ipnum.Hi}\n\t\t\t\t\tipnum = ipnum.And(last_32bits)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif iptype == 4 {\n\t\tif d.meta.ipv4indexed {\n\t\t\tipnumtmp = ipnum.Rsh(16)\n\t\t\tipnumtmp = ipnumtmp.Lsh(3)\n\t\t\tipindex = uint32(ipnumtmp.Add(uint128.From64(uint64(d.meta.ipv4indexbaseaddr))).Lo)\n\t\t}\n\t} else if iptype == 6 {\n\t\tif d.meta.ipv6indexed {\n\t\t\tipnumtmp = ipnum.Rsh(112)\n\t\t\tipnumtmp = ipnumtmp.Lsh(3)\n\t\t\tipindex = uint32(ipnumtmp.Add(uint128.From64(uint64(d.meta.ipv6indexbaseaddr))).Lo)\n\t\t}\n\t}\n\treturn\n}", "func checkIP(ipAddress string, countries []string) (bool, error) {\n\tif ipAddress == \"\" {\n\t\treturn false, nil\n\t}\n\n\t// Look at the file, see if it needs to be updated.\n\tfileLock.Lock()\n\tif time.Now().After(lastPull.Add(pullTick)) {\n\t\tfileLock.Unlock()\n\t\tif err := pullRecord(); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tfileLock.Lock()\n\t}\n\tdefer fileLock.Unlock()\n\n\tdb, err := geoip2.Open(countryMMDB)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer db.Close()\n\n\tip := net.ParseIP(ipAddress)\n\trecord, err := db.Country(ip)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// TODO: Verify that we're using ISOCode.\n\tfor _, country := range countries {\n\t\t// Lets match to the ISOCode. Seems like a good idea.\n\t\tif country == record.Country.IsoCode {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, nil\n}", "func cloneIP(ip net.IP) net.IP {\n\tclone := make(net.IP, len(ip))\n\tcopy(clone, ip)\n\treturn clone\n}", "func cloneIP(ip net.IP) net.IP {\n\tclone := make(net.IP, len(ip))\n\tcopy(clone, ip)\n\treturn clone\n}", "func (g *IPCheck) Check(ip string) (ret bool) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlog.Printf(\"error geo.Check failed: %v\", r)\n\t\t\tret = true\n\t\t}\n\t}()\n\tif isLocalAddress(ip) {\n\t\treturn true\n\t}\n\tg.RLock()\n\tval, ok := g.cache[ip]\n\tg.RUnlock()\n\tif ok {\n\t\treturn val\n\t}\n\n\tif g.checkIPWhitelist(ip) {\n\t\tg.Lock()\n\t\tdefer g.Unlock()\n\t\tg.cache[ip] = true\n\t\treturn true\n\t}\n\n\tloc := g.handle.GetLocationByIP(ip)\n\tif loc != nil {\n\t\tval := loc.CountryCode == \"HR\"\n\t\tg.Lock()\n\t\tdefer g.Unlock()\n\t\tg.cache[ip] = val\n\t\treturn val\n\t}\n\tlog.Printf(\"warn ip %s not found in database\", ip)\n\treturn false\n}", "func (l *liveDNSConfig) Set(ip string) error {\n\tbody := &bytes.Buffer{}\n\terr := json.NewEncoder(body).Encode(&liveDNSRecord{TTL: 300, Values: []string{ip}})\n\tif err != nil {\n\t\treturn err\n\t}\n\tres, err := l.req(\"PUT\", body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// we should get a created code\n\tif res.StatusCode != http.StatusCreated {\n\t\treturn fmt.Errorf(\"Unexpected Response Status Code [%d]\", res.StatusCode)\n\t}\n\treturn nil\n}", "func (o ResolverRuleTargetIpOutput) Ip() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ResolverRuleTargetIp) string { return v.Ip }).(pulumi.StringOutput)\n}", "func (o *NetworkingProjectNetadpCreate) GetIpOk() ([]string, bool) {\n\tif o == nil || o.Ip == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Ip, true\n}", "func (m *NetworkIP) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateIPAddress(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *IaasDeviceStatusAllOf) SetIpAddress(v string) 
{\n\to.IpAddress = &v\n}", "func (s *AssociateResolverEndpointIpAddressInput) SetIpAddress(v *IpAddressUpdate) *AssociateResolverEndpointIpAddressInput {\n\ts.IpAddress = v\n\treturn s\n}", "func (s *AssociateResolverEndpointIpAddressInput) SetIpAddress(v *IpAddressUpdate) *AssociateResolverEndpointIpAddressInput {\n\ts.IpAddress = v\n\treturn s\n}", "func (i *ProjectIPServiceOp) Create(projectID string, ipReservationReq *IPReservationCreateRequest) (*IPAddressReservation, *Response, error) {\n\tif validateErr := ValidateUUID(projectID); validateErr != nil {\n\t\treturn nil, nil, validateErr\n\t}\n\tapiPath := path.Join(projectBasePath, projectID, ipBasePath)\n\tipr := new(IPAddressReservation)\n\n\tresp, err := i.client.DoRequest(\"POST\", apiPath, ipReservationReq, ipr)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\treturn ipr, resp, err\n}" ]
[ "0.68685144", "0.6585625", "0.62615025", "0.6082222", "0.6036351", "0.60349107", "0.6005871", "0.5975198", "0.5974457", "0.5963693", "0.58368796", "0.57715946", "0.5694154", "0.5694154", "0.5683421", "0.565358", "0.56393373", "0.56249624", "0.55932415", "0.5569949", "0.554892", "0.55458957", "0.55458957", "0.5534855", "0.55321014", "0.5526697", "0.55154485", "0.54880756", "0.54880756", "0.5470361", "0.5454235", "0.5454235", "0.5402894", "0.5391536", "0.5386915", "0.5386596", "0.5368798", "0.533761", "0.52958435", "0.5293033", "0.5270239", "0.5269338", "0.52656347", "0.5247201", "0.52434295", "0.5242784", "0.52351665", "0.52346945", "0.52022713", "0.51983494", "0.5173632", "0.51732737", "0.51729816", "0.51631457", "0.5141767", "0.5111317", "0.50885606", "0.5084874", "0.5082152", "0.5081126", "0.508081", "0.5079669", "0.5074866", "0.50747216", "0.50576174", "0.5049251", "0.504462", "0.50421566", "0.5034437", "0.5033929", "0.50095665", "0.50082594", "0.5007726", "0.49852377", "0.49842545", "0.49748665", "0.496834", "0.4957616", "0.49499184", "0.4925369", "0.4924372", "0.4918629", "0.49023864", "0.4894177", "0.48922482", "0.48818022", "0.488088", "0.48783645", "0.4873258", "0.48727232", "0.48727232", "0.4872715", "0.48630527", "0.48500583", "0.48458797", "0.48367298", "0.48322955", "0.4830922", "0.4830922", "0.4829771" ]
0.83904064
0
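A minimal usage sketch for the SaveIp record above (and the GetIp lookup in the next record), assuming the shape of model.IP given in the comment below: the field names are inferred from the Exec/Scan column order, while the time.Time field types, the import paths, and the example values are placeholders, not taken from the source. It illustrates the upsert semantics of INSERT OR REPLACE: a second SaveIp call with the same ip_address overwrites the row instead of failing on a key conflict.

import (
	"time"

	"example.local/db"    // hypothetical import path for the SqliteDB type
	"example.local/model" // hypothetical import path for the model package
)

// Assumed shape of model.IP, reconstructed from the columns used by
// SaveIp and GetIp:
//
//	type IP struct {
//		IPAddress    string
//		UUID         string
//		CreatedAt    time.Time
//		UpdatedAt    time.Time
//		ResponseCode int
//	}

func upsertExample(store *db.SqliteDB) error {
	now := time.Now()
	rec := &model.IP{
		IPAddress:    "203.0.113.7", // documentation-range example address
		UUID:         "example-uuid",
		CreatedAt:    now,
		UpdatedAt:    now,
		ResponseCode: 200,
	}
	// First call inserts the row. Because the statement is INSERT OR
	// REPLACE, the second call with the same ip_address replaces the
	// existing row rather than returning a constraint error.
	if err := store.SaveIp(rec); err != nil {
		return err
	}
	rec.ResponseCode = 404
	rec.UpdatedAt = time.Now()
	return store.SaveIp(rec)
}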
GetIp returns a model.IP if the record is stored in the database, or nil if it does not exist
func (sqlDb *SqliteDB) GetIp(ipAddr string) (*model.IP, error) {
	row := db.QueryRow("SELECT * FROM ip WHERE ip_address = ?", ipAddr)
	ip := model.IP{}

	err := row.Scan(&ip.IPAddress, &ip.UUID, &ip.CreatedAt, &ip.UpdatedAt, &ip.ResponseCode)
	if err != nil {
		if err == sql.ErrNoRows {
			// A missing row is not an error for callers: signal it with a nil record.
			return nil, nil
		}
		return nil, err
	}
	return &ip, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (test *Test) GetIP(projectName string, ip string) (models.IP, error) {\n\treturn tests.NormalIPs[0], nil\n}", "func (d *Driver) GetIP() (string, error) {\n\td.connectAPI()\n\treturn d.driver.GetEth0IPv4(d.Node, d.VMID)\n}", "func (st *Store) GetIP(host string) string {\n\tfor ip, hosts := range st.Records {\n\t\tif _, ok := hosts[host]; ok {\n\t\t\treturn ip\n\t\t}\n\t}\n\treturn \"\"\n}", "func (s Store) GetIP(mac net.HardwareAddr) (ip net.IP, err error) {\n\tl := &Lease{}\n\tl, err = s.leases.Mac(mac)\n\tif err != nil {\n\t\tlogger.Error(\"lease error %s\", err)\n\t\treturn nil, err\n\t}\n\tip = net.ParseIP(l.IP)\n\tlogger.Critical(\"Lease IP : %s\", ip)\n\treturn ip, nil\n}", "func (d *Driver) GetIP() (string, error) {\n\tvm, err := d.getVirtualMachine(true)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn vm.PrimaryIP().String(), nil\n}", "func (o *WafEventNetwork) GetIp() string {\n\tif o == nil || o.Ip == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Ip\n}", "func (d *driverMock) GetInstanceIP(ctx context.Context, id string) (string, error) {\n\tif d.GetInstanceIPErr != nil {\n\t\treturn \"\", d.GetInstanceIPErr\n\t}\n\tif d.cfg.UsePrivateIP {\n\t\treturn \"private_ip\", nil\n\t}\n\treturn \"ip\", nil\n}", "func (m *ServicePrincipalRiskDetection) GetIpAddress()(*string) {\n val, err := m.GetBackingStore().Get(\"ipAddress\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func (l Lease) GetIP() (ip net.IP) {\n\treturn net.ParseIP(l.IP)\n}", "func GetIP(vrf uint64, ip *bnet.IP) *IPAddress {\n\treturn pkgIPCache.get(vrf, ip)\n}", "func (ip *IPAddress)GetIpAddress() (ipAddress string, err error){\n\tnetInterfaces, err := net.Interfaces()\n\tif err != nil{\n\t\tlog4go.Error(err)\n\t\treturn\n\t}\n\tLoop:\n\tfor i := 0; i < len(netInterfaces); i++{\n\t\tif(netInterfaces[i].Flags & net.FlagUp) != 0{\n\t\t\taddrs, _ := netInterfaces[i].Addrs()\n\t\t\tfor _, address := range addrs{\n\t\t\t\tif ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback(){\n\t\t\t\t\tif ipnet.IP.To4()!=nil{\n\t\t\t\t\t\tipAddress = (ipnet.IP.String())\n\t\t\t\t\t\tip.IpAddress = ipAddress\n\t\t\t\t\t\tip.IpValid = true\n\t\t\t\t\t\tbreak Loop\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (o *WafEventNetwork) GetIpOk() (*string, bool) {\n\tif o == nil || o.Ip == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Ip, true\n}", "func getIP() (net.IP, error) {\n\tres, err := http.Get(\"http://checkip.amazonaws.com/\")\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer res.Body.Close()\n\tresData, err := ioutil.ReadAll(res.Body)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trawIP := strings.Trim(string(resData), \"\\n\")\n\n\treturn net.ParseIP(rawIP), nil\n}", "func (ia IfAddr) GetIP() string { return ia.IfaIP }", "func (m *BgpConfiguration) GetIpAddress()(*string) {\n val, err := m.GetBackingStore().Get(\"ipAddress\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func (s *Storage) GetOne(value string) (*models.IP, error) {\n\tses := s.GetDBSession()\n\tdefer ses.Close()\n\tt := models.NewIP()\n\terr := ses.DB(s.database).C(s.table).Find(bson.M{\"data\": value}).One(t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn t, nil\n}", "func GetIp() string {\n\n\tout, err := exc.ExecuteWithBash(\"ip route get 1.1.1.1 | grep -oP 'src \\\\K\\\\S+'\")\n\n\tip := strings.TrimSpace(out)\n\tif log.Check(log.WarnLevel, \"Getting RH IP \"+ip, err) {\n\t\treturn 
\"\"\n\t}\n\n\treturn ip\n}", "func (m *EntityDTO_StorageData) GetIpAddress() string {\n\tif m != nil && m.IpAddress != nil {\n\t\treturn *m.IpAddress\n\t}\n\treturn \"\"\n}", "func (c *Client) GetIP(id string, privateIPOnly bool) (string, error) {\n\tvar (\n\t\tmethod = \"GET\"\n\t\turi = fmt.Sprintf(\"%s/%s\", \"server\", id)\n\t)\n\n\tdata, err := c.newRequest(method, uri, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tvar s sakura.Response\n\tif err := json.Unmarshal(data, &s); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif privateIPOnly && len(s.Server.Interfaces) > 1 {\n\t\treturn s.Server.Interfaces[1].UserIPAddress, nil\n\t}\n\n\treturn s.Server.Interfaces[0].IPAddress, nil\n}", "func (_class PIFClass) GetIP(sessionID SessionRef, self PIFRef) (_retval string, _err error) {\n\t_method := \"PIF.get_IP\"\n\t_sessionIDArg, _err := convertSessionRefToXen(fmt.Sprintf(\"%s(%s)\", _method, \"session_id\"), sessionID)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_selfArg, _err := convertPIFRefToXen(fmt.Sprintf(\"%s(%s)\", _method, \"self\"), self)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_result, _err := _class.client.APICall(_method, _sessionIDArg, _selfArg)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_retval, _err = convertStringToGo(_method + \" -> \", _result.Value)\n\treturn\n}", "func (store *AttachmentStore) GetIPForNetwork(networkID string) (net.IP, bool) {\n\tstore.Lock()\n\tdefer store.Unlock()\n\tip, exists := store.networkToNodeLBIP[networkID]\n\treturn ip, exists\n}", "func GetIP(site, UserAgent string) string {\n\treturn getAPIResponse(site, UserAgent).ResponseIP\n}", "func GetIP() (ip string) {\n\n\thost, _ := os.Hostname()\n\taddrs, _ := net.LookupIP(host)\n\tfor _, addr := range addrs {\n\t\tif ipv4 := addr.To4(); ipv4 != nil && !ipv4.IsLoopback() {\n\t\t\t//fmt.Println(\"IPv4: \", ipv4.String())\n\t\t\tip = ipv4.String()\n\t\t}\n\t}\n\treturn ip\n}", "func (docker *Docker) GetIP() string {\n\treturn docker.ip\n}", "func GetIP() (ip string, err error) {\n\tvar res *http.Response\n\tif res, err = http.Get(API); err != nil {\n\t\treturn\n\t}\n\n\t// We could just get the string but for the sake of\n\t// consistency we go json.\n\tresIP := new(IPResponse)\n\tif err = json.NewDecoder(res.Body).Decode(resIP); err != nil {\n\t\treturn\n\t}\n\n\tip = resIP.IP\n\treturn\n}", "func (c *IPClient) Get(ctx context.Context, id uuid.UUID) (*IP, error) {\n\treturn c.Query().Where(ip.ID(id)).Only(ctx)\n}", "func GetIp() (string, int) {\n\taddrs, err := net.InterfaceAddrs()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\tvar ip string\n\tvar index int\n\tfor _, address := range addrs {\n\t\tif ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {\n\t\t\tif ipnet.IP.To4() != nil {\n\t\t\t\tfor i := 0; i < len(straddr); i++ {\n\t\t\t\t\tif (ipnet.IP.String() == straddr[i]) {\n\t\t\t\t\t\tip = straddr[i]\n\t\t\t\t\t\tindex = i+1\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn ip, index\n}", "func (g *DB) Lookup(ip net.IP) (*GeoIP, error) {\n\tif ip == nil {\n\t\treturn nil, fmt.Errorf(\"ip is nil\")\n\t}\n\n\tg.mutex.Lock()\n\tdefer g.mutex.Unlock()\n\n\tres := &GeoIP{\n\t\tIP: ip,\n\t}\n\n\t// ANONYMOUS IP\n\t//\n\tanon, err := g.reader.AnonymousIP(ip)\n\tif err == nil {\n\t\tres.Anonymous = Anonymous{\n\t\t\tIsAnonymous: anon.IsAnonymous,\n\t\t\tIsAnonymousVPN: anon.IsAnonymousVPN,\n\t\t\tIsHostingProvider: anon.IsHostingProvider,\n\t\t\tIsPublicProxy: anon.IsPublicProxy,\n\t\t\tIsTorExitNode: anon.IsTorExitNode,\n\t\t}\n\t}\n\n\t// CITY\n\t//\n\tcity, err 
:= g.reader.City(ip)\n\tif err == nil {\n\t\tsubdivisions := make([]string, len(city.Subdivisions), len(city.Subdivisions))\n\t\tfor i, sd := range city.Subdivisions {\n\t\t\tsubdivisions[i] = sd.Names[\"en\"]\n\t\t}\n\n\t\tres.City = City{\n\t\t\tAccuracyRadius: city.Location.AccuracyRadius,\n\t\t\tContinent: city.Continent.Names[\"en\"],\n\t\t\tContinentCode: city.Continent.Code,\n\t\t\tCountry: city.Country.Names[\"en\"],\n\t\t\tCountryCode: city.Country.IsoCode,\n\t\t\tIsAnonymousProxy: city.Traits.IsAnonymousProxy,\n\t\t\tIsSatelliteProvider: city.Traits.IsSatelliteProvider,\n\t\t\tLatitude: city.Location.Latitude,\n\t\t\tLongitude: city.Location.Longitude,\n\t\t\tMetroCode: city.Location.MetroCode,\n\t\t\tName: city.City.Names[\"en\"],\n\t\t\tPostcode: city.Postal.Code,\n\t\t\tRegisteredCountry: city.RegisteredCountry.Names[\"en\"],\n\t\t\tRegisteredCountryCode: city.RegisteredCountry.IsoCode,\n\t\t\tRepresentedCountry: city.RepresentedCountry.Names[\"en\"],\n\t\t\tRepresentedCountryCode: city.RepresentedCountry.IsoCode,\n\t\t\tRepresentedCountryType: city.RepresentedCountry.Type,\n\t\t\tSubdivisions: subdivisions,\n\t\t\tTimezone: city.Location.TimeZone,\n\t\t}\n\t} else {\n\t\treturn nil, fmt.Errorf(\"failed to load city data for %s\", ip)\n\t}\n\n\t// COUNTRY\n\t//\n\tcountry, err := g.reader.Country(ip)\n\tif err == nil {\n\t\tres.Country = Country{\n\t\t\tContinent: country.Continent.Names[\"en\"],\n\t\t\tContinentCode: country.Continent.Code,\n\t\t\tCountry: country.Country.Names[\"en\"],\n\t\t\tCountryCode: country.Country.IsoCode,\n\t\t\tIsAnonymousProxy: country.Traits.IsAnonymousProxy,\n\t\t\tIsSatelliteProvider: country.Traits.IsSatelliteProvider,\n\t\t\tRegisteredCountry: country.RegisteredCountry.Names[\"en\"],\n\t\t\tRegisteredCountryCode: country.RegisteredCountry.IsoCode,\n\t\t\tRepresentedCountry: country.RepresentedCountry.Names[\"en\"],\n\t\t\tRepresentedCountryCode: country.RepresentedCountry.IsoCode,\n\t\t\tRepresentedCountryType: country.RepresentedCountry.Type,\n\t\t}\n\t} else {\n\t\treturn nil, fmt.Errorf(\"failed to load country data for %s\", ip)\n\t}\n\n\treturn res, nil\n}", "func GetIP(r *http.Request, options ...*KeyOptions) net.IP {\n\tif len(options) >= 1 && options[0].TrustForwardHeader {\n\t\tip := r.Header.Get(\"X-Forwarded-For\")\n\t\tif ip != \"\" {\n\t\t\tparts := strings.SplitN(ip, \",\", 2)\n\t\t\tpart := strings.TrimSpace(parts[0])\n\t\t\treturn net.ParseIP(part)\n\t\t}\n\n\t\tip = strings.TrimSpace(r.Header.Get(\"X-Real-IP\"))\n\t\tif ip != \"\" {\n\t\t\treturn net.ParseIP(ip)\n\t\t}\n\t}\n\n\tremoteAddr := strings.TrimSpace(r.RemoteAddr)\n\thost, _, err := net.SplitHostPort(remoteAddr)\n\tif err != nil {\n\t\treturn net.ParseIP(remoteAddr)\n\t}\n\n\treturn net.ParseIP(host)\n}", "func (o *NetworkingProjectNetadpCreate) GetIp() []string {\n\tif o == nil || o.Ip == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn o.Ip\n}", "func GetIP() string {\n\tIP := os.Getenv(\"MYPRVIP\")\n\tif IP != \"\" {\n\t\treturn IP\n\t}\n\taddrs, err := net.InterfaceAddrs()\n\tif err != nil {\n\t\tlog.Printf(\"error in GetIP - %v\\n\", err)\n\t\treturn \"\"\n\t}\n\tfor _, address := range addrs {\n\t\t// return the first address that is not a loopback\n\t\tif ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {\n\t\t\tif ipnet.IP.To4() != nil {\n\t\t\t\treturn ipnet.IP.String()\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}", "func (db *ipDBManager) getIP(iface string) (string, int) {\n\tif addr, ok := db.interfaces[iface]; ok {\n\t\treturn addr, 
db.prefixLen\n\t}\n\n\tfor k, v := range db.ips {\n\t\tif v == \"\" {\n\t\t\tdb.ips[k] = iface\n\t\t\tdb.interfaces[iface] = k\n\t\t\treturn k, db.prefixLen\n\t\t}\n\t}\n\tglog.Error(\"IP addresses exhausted\")\n\n\treturn \"1.1.1.1\", 32\n}", "func GetIPAddress(result *gocni.CNIResult, task containerd.Task) (net.IP, error) {\n\t// Get the IP of the created interface\n\tvar ip net.IP\n\tfor ifName, config := range result.Interfaces {\n\t\tif config.Sandbox == NetNamespace(task) {\n\t\t\tfor _, ipConfig := range config.IPConfigs {\n\t\t\t\tif ifName != \"lo\" && ipConfig.IP.To4() != nil {\n\t\t\t\t\tip = ipConfig.IP\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif ip == nil {\n\t\treturn nil, fmt.Errorf(\"unable to get IP address for: %s\", task.ID())\n\t}\n\treturn ip, nil\n}", "func (o *NetworkLoadBalancerForwardingRuleTarget) GetIp() *string {\n\tif o == nil {\n\t\treturn nil\n\t}\n\n\treturn o.Ip\n\n}", "func Get_isp(ipaddress string) IP2Locationrecord {\n\treturn handleError(defaultDB.query(ipaddress, isp))\n}", "func (i *IPGetter) Get(ctx context.Context) (string, error) {\n\tr, err := http.Get(canihaz)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Couldn't retrieve external IP address: %w\", err)\n\t}\n\n\tip, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Could not read IP address from response: %w\", err)\n\t}\n\n\treturn strings.TrimSpace(fmt.Sprintf(\"%s\", ip)), nil\n}", "func getIP(ec2inst ec2Instance,c *ecs.Container) string{\n\tif len(c.NetworkInterfaces) > 0 {\n\t\treturn ptr.StringValue(c.NetworkInterfaces[0].PrivateIpv4Address)\n\t}\n\treturn ptr.StringValue(ec2inst.PrivateIpAddress)\n}", "func (o ResolverEndpointIpAddressOutput) Ip() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ResolverEndpointIpAddress) *string { return v.Ip }).(pulumi.StringPtrOutput)\n}", "func IP(c *fiber.Ctx) string {\n\tvar headerValue []byte\n\tif c.App().Config().ProxyHeader == \"*\" {\n\t\tfor _, headerName := range possibleHeaderes {\n\t\t\theaderValue = c.Request().Header.Peek(headerName)\n\t\t\tif len(headerValue) > 3 {\n\t\t\t\treturn string(fetchIpFromString.Find(headerValue))\n\t\t\t}\n\t\t}\n\t}\n\theaderValue = []byte(c.IP())\n\tif len(headerValue) <= 3 {\n\t\theaderValue = []byte(\"0.0.0.0\")\n\t}\n\n\t// find ip address in string\n\treturn string(fetchIpFromString.Find(headerValue))\n}", "func (d *DB) Get_isp(ipaddress string) (IP2Locationrecord, error) {\n\treturn d.query(ipaddress, isp)\n}", "func (cce *CCEClient) GetIP(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (string, error) {\n\t// TODO\n\treturn \"\", nil\n}", "func (ctx *Context) IP() string {\r\n\tvar ip = strings.Split(ctx.R.RemoteAddr, \":\")\r\n\tif len(ip) > 0 {\r\n\t\tif ip[0] != \"[\" {\r\n\t\t\treturn ip[0]\r\n\t\t}\r\n\t}\r\n\treturn \"127.0.0.1\"\r\n}", "func (b *OGame) GetPublicIP() (string, error) {\n\treturn b.getPublicIP()\n}", "func getIPFrom(dbConn ucdb.Db, ipStr string) (net.IP, *net.IPNet, error) {\n\tlog.Debug(\"\")\n\tincIP := func(ip net.IP) {\n\t\tfor j := len(ip) - 1; j >= 0; j-- {\n\t\t\tip[j]++\n\t\t\tif ip[j] > 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tisNetworkAddr := func(ip net.IP, ipnet net.IPNet) bool {\n\t\treturn ip.Equal(ipnet.IP)\n\t}\n\tip, ipnet, err := net.ParseCIDR(ipStr)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tlog.Debug(\"Checking if IP %+v is a network address or an unique IP\", ip)\n\tif isNetworkAddr(ip, *ipnet) {\n\t\tlog.Debug(\"Is a network addr\")\n\t\tincIP(ip)\n\t\tlog.Debug(\"Incremented ip %+v\", ip)\n\t\tfor 
ipnet.Contains(ip) {\n\t\t\tif err = dbConn.PutIP(ip); err != nil {\n\t\t\t\tlog.Debug(\"Error %s\", err)\n\t\t\t\tincIP(ip)\n\t\t\t} else {\n\t\t\t\treturn ip, ipnet, nil\n\t\t\t}\n\t\t}\n\t\treturn nil, nil, fmt.Errorf(\"reached maximum IPs used\")\n\t} else {\n\t\tlog.Debug(\"Is an unique IP\")\n\t\tif err = dbConn.PutIP(ip); err == nil {\n\t\t\treturn ip, ipnet, nil\n\t\t} else {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n}", "func GetIP(r *http.Request) (string, string) {\n\tfwd := r.Header.Get(\"X-Forwarded-For\")\n\taddrStr := \"\"\n\n\tif fwd != \"\" {\n\t\taddrStr = fwd\n\t} else {\n\t\taddrStr = r.RemoteAddr\n\t}\n\taddr := strings.Split(addrStr, \":\")\n\n\treturn addr[0], addr[1]\n}", "func (irkit *Irkit) GetIPAddress() string {\n\treturn irkit.Address\n}", "func (o *NetworkingProjectNetadpCreate) GetIpOk() ([]string, bool) {\n\tif o == nil || o.Ip == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Ip, true\n}", "func GetIP(r *http.Request) string {\n\tforwarded := r.Header.Get(\"X-FORWARDED-FOR\")\n\tif forwarded != \"\" {\n\t\treturn forwarded\n\t}\n\n\treturn r.RemoteAddr\n}", "func (g *IPCheck) Check(ip string) (ret bool) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlog.Printf(\"error geo.Check failed: %v\", r)\n\t\t\tret = true\n\t\t}\n\t}()\n\tif isLocalAddress(ip) {\n\t\treturn true\n\t}\n\tg.RLock()\n\tval, ok := g.cache[ip]\n\tg.RUnlock()\n\tif ok {\n\t\treturn val\n\t}\n\n\tif g.checkIPWhitelist(ip) {\n\t\tg.Lock()\n\t\tdefer g.Unlock()\n\t\tg.cache[ip] = true\n\t\treturn true\n\t}\n\n\tloc := g.handle.GetLocationByIP(ip)\n\tif loc != nil {\n\t\tval := loc.CountryCode == \"HR\"\n\t\tg.Lock()\n\t\tdefer g.Unlock()\n\t\tg.cache[ip] = val\n\t\treturn val\n\t}\n\tlog.Printf(\"warn ip %s not found in database\", ip)\n\treturn false\n}", "func GetIP(r *http.Request) string {\n\tforwarded := r.Header.Get(\"X-FORWARDED-FOR\")\n\tif forwarded != \"\" {\n\t\treturn forwarded\n\t}\n\treturn r.RemoteAddr\n}", "func GetIP() string {\n\tifaces, err := net.Interfaces()\n\tterr.PrintError(\"problem collecting local ip address\", err)\n\t// handle err\n\tfor _, i := range ifaces {\n\t\taddrs, err := i.Addrs()\n\t\tterr.PrintError(\"problem collecting local ip address\", err)\n\t\t// handle err\n\t\tfor _, addr := range addrs {\n\t\t\tvar ip net.IP\n\t\t\tswitch v := addr.(type) {\n\t\t\tcase *net.IPNet:\n\t\t\t\tip = v.IP\n\t\t\tcase *net.IPAddr:\n\t\t\t\tip = v.IP\n\t\t\t}\n\t\t\t//if the address is not loopback return it\n\t\t\tif ip.String() != \"127.0.0.1\" && ip.String() != \"::1\" {\n\t\t\t\treturn ip.String()\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}", "func (c *Ctx) IP() string {\n\tif len(c.Core.ProxyHeader) > 0 {\n\t\treturn c.Get(c.Core.ProxyHeader)\n\t}\n\treturn c.RemoteIP().String()\n}", "func GetIP(r *http.Request) string {\n\tfwd := r.Header.Get(\"X-FORWARDED-FOR\")\n\tif fwd != \"\" {\n\t\treturn fwd\n\t}\n\treturn r.RemoteAddr\n}", "func getIP(url string) string {\n\tres, err := http.Get(url)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\tdefer res.Body.Close()\n\tb, _ := io.ReadAll(res.Body)\n\treturn string(b)\n}", "func (i *Client) GetIP() (string, error) {\n\thttpClient := i.HTTPClient\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\n\tres, err := httpClient.Get(baseURL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer res.Body.Close()\n\n\tip, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(ip), nil\n}", "func (c *Client) GetIPAddress(id, compartmentID string) (string, error) 
{\n\tvnics, err := c.computeClient.ListVnicAttachments(context.Background(), core.ListVnicAttachmentsRequest{\n\t\tInstanceId: &id,\n\t\tCompartmentId: &compartmentID,\n\t})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(vnics.Items) == 0 {\n\t\treturn \"\", errors.New(\"instance does not have any configured VNICs\")\n\t}\n\n\tvnic, err := c.virtualNetworkClient.GetVnic(context.Background(), core.GetVnicRequest{VnicId: vnics.Items[0].VnicId})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif vnic.PublicIp == nil {\n\t\treturn *vnic.PrivateIp, nil\n\t}\n\n\treturn *vnic.PublicIp, nil\n}", "func (h *Handler) GetIP(object interface{}) (string, error) {\n\tswitch val := object.(type) {\n\tcase string:\n\t\tpod, err := h.Get(val)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn pod.Status.PodIP, nil\n\tcase *corev1.Pod:\n\t\treturn val.Status.PodIP, nil\n\tcase corev1.Pod:\n\t\treturn val.Status.PodIP, nil\n\tdefault:\n\t\treturn \"\", ErrInvalidToolsType\n\t}\n}", "func (sqlDb *SqliteDB) SaveIp(ip *model.IP) error {\n\tupsert, err := db.Prepare(\"INSERT OR REPLACE INTO ip (ip_address, uuid, created_at, updated_at, response_code) VALUES (?, ?, ?, ?, ?)\")\n\tdefer upsert.Close()\n\tif err != nil {\n\t\treturn errors.New(fmt.Sprint(\"ERROR preparing db insert statement:\", err.Error()))\n\t}\n\t_, err = upsert.Exec(ip.IPAddress, ip.UUID, ip.CreatedAt, ip.UpdatedAt, ip.ResponseCode)\n\n\tif err != nil {\n\t\treturn errors.New(fmt.Sprint(\"ERROR executing DB insert:\", err.Error()))\n\t}\n\treturn nil\n}", "func GetIPAddress(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tplainAddress := vars[\"domainName\"]\n\tif reply, ok := servers[plainAddress]; ok {\n\t\tw.WriteHeader(http.StatusOK)\n\t\tif enc, err := json.Marshal(reply); err == nil {\n\t\t\tw.Write([]byte(enc))\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t}\n\t} else {\n\t\treply := r.RemoteAddr\n\t\tif enc, err := json.Marshal(reply); err == nil {\n\t\t\tw.Write([]byte(enc))\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t}\n\t}\n}", "func (instance *Host) GetPublicIP(_ context.Context) (_ string, ferr fail.Error) {\n\tdefer fail.OnPanic(&ferr)\n\n\tif valid.IsNil(instance) {\n\t\treturn \"\", fail.InvalidInstanceError()\n\t}\n\n\tthis, err := instance.MetadataCore.properties.UnWrap()\n\tif err != nil {\n\t\treturn \"\", fail.ConvertError(err)\n\t}\n\n\tif val, ok := this[hostproperty.NetworkV2]; !ok {\n\t\treturn \"\", fail.NewError(\"corrupted metadata\")\n\t} else {\n\t\tif val == nil {\n\t\t\treturn \"\", fail.NewError(\"corrupted metadata\")\n\t\t}\n\t}\n\n\taclo, err := this[hostproperty.NetworkV2].UnWrap()\n\tif err != nil {\n\t\treturn \"\", fail.ConvertError(err)\n\t}\n\thnV2, _ := aclo.(*propertiesv2.HostNetworking) // nolint\n\n\tpublicIP := hnV2.PublicIPv4\n\tif publicIP == \"\" {\n\t\tpublicIP = hnV2.PublicIPv6\n\t\tif publicIP == \"\" {\n\t\t\treturn \"\", fail.NotFoundError(\"failed to find Public IP of Host '%s'\", instance.GetName())\n\t\t}\n\t}\n\n\treturn publicIP, nil\n}", "func (server Server) GetIP() string {\n\treturn server.IP\n}", "func (j * JoinHelper) GetIP () error{\n\t// get IP\n\tcmds := []string{\"echo \\\"net.ipv4.ip_forward=1\\\" >> /etc/sysctl.conf\",\n\t\t\"sysctl -p\"}//,\n\t\t//fmt.Sprintf(\"dhclient vpn_%s\", nicName)}\n\tfor _, command := range cmds {\n\t\tcmd := exec.Command(\"/bin/sh\", \"-c\", command)\n\t\terr := cmd.Run()\n\t\tif err != nil {\n\t\t\tlog.Warn().Str(\"command\", 
command).Str(\"error\", err.Error()).Msg(\"error executing\")\n\t\t\treturn err\n\t\t}\n\t}\n\treturn j.ExecuteDhClient()\n}", "func getRemoteIP() (ip string, err error) {\n\tclient := &http.Client{\n\t\tTimeout: time.Second * 10,\n\t}\n\n\treq, err := http.NewRequest(\"GET\", \"http://ip.cn\", nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdefer res.Body.Close()\n\n\tdata, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tip = string(ipPattern.Find(data))\n\n\treturn\n}", "func (th *TailHandler) getIp(info []string) (string, string) {\n\tif info[8] == \"-\" || info[8] == \"\" {\n\t\treturn th.Ip(info[0])\n\t}\n\treturn th.Ip(info[8])\n}", "func locateIpCountry(ipAddr net.IP) (CountryData, error) {\n var result CountryData\n\n record, err := dataStore.Country(ipAddr)\n\n if err != nil {\n return result, err\n }\n\n if record != nil {\n result.IPAddress = ipAddr\n result.Name = record.Country.Names[\"en\"]\n result.IsoCode = record.Country.IsoCode\n }\n\n return result, nil\n}", "func (c *Info) IP() net.IP {\n\tself := c.Self()\n\n\ts, err := net.ResolveTCPAddr(\"tcp\", DefaultPort(self))\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\treturn s.IP\n}", "func (s *store) Get(IP string) (string, error) {\n\ts.mutex.RLock()\n\tdefer s.mutex.RUnlock()\n\tif role, ok := s.rolesByIP[IP]; ok {\n\t\treturn role, nil\n\t}\n\tif s.defaultRole != \"\" {\n\t\tlog.Warnf(\"Using fallback role for IP %s\", IP)\n\t\treturn s.defaultRole, nil\n\t}\n\treturn \"\", fmt.Errorf(\"Unable to find role for IP %s\", IP)\n}", "func getExternalIP() string {\n\tresp, err := http.Get(\"https://api.ipify.org?format=json\")\n\tif (err != nil) {\n\t\tlog.Fatal(\"Do: \", err)\n\t\treturn \"\"\n\t}\n\tdefer resp.Body.Close()\n\n\tvar record IPFy\n\n\tif err := json.NewDecoder(resp.Body).Decode(&record); err != nil {\n\t\tlog.Fatal(\"Decode \", err)\n\t\treturn \"\"\n\t}\n\treturn record.IP\n\n}", "func GetIPAddr() string {\n\tipList := getIPList()\n\n\t// Try to return public IP first\n\tfor _, ip := range ipList {\n\t\tprivate, err := isPrivateIP(ip)\n\t\tif err != nil || private {\n\t\t\tcontinue\n\t\t}\n\n\t\treturn ip\n\t}\n\n\t// Return any IP if no public IP was found\n\tfor _, ip := range ipList {\n\t\treturn ip\n\t}\n\n\treturn \"\"\n}", "func GetIP(r *http.Request) string {\n\taddr := r.Header.Get(\"X-Forwarded-For\")\n\tif addr != \"\" {\n\t\treturn addr\n\t}\n\treturn r.RemoteAddr\n}", "func (o *WafEventNetwork) HasIp() bool {\n\tif o != nil && o.Ip != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func getIp() (string){\n\tconn, err := net.Dial(\"udp\", \"google.com:80\")\n if err != nil {\n\t fmt.Println(err.Error())\n\t log.Debugf(\"getIp udp error \", err.Error())\n\t return \"\"\n }\n defer conn.Close()\n // fmt.Println(\"conn.LocalAddr().String(),\", conn.LocalAddr().String())\n //log.Debugf(\"conn.LocalAddr().String(),\", conn.LocalAddr().String())\n // fmt.Println(strings.Split(conn.LocalAddr().String(), \":\")[0])\n return strings.Split(conn.LocalAddr().String(), \":\")[0]\n}", "func (o *NetworkingProjectNetadpCreate) HasIp() bool {\n\tif o != nil && o.Ip != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *NetworkLoadBalancerForwardingRuleTarget) GetIpOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\n\treturn o.Ip, true\n}", "func getIP(ip_str string) (net.IP, error) {\n\tip := net.ParseIP(ip_str)\n\tif ip != nil {\n\t\treturn ip.To4(), nil\n\t}\n\treturn nil, errors.New(\"Failed to 
parse ip\")\n}", "func (c *APIClient) GetIP(strID string) (string, error) {\n\tid := types.StringID(strID)\n\tif id.IsEmpty() {\n\t\treturn \"\", fmt.Errorf(\"ServerID is invalid: %s\", strID)\n\t}\n\tserver, err := sacloud.NewServerOp(c.caller).Read(context.Background(), c.Zone, id)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn server.Interfaces[0].IPAddress, nil\n}", "func (i *InternalData) GetInternalIP() {\n\n\tconn, err := net.Dial(\"udp\", \"8.8.8.8:80\")\n\tcheckErr(err)\n\tdefer conn.Close()\n\n\tlocalAddr := conn.LocalAddr().(*net.UDPAddr)\n\ti.IntIP = localAddr.IP.String()\n}", "func (m *AuditLogMutation) IPAddress() (r string, exists bool) {\n\tv := m.ip_address\n\tif v == nil {\n\t\treturn\n\t}\n\treturn *v, true\n}", "func GetIP(hostname string) (ip string, err error) {\n\th, err := hostsfile.Open(HostsFile)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn h.GetIP(hostname)\n}", "func GetIPAddr() (string, error) {\n\tif SunnyDay {\n\t\treturn \"192.168.1.1\", nil\n\t}\n\treturn \"\", fmt.Errorf(\"No Internet\")\n\n}", "func (i EC2Instance) IP() string {\n\treturn i.privateIP\n}", "func (c *Connection) PublicIP() string {\n\tif c.ipAddress == \"\" {\n\t\tc.getPublicIP()\n\t}\n\treturn c.ipAddress\n}", "func (c *Client) GetIPAddress(ctx context.Context, id string) (*InstanceIP, error) {\n\te, err := c.IPAddresses.Endpoint()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\te = fmt.Sprintf(\"%s/%s\", e, id)\n\tr, err := coupleAPIErrors(c.R(ctx).SetResult(&InstanceIP{}).Get(e))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn r.Result().(*InstanceIP), nil\n}", "func (input *Input) IP() string {\n\tips := input.Proxy()\n\tif len(ips) > 0 && ips[0] != \"\" {\n\t\trip := strings.Split(ips[0], \":\")\n\t\treturn rip[0]\n\t}\n\tip := strings.Split(input.Context.Request.RemoteAddr, \":\")\n\tif len(ip) > 0 {\n\t\tif ip[0] != \"[\" {\n\t\t\treturn ip[0]\n\t\t}\n\t}\n\treturn \"127.0.0.1\"\n}", "func IP(v string) predicate.IP {\n\treturn predicate.IP(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldIP), v))\n\t})\n}", "func (n *Node) IP() (ipv4 string, ipv6 string, err error) {\n\t// use the cached version first\n\tcachedIPv4, cachedIPv6 := n.cache.IP()\n\tif cachedIPv4 != \"\" && cachedIPv6 != \"\" {\n\t\treturn cachedIPv4, cachedIPv6, nil\n\t}\n\t// retrieve the IP address of the node using docker inspect\n\tlines, err := oci.Inspect(n.name, \"{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}\")\n\tif err != nil {\n\t\treturn \"\", \"\", errors.Wrap(err, \"failed to get container details\")\n\t}\n\tif len(lines) != 1 {\n\t\treturn \"\", \"\", errors.Errorf(\"file should only be one line, got %d lines\", len(lines))\n\t}\n\tips := strings.Split(lines[0], \",\")\n\tif len(ips) != 2 {\n\t\treturn \"\", \"\", errors.Errorf(\"container addresses should have 2 values, got %d values\", len(ips))\n\t}\n\tn.cache.set(func(cache *nodeCache) {\n\t\tcache.ipv4 = ips[0]\n\t\tcache.ipv6 = ips[1]\n\t})\n\treturn ips[0], ips[1], nil\n}", "func (i EC2Instance) PublicIP() string {\n\tif i.publicIP == \"\" {\n\t\tlog.Printf(\"ERROR: Attempting to get public IP of %s, which is not know\\n\", i.name)\n\t}\n\treturn i.publicIP\n}", "func (b *Box) GetPublicIp() string {\n\treturn b.PublicIp\n}", "func (g *IPCheck) Check(ip string) (ret bool) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlog.Errorf(\"error geo.Check failed: %v\", r)\n\t\t\tret = true\n\t\t}\n\t}()\n\tif isLocalAddress(ip) {\n\t\treturn true\n\t}\n\tg.RLock()\n\tval, ok := 
g.cache[ip]\n\tg.RUnlock()\n\tif ok {\n\t\treturn val\n\t}\n\n\tif g.checkIPWhitelist(ip) {\n\t\tg.Lock()\n\t\tdefer g.Unlock()\n\t\tg.cache[ip] = true\n\t\treturn true\n\t}\n\n\tvar cc string\n\n\tswitch {\n\tcase g.handleV1 != nil:\n\t\tlocation := g.handleV1.GetLocationByIP(ip)\n\t\tif location != nil {\n\t\t\tcc = location.CountryCode\n\t\t}\n\tcase g.handleV2 != nil:\n\t\ti := net.ParseIP(ip)\n\n\t\tif i == nil {\n\t\t\tlog.Errorf(\"error parsing IP address: %s\", ip)\n\t\t\treturn false\n\t\t}\n\n\t\tc, err := g.handleV2.Country(i)\n\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"error geo.Check on IP %s failed with error: %s\", ip, err)\n\t\t\treturn true\n\t\t}\n\n\t\tcc = c.Country.IsoCode\n\tdefault:\n\t\tlog.Errorf(\"geo ip handle not set\")\n\t\treturn true\n\t}\n\n\tif cc != \"\" {\n\t\tval := contains(g.allowedCountryCodes, cc)\n\t\tg.Lock()\n\t\tdefer g.Unlock()\n\t\tg.cache[ip] = val\n\t\treturn val\n\t}\n\tlog.Errorf(\"warn ip %s not found in database\", ip)\n\treturn false\n}", "func (ia *IPApi) MyIP() (ip string, err error) {\n\tresp, err := ia.Client.Get(MyIPUrl)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\treturn \"\", fmt.Errorf(\"status code: %d\", resp.StatusCode)\n\t}\n\n\tinfos := make(map[string]string)\n\terr = json.Unmarshal(body, &infos)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tip, ok := infos[\"ip\"]\n\tif !ok {\n\t\treturn \"\", ErrInvalidRespResult\n\t}\n\treturn ip, nil\n}", "func (ipfw *IPFilter) LookupDBIP(ip string) (*DBIPResult, error) {\n\tif len(ipfw.config.DBIPToken) == 0 {\n\t\tlogrus.Printf(\"ERR: DB_IP_TOKEN is empty?\")\n\t}\n\treq, err := http.NewRequest(\n\t\t\"GET\",\n\t\tfmt.Sprintf(\"https://api.db-ip.com/v2/%s/%s\", ipfw.config.DBIPToken, ip),\n\t\tnil,\n\t)\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient := http.DefaultClient\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer res.Body.Close()\n\n\t// Parse result\n\tb, _ := ioutil.ReadAll(res.Body)\n\tvar parsed *DBIPResult\n\n\tif err := json.Unmarshal(b, &parsed); err != nil {\n\t\treturn nil, err\n\t}\n\n\t//:wlogrus.Debugf(\"[IPFilter] Lookup result\")\n\n\treturn parsed, nil\n}", "func (v *Client) IP() string {\n\tif v.ip == \"\" {\n\t\tip := strings.TrimSpace(v.req.Header.Get(\"X-Real-Ip\"))\n\t\tif len(ip) > 0 {\n\t\t\tv.ip = ip\n\t\t\treturn ip\n\t\t}\n\t\tip = v.req.Header.Get(\"X-Forwarded-For\")\n\t\tif index := strings.IndexByte(ip, ','); index >= 0 {\n\t\t\tip = ip[0:index]\n\t\t}\n\t\tip = strings.TrimSpace(ip)\n\t\tif len(ip) > 0 {\n\t\t\tv.ip = ip\n\t\t\treturn ip\n\t\t}\n\t\tif ip, _, err := net.SplitHostPort(strings.TrimSpace(v.req.RemoteAddr)); err == nil {\n\t\t\tv.ip = ip\n\t\t\treturn ip\n\t\t}\n\t}\n\treturn v.ip\n}", "func (instance *Host) GetPrivateIP(_ context.Context) (_ string, ferr fail.Error) {\n\tdefer fail.OnPanic(&ferr)\n\n\tif valid.IsNil(instance) {\n\t\treturn \"\", fail.InvalidInstanceError()\n\t}\n\n\tthis, err := instance.MetadataCore.properties.UnWrap()\n\tif err != nil {\n\t\treturn \"\", fail.ConvertError(err)\n\t}\n\n\tif val, ok := this[hostproperty.NetworkV2]; !ok {\n\t\treturn \"\", fail.NewError(\"corrupted metadata\")\n\t} else {\n\t\tif val == nil {\n\t\t\treturn \"\", fail.NewError(\"corrupted metadata\")\n\t\t}\n\t}\n\n\taclo, err := this[hostproperty.NetworkV2].UnWrap()\n\tif err 
!= nil {\n\t\treturn \"\", fail.ConvertError(err)\n\t}\n\thnV2, _ := aclo.(*propertiesv2.HostNetworking) // nolint\n\n\tvar privateIP string\n\tif len(hnV2.IPv4Addresses) > 0 {\n\t\tprivateIP = hnV2.IPv4Addresses[hnV2.DefaultSubnetID]\n\t\tif privateIP == \"\" {\n\t\t\tprivateIP = hnV2.IPv6Addresses[hnV2.DefaultSubnetID]\n\t\t}\n\t}\n\n\tif privateIP == \"\" {\n\t\treturn \"\", fail.NotFoundError(\"failed to find Private IP of Host '%s'\", instance.GetName())\n\t}\n\treturn privateIP, nil\n}", "func (s *ServerInfo) GetIpAddr() string {\n\treturn s.ipAddr\n}", "func (registry *Registry) GetIP(_, reply *string) error {\n\t*reply = registryService.GetIP()\n\treturn nil\n}", "func (ins *EC2RemoteClient) getIPAddress() error {\n\tresult, err := ins.ec2Client.DescribeInstances(&ec2.DescribeInstancesInput{InstanceIds: aws.StringSlice([]string{ins.InstanceID})})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error getting instance details : %s\", err)\n\t}\n\tins.instanceIP = net.ParseIP(*result.Reservations[0].Instances[0].PublicIpAddress)\n\tif ins.instanceIP == nil {\n\t\treturn fmt.Errorf(\"Error parsing IP address\")\n\t}\n\treturn err\n}", "func GetIP() (string, error) {\n\tcmd := exec.Command(\"hostname\", \"-i\")\n\tvar out bytes.Buffer\n\tcmd.Stdout = &out\n\n\terr := cmd.Run()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn strings.TrimSpace(out.String()), nil\n}", "func (info *endpointsInfo) IP() string {\n\treturn info.ip\n}", "func (input *BeegoInput) IP() string {\n\tips := input.Proxy()\n\tif len(ips) > 0 && ips[0] != \"\" {\n\t\trip, _, err := net.SplitHostPort(ips[0])\n\t\tif err != nil {\n\t\t\trip = ips[0]\n\t\t}\n\t\treturn rip\n\t}\n\tif ip, _, err := net.SplitHostPort(input.Context.Request.RemoteAddr); err == nil {\n\t\treturn ip\n\t}\n\treturn input.Context.Request.RemoteAddr\n}", "func (s Store) GetFromIP(ip net.IP) (l *Lease, err error) {\n\tnewl := &Lease{}\n\tnewl, err = s.leases.IP(ip)\n\treturn newl, err\n}" ]
[ "0.6942995", "0.6917147", "0.6873968", "0.68166244", "0.67154366", "0.66841614", "0.6684137", "0.6615601", "0.6578332", "0.6521607", "0.6519366", "0.64663374", "0.6433581", "0.6413792", "0.6356611", "0.6311986", "0.63093966", "0.62835115", "0.62776005", "0.6260319", "0.6235197", "0.6217585", "0.6203501", "0.61808366", "0.61768866", "0.6168224", "0.61625904", "0.6149338", "0.61490905", "0.61483955", "0.6141432", "0.6136529", "0.61288095", "0.6114771", "0.6112293", "0.6101241", "0.6098467", "0.6083163", "0.6078758", "0.6077149", "0.60587966", "0.6050143", "0.60232913", "0.60111004", "0.601072", "0.6008852", "0.59904283", "0.59830195", "0.59706897", "0.5967448", "0.5956613", "0.5946666", "0.594426", "0.593506", "0.5932865", "0.5927425", "0.5927228", "0.59164476", "0.59027565", "0.58870673", "0.588661", "0.58812726", "0.587125", "0.5851974", "0.5824752", "0.58234614", "0.5813285", "0.58044845", "0.5794796", "0.57882476", "0.5787612", "0.57735664", "0.5746298", "0.5719259", "0.5711969", "0.5708459", "0.57028973", "0.570187", "0.5699153", "0.5687998", "0.5682153", "0.56819195", "0.5666511", "0.5665352", "0.56599575", "0.5656587", "0.5651423", "0.56509495", "0.5647039", "0.5646458", "0.56385255", "0.5637345", "0.5630703", "0.56281894", "0.56234384", "0.5622985", "0.5611667", "0.56085837", "0.5607879", "0.5596674" ]
0.7603416
0
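The negatives closing out the record above cluster around IP discovery, and several of them (`getIp`, `GetInternalIP`) lean on the same outbound-UDP-dial idiom. Below is a minimal standalone sketch of that idiom with a hypothetical `main` as the harness; it is an illustration of the recurring pattern, not an entry from the record. Dialing UDP performs no handshake and sends no packets, so the OS simply reports the source address it would route from:

```go
package main

import (
	"fmt"
	"net"
)

// outboundIP returns the local address the OS would use to reach a
// routable target. No traffic is sent: net.Dial with "udp" only
// binds/connects the socket. The target address is arbitrary.
func outboundIP() (string, error) {
	conn, err := net.Dial("udp", "8.8.8.8:80")
	if err != nil {
		return "", err
	}
	defer conn.Close()

	addr := conn.LocalAddr().(*net.UDPAddr)
	return addr.IP.String(), nil
}

func main() {
	ip, err := outboundIP()
	if err != nil {
		fmt.Println("no route:", err)
		return
	}
	fmt.Println(ip)
}
```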
formatUnix formats a unix timestamp into a human-readable string using the timeFormat global variable.
func formatUnix(unixTime int64) string {\n\tt := time.Unix(unixTime, 0)\n\treturn t.Format(timeFormat)\n}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func formatUnixTime(dt int64, timezone string) (string, string, error) {\n\tt := time.Unix(dt, 0)\n\n\tloc, err := time.LoadLocation(timezone)\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to load location\")\n\t\treturn \"\", \"\", errors.New(\"Unable to load timezone location\")\n\t}\n\n\tt = t.In(loc)\n\n\treturn t.Format(\"01/02/2006\"), t.Weekday().String(), nil\n}", "func TimeUnix() string {\r\n\tnaiveTime := time.Now().Unix()\r\n\tnaiveTimeString := strconv.FormatInt(naiveTime, 10)\r\n\treturn naiveTimeString\r\n}", "func (t *TimePeriod) FormatStartUnix() string {\n\treturn strconv.FormatInt(t.Start.Unix(), 10)\n}", "func parseUnixTimeString(ref *ShapeRef, memName, v string) string {\n\tref.API.AddSDKImport(\"private/protocol\")\n\treturn fmt.Sprintf(\"%s: %s,\\n\", memName, inlineParseModeledTime(protocol.UnixTimeFormatName, v))\n}", "func UnixTimestamp(sec int64, layout string) string {\n\treturn time.Unix(sec, 0).Format(layout)\n}", "func parseUnix(str string, def time.Time) (time.Time, error) {\n\tif len(str) == 0 {\n\t\treturn def, nil\n\t}\n\tunix, err := strconv.ParseInt(str, 10, 64)\n\tif err != nil {\n\t\treturn def, err\n\t}\n\treturn time.Unix(unix, 0), nil\n}", "func TimeUnix(inputTime time.Time) int64 {\n\treturn (inputTime.UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond)))\n}", "func daemonUnixTime(c *testing.T) string {\n\tc.Helper()\n\treturn parseEventTime(daemonTime(c))\n}", "func timeToFormat(t int64, f string) string {\n\tutc, err := time.LoadLocation(\"UTC\")\n\tif err != nil {\n\t\tlog.Println(\"time.LoadLocation failed:\", err)\n\t\treturn \"\"\n\t}\n\tparsedTime := time.Unix(t, 0)\n\treturn escape(parsedTime.In(utc).Format(f))\n}", "func (ins *FromUnix) String() string {\n\treturn fmt.Sprintf(\"%s, %s, %s, %s, %s, %s = time.FromUnix(%s)\",\n\t\tins.Year, ins.Month, ins.Day, ins.Hour, ins.Minute, ins.Second,\n\t\tins.Seconds)\n}", "func Unix(sec int64, nsec int64) Time {}", "func Format(s string, t time.Time) string {\n\trawtime := C.long(t.Unix())\n\tvar info *C.struct_tm\n\tC.ctime(&rawtime)\n\tinfo = C.localtime(&rawtime)\n\tbuf := new(C.char)\n\tformat := C.CString(s)\n\tdefer C.free(unsafe.Pointer(format))\n\tmaxsize := C.ulong(256)\n\tC.strftime(buf, maxsize, format, info)\n\treturn C.GoString(buf)\n}", "func formatTimestamp(t time.Time, milli bool) string {\n\tif milli {\n\t\treturn fmt.Sprintf(\"%d.%03d\", t.Unix(), t.Nanosecond()/1000000)\n\t}\n\n\treturn fmt.Sprintf(\"%d\", t.Unix())\n}", "func FormatDate(timestamp int64, format string) (ret string) {\n\treturn time.Unix(timestamp, 0).Format(format)\n}", "func timeFmt(w io.Writer, x interface{}, format string) {\n\t// note: os.Dir.Mtime_ns is in uint64 in ns!\n\ttemplate.HTMLEscape(w, strings.Bytes(time.SecondsToLocalTime(int64(x.(uint64)/1e9)).String()))\n}", "func (t Time) Unix() int64 {}", "func (p Packet) TimeUnix() int64 {\n\treturn int64(p.CdTime >> 30)\n}", "func DateFormat(times int64) string {\n\treturn time.Unix(times, 0).Format(\"2006/01/02 15:04:05\")\n}", "func MysqlTimeToUnix(ts string) int64 {\n\tloc, _ := time.LoadLocation(\"Local\")\n\tt, _ := time.ParseInLocation(goMysqlTimeFormat, ts, loc)\n\treturn t.Unix()\n}", "func NowTimeUnix() uint64 {\n\treturn uint64(time.Now().Unix())\n}", "func UnixToMysqlTime(ti int64) string {\n\treturn time.Unix(ti, 0).Format(goMysqlTimeFormat)\n}", "func getUnixTime(val []byte) (uint64, error) {\n\tif len(val) < 8 {\n\t\treturn 0, errors.New(\"len(val) < 8, want len(val) => 8\")\n\t}\n\tunixTime := ((uint64)(val[0]) | // 
1st\n\t\t((uint64)(val[1]) << 8) | // 2nd\n\t\t((uint64)(val[2]) << 16) | // 3rd\n\t\t((uint64)(val[3]) << 24) | // 4th\n\t\t((uint64)(val[4]) << 32) | // 5th\n\t\t((uint64)(val[5]) << 40) | // 6th\n\t\t((uint64)(val[6]) << 48) | // 7th\n\t\t((uint64)(val[7]) << 56)) // 8th\n\treturn unixTime, nil\n}", "func GetTimeUnixToTime(timeUnix int64) time.Time {\n\treturn time.Unix(timeUnix, 0)\n}", "func (o *MetricsDataValue) SetUnixTime(v string) {\n\to.UnixTime = &v\n}", "func Unix(sec, nsec int64) *Timestamp {\n\tt := time.Unix(sec, nsec).UTC()\n\treturn Time(t)\n}", "func validateFieldTimeUnix(fl validator.FieldLevel) bool {\n\tv := ParseTimeUnixUTC(fl.Field().String())\n\tif v.IsZero() {\n\t\treturn false\n\t}\n\treturn v.Unix() > unixTimeMin\n}", "func render_date_to_unix_timestamp(value string) ( int64 , error ) {\n ti , err := time.ParseInLocation(dateformat_onlydate,value,time.Local)\n if err != nil {\n return 0 , err\n }\n return ti.Unix() , nil\n}", "func IsUnixTime(str string) bool {\n\tif _, err := strconv.Atoi(str); err == nil {\n\t\treturn true\n\t}\n\treturn false\n}", "func (val Time) Unix() (sec, nsec int64) {\n\tsec = int64(val) / 1000000\n\tnsec = (int64(val) % 1000000) * 1000\n\treturn\n}", "func (gdb *Gdb) getUnixTimeStamp(t string, d int) (int64, error) {\n\tif st, err := time.Parse(timeFormatString, t); err != nil {\n\t\treturn -1, err\n\t} else {\n\t\treturn st.Add(time.Duration(d) * time.Second).Unix(), nil\n\t}\n}", "func (t Timestamp) Unix() int64 {\n\treturn time.Time(t).Unix()\n}", "func TestGetTimeFromUnixTime(t *testing.T) {\n\tvar basetime int64\n\tbasetime = 1500000000\n\n\tts := Timestamp{}\n\n\ttime := ts.GetTimeFromUnixTime(basetime)\n\n\tif time != \"Fri, 14 Jul 2017 02:40:00 UTC\" {\n\t\tt.Errorf(\"ts = \\t\\\"%v\\\";\\twant\\t\\\"Fri, 14 Jul 2017 02:40:00 UTC\\\"\", time)\n\t}\n}", "func (u *Util) DateTime2Unix(dateTime string) (int64, error) {\n\t//remove un useful info\n\tdateTime = strings.Replace(dateTime, \"T\", \" \", -1)\n\tdateTime = strings.Replace(dateTime, \"Z\", \"\", -1)\n\n\t//theTime, err := time.Parse(TimeLayOut, dateTime)\n\ttheTime, err := time.ParseInLocation(TimeLayoutStr, dateTime, time.Local)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn theTime.Unix(), nil\n}", "func AsUnixTime(t time.Time) UnixTime {\n\treturn UnixTime(t.Unix())\n}", "func AsUnixTime(t time.Time) UnixTime {\n\treturn UnixTime(t.Unix())\n}", "func timeConversion(unixTime string) string {\r\n\tuTime, _ := strconv.ParseInt(unixTime, 10, 64)\r\n\tdateTime := time.Unix(uTime, 0)\r\n\tloc, _ := time.LoadLocation(\"America/New_York\")\r\n\tnewTime := dateTime.In(loc).Format(\"2006-01-02 15:04:05\")\r\n\treturn newTime\r\n}", "func (t Timestamp) Unix() int64 {\n\treturn t.Time().Unix()\n}", "func (ts TimeStamp) Format(f string) string {\n\treturn ts.FormatInLocation(f, setting.DefaultUILocation)\n}", "func (u *Util) ConvertStrTime2Unix(timeStr string) (int64, error) {\n\tnowT := time.Now()\n\ttm, err := time.Parse(TimeLayoutStr, timeStr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\ttm.Sub(nowT)\n\treturn tm.UTC().Unix(), nil\n}", "func formatTime(t time.Time) string {\n\tif t.Unix() < 1 {\n\t\t// It's more confusing to display the UNIX epoch or a zero value than nothing\n\t\treturn \"\"\n\t}\n\t// Return ISO_8601 time format GH-3806\n\treturn t.Format(\"2006-01-02T15:04:05Z07:00\")\n}", "func (dt DateTime) Unix() int64 {\n\treturn dt.src.Unix()\n}", "func FormatNow() string {\n\treturn \"date_format(utc_timestamp(6),'%Y-%m-%d %H:%i:%s.%f')\"\n}", "func Unix(sec int64, 
nsec int64) *time.Time {\n\tt := time.Unix(sec, nsec)\n\treturn &t\n}", "func (t Time) Format(ft string) string {\n\tp := epoch.Add(time.Second * time.Duration(t))\n\tif ft == \"\" {\n\t\tft = \"02 Jan 2006 15:04:05\"\n\t}\n\treturn p.In(time.UTC).Format(ft)\n}", "func (ts Timespec) Unix() (sec int64, nsec int64) {\n\treturn int64(ts.Sec), int64(ts.Nsec)\n}", "func NewUnix(secs int64) Unix {\n\treturn Unix(time.Unix(secs, 0))\n}", "func Unix(sec int64, nsec int64) *Time {\n\treturn &Time{time.Unix(sec, nsec)}\n}", "func FormatTime(t int64) (f string) {\n\tts := time.Unix(t, 0)\n\tnow := time.Now()\n\tdiff := now.Unix() - ts.Unix()\n\tvar unit string\n\tvar m int64\n\tif diff < 60 {\n\t\tunit = \"second\"\n\t\tm = 1\n\t} else if diff < 3600 {\n\t\tunit = \"minute\"\n\t\tm = 60\n\t} else if diff < 86400 {\n\t\tunit = \"hour\"\n\t\tm = 3600\n\t} else if diff < 604800 {\n\t\tunit = \"day\"\n\t\tm = 86400\n\t} else if diff < 31556926 {\n\t\tunit = \"month\"\n\t\tm = 604800\n\t} else {\n\t\tunit = \"year\"\n\t\tm = 31556926\n\t}\n\tm = diff / m\n\tif m == 1 {\n\t\tif unit == \"hour\" {\n\t\t\tf = \"an hour ago\"\n\t\t} else {\n\t\t\tf = \"a \" + unit + \" ago\"\n\t\t}\n\t} else {\n\t\tf = fmt.Sprintf(\"%d %ss ago\", m, unit)\n\t}\n\treturn\n}", "func (t *Time) Format(format string) string {\n\trunes := []rune(format)\n\tbuffer := bytes.NewBuffer(nil)\n\tfor i := 0; i < len(runes); {\n\t\tswitch runes[i] {\n\t\tcase '\\\\':\n\t\t\tif i < len(runes)-1 {\n\t\t\t\tbuffer.WriteRune(runes[i+1])\n\t\t\t\ti += 2\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\treturn buffer.String()\n\t\t\t}\n\t\tcase 'W':\n\t\t\tbuffer.WriteString(strconv.Itoa(t.WeeksOfYear()))\n\t\tcase 'z':\n\t\t\tbuffer.WriteString(strconv.Itoa(t.DayOfYear()))\n\t\tcase 't':\n\t\t\tbuffer.WriteString(strconv.Itoa(t.DaysInMonth()))\n\t\tcase 'U':\n\t\t\tbuffer.WriteString(strconv.FormatInt(t.Unix(), 10))\n\t\tdefault:\n\t\t\tif runes[i] > 255 {\n\t\t\t\tbuffer.WriteRune(runes[i])\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif f, ok := formats[byte(runes[i])]; ok {\n\t\t\t\tresult := t.Time.Format(f)\n\t\t\t\t// Particular chars should be handled here.\n\t\t\t\tswitch runes[i] {\n\t\t\t\tcase 'j':\n\t\t\t\t\tfor _, s := range []string{\"=j=0\", \"=j=\"} {\n\t\t\t\t\t\tresult = strings.Replace(result, s, \"\", -1)\n\t\t\t\t\t}\n\t\t\t\t\tbuffer.WriteString(result)\n\t\t\t\tcase 'G':\n\t\t\t\t\tfor _, s := range []string{\"=G=0\", \"=G=\"} {\n\t\t\t\t\t\tresult = strings.Replace(result, s, \"\", -1)\n\t\t\t\t\t}\n\t\t\t\t\tbuffer.WriteString(result)\n\t\t\t\tcase 'u':\n\t\t\t\t\tbuffer.WriteString(strings.Replace(result, \"=u=.\", \"\", -1))\n\t\t\t\tcase 'w':\n\t\t\t\t\tbuffer.WriteString(weekMap[result])\n\t\t\t\tcase 'N':\n\t\t\t\t\tbuffer.WriteString(strings.Replace(weekMap[result], \"0\", \"7\", -1))\n\t\t\t\tcase 'S':\n\t\t\t\t\tbuffer.WriteString(formatMonthDaySuffixMap(result))\n\t\t\t\tdefault:\n\t\t\t\t\tbuffer.WriteString(result)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tbuffer.WriteRune(runes[i])\n\t\t\t}\n\t\t}\n\t\ti++\n\t}\n\treturn buffer.String()\n}", "func FormatDate(tsMillis uint64) string {\n\treturn time.Unix(0, int64(tsMillis*UnixTimeUnitOffset)).Format(DateFormat)\n}", "func (ts TimeStamp) FormatLong() string {\n\treturn ts.Format(time.RFC1123Z)\n}", "func TestTimeUnix(t *testing.T) {\n\t// Convey(\"TimeUnix\", t, func(c C) {\n\t// \twg := sync.WaitGroup{}\n\t// \tfor i := 0; i < 100; i++ {\n\t// \t\twg.Add(1)\n\t// \t\tgo func() {\n\t// \t\t\toldT := TimeUnix()\n\t// \t\t\tfor j := int64(0); j < 5; j++ {\n\t// 
\t\t\t\ttime.Sleep(time.Second + time.Millisecond*100)\n\t// \t\t\t\tnewT := TimeUnix()\n\t// \t\t\t\tc.So(newT, ShouldEqual, oldT+j+1)\n\t// \t\t\t}\n\t// \t\t\twg.Done()\n\t// \t\t}()\n\t// \t}\n\t// \twg.Wait()\n\t// })\n}", "func UTCNowUnix() int64 {\n\treturn (time.Now().UTC().UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond)))\n}", "func FormatTimeMillis(tsMillis uint64) string {\n\treturn time.Unix(0, int64(tsMillis*UnixTimeUnitOffset)).Format(TimeFormat)\n}", "func formatTime(ts, now time.Time) string {\n\tif d := ts.Sub(now); -12*time.Hour < d && d < 12*time.Hour {\n\t\treturn ts.Format(\"3:04 PM\")\n\t} else {\n\t\treturn ts.Format(\"Jan 2, 2006\")\n\t}\n}", "func convertUnixTime(timeInt int) time.Time {\n\n\ti := int64(timeInt)\n\treturn time.Unix(i, 0)\n\n}", "func FixActionCreatedUnixString(ctx context.Context) (int64, error) {\n\tif setting.Database.Type.IsSQLite3() {\n\t\tres, err := db.GetEngine(ctx).Exec(`UPDATE action SET created_unix = 0 WHERE created_unix = \"\"`)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn res.RowsAffected()\n\t}\n\treturn 0, nil\n}", "func (ns *Namespace) Format(layout string, v interface{}) (string, error) {\n\tt, err := cast.ToTimeE(v)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn t.Format(layout), nil\n}", "func TimestampToUnixMillisecondsString(time time.Time) string {\n\treturn strconv.FormatInt(time.Unix()*1000, 10)\n}", "func (o *MetricsDataValue) GetUnixTime() string {\n\tif o == nil || o.UnixTime == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.UnixTime\n}", "func FormatTime(ts time.Time) string {\n\tts = ts.In(time.UTC)\n\n\tval := make([]byte, 6)\n\n\tym := uint8(ts.Year()-epochYear)<<4 | uint8(ts.Month())\n\tymOff := uint8(ym / alphabets)\n\tval[0] = alphabet[ym%alphabets] // year & month\n\n\tval[1] = alphabet[ts.Day()] // day\n\n\thourOff := uint8(ts.Hour() / alphabets)\n\tval[2] = alphabet[ts.Hour()%alphabets] // hour\n\n\tminuteOff := uint8(ts.Minute() / alphabets)\n\tval[3] = alphabet[ts.Minute()%alphabets] // minute\n\n\tsecondOff := uint8(ts.Second() / alphabets)\n\tval[4] = alphabet[ts.Second()%alphabets] // second\n\n\tval[5] = alphabet[ymOff<<3|hourOff<<2|minuteOff<<1|secondOff] // off\n\n\treturn string(val)\n}", "func (d Dispatcher) JobSubmisstionTimeUnix(id string) (int64, error) {\n\tj, err := d.GetBC().FindJob(id)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn j.GetSubmissionTime(), nil\n}", "func fnFormat(ctx Context, doc *JDoc, params []string) interface{} {\n\tstats := ctx.Value(EelTotalStats).(*ServiceStats)\n\tif params == nil || len(params) == 0 || len(params) > 3 {\n\t\tctx.Log().Error(\"error_type\", \"func_format\", \"op\", \"format\", \"cause\", \"wrong_number_of_parameters\", \"params\", params)\n\t\tstats.IncErrors()\n\t\tAddError(ctx, SyntaxError{fmt.Sprintf(\"wrong number of parameters in call to format function\"), \"format\", params})\n\t\treturn \"\"\n\t}\n\tts := time.Now()\n\tif len(params) >= 1 {\n\t\tms, err := strconv.Atoi(extractStringParam(params[0]))\n\t\tif err != nil {\n\t\t\tctx.Log().Error(\"error_type\", \"func_format\", \"op\", \"format\", \"cause\", \"time_stamp_expected\", \"params\", params, \"error\", err.Error())\n\t\t\tstats.IncErrors()\n\t\t\tAddError(ctx, SyntaxError{fmt.Sprintf(\"time stamp parameter expected in call to format function\"), \"format\", params})\n\t\t\treturn \"\"\n\t\t}\n\t\tts = time.Unix(int64(ms/1000), 0)\n\t}\n\tlayout := \"3:04pm\"\n\tif len(params) >= 2 {\n\t\tlayout = 
extractStringParam(params[1])\n\t}\n\tif len(params) == 3 {\n\t\ttz, err := time.LoadLocation(extractStringParam(params[2]))\n\t\tif err == nil {\n\t\t\tts = ts.In(tz)\n\t\t} else {\n\t\t\tctx.Log().Error(\"error_type\", \"func_format\", \"op\", \"format\", \"cause\", \"failed_loading_location\", \"location\", extractStringParam(params[2]), \"error\", err.Error())\n\t\t\tAddError(ctx, RuntimeError{fmt.Sprintf(\"failed loading location %s in call to format function\", extractStringParam(params[2])), \"format\", params})\n\t\t}\n\t}\n\treturn ts.Format(layout)\n}", "func FormatTime(t time.Time) string {\n\treturn fmt.Sprintf(`\"%s\"`, t.UTC().Format(GerritTimestampLayout))\n}", "func formatTime(t time.Time) string {\n\tzone, _ := t.Zone()\n\t// NOTE: Tried to use time#Format(), but it is very weird implementation.\n\t// Third-party libraries seem not to be maintained.\n\treturn fmt.Sprintf(\n\t\t\"%d%02d%02d_%02d%02d_%02d_%06d_%s\",\n\t\tt.Year(), t.Month(), t.Day(),\n\t\tt.Hour(), t.Minute(),\n\t\tt.Second(),\n\t\tt.Nanosecond() / 1000,\n\t\tzone,\n\t)\n}", "func (d Date) Unix() int64 {\n\treturn d.ToTime().Unix()\n}", "func FormatSeconds(seconds float64) string {\n\t// Make sure localised strings are fetched\n\tlocOnce.Do(func() {\n\t\tlocDay = glib.Local(\"one day\")\n\t\tlocDays = glib.Local(\"days\")\n\t})\n\n\tminutes, secs := int(seconds)/60, int(seconds)%60\n\thours, mins := minutes/60, minutes%60\n\tdays, hrs := hours/24, hours%24\n\tswitch {\n\tcase days > 1:\n\t\treturn fmt.Sprintf(\"%d %s %d:%02d:%02d\", days, locDays, hrs, mins, secs)\n\tcase days == 1:\n\t\treturn fmt.Sprintf(\"%s %d:%02d:%02d\", locDay, hrs, mins, secs)\n\tcase hours >= 1:\n\t\treturn fmt.Sprintf(\"%d:%02d:%02d\", hrs, mins, secs)\n\tdefault:\n\t\treturn fmt.Sprintf(\"%d:%02d\", mins, secs)\n\t}\n}", "func (t epochTime) Format(str string) string {\n\treturn time.Time(t).Format(str)\n}", "func unixTimeNow() int64 {\n\treturn time.Now().Unix()\n}", "func UTCUnixToTime(timestamp int64) time.Time {\n\treturn time.Unix(0, timestamp*int64(1000000))\n}", "func FormatTime(t time.Time) string {\n\tyear, month, day := t.Date()\n\tif year < 1 || year > 9999 {\n\t\treturn t.Format(TimeFormatLayout)\n\t}\n\thour, min, sec := t.Clock()\n\tmillisecond := t.Nanosecond() / 1e6\n\n\tyear100 := year / 100\n\tyear1 := year % 100\n\tmillisecond100 := millisecond / 100\n\tmillisecond1 := millisecond % 100\n\n\tvar result [23]byte\n\tresult[0], result[1], result[2], result[3] = digits10[year100], digits01[year100], digits10[year1], digits01[year1]\n\tresult[4] = '-'\n\tresult[5], result[6] = digits10[month], digits01[month]\n\tresult[7] = '-'\n\tresult[8], result[9] = digits10[day], digits01[day]\n\tresult[10] = ' '\n\tresult[11], result[12] = digits10[hour], digits01[hour]\n\tresult[13] = ':'\n\tresult[14], result[15] = digits10[min], digits01[min]\n\tresult[16] = ':'\n\tresult[17], result[18] = digits10[sec], digits01[sec]\n\tresult[19] = '.'\n\tresult[20], result[21], result[22] = digits01[millisecond100], digits10[millisecond1], digits01[millisecond1]\n\treturn string(result[:])\n}", "func Unix(origin string) *Clock {\n\treturn New(UNIXTICK, origin)\n}", "func stringToUnixTime(s string) int64 {\n\t// Parse YYYY-MM-DD\n\ttimeT, err := time.Parse(\"2006-01-02\", s)\n\tif err != nil {\n\t\tErrorLog(\"cannot convert string: \"+s+\"to millisecond: %s\", err.Error())\n\t\treturn 0\n\t}\n\treturn timeT.Unix()\n}", "func formatTime(second int) string {\n\thh := second / 3600\n\tmm := second % 3600 / 60\n\tss := second % 60\n\treturn 
fmt.Sprintf(\"%02d:%02d:%02d\", hh, mm, ss)\n}", "func timeFormat(i interface{}) string {\n\tif i == nil {\n\t\treturn \"\"\n\t}\n\n\treturn fmt.Sprintf(\"%s\", i)\n}", "func FormatTime(fmtTimeStampz string, sqlTypeName string, t time.Time) (v interface{}) {\n\tswitch sqlTypeName {\n\tcase types.Time:\n\t\ts := t.Format(\"2006-01-02 15:04:05\") // time.RFC3339\n\t\tv = s[11:19]\n\tcase types.Date:\n\t\tv = t.Format(\"2006-01-02\")\n\tcase types.DateTime, types.TimeStamp, types.Varchar: // !DarthPestilane! format time when sqlTypeName is schemas.Varchar.\n\t\tv = t.Format(\"2006-01-02 15:04:05\")\n\tcase types.TimeStampz:\n\t\tif fmtTimeStampz != \"\" {\n\t\t\t// dialect.URI().DBType == types.MSSQL ? \"2006-01-02T15:04:05.9999999Z07:00\"\n\t\t\tv = t.Format(fmtTimeStampz)\n\t\t} else {\n\t\t\t// if dialect.URI().DBType == types.MSSQL {\n\t\t\t// \tv = t.Format(\"2006-01-02T15:04:05.9999999Z07:00\")\n\t\t\t// } else {\n\t\t\tv = t.Format(time.RFC3339Nano)\n\t\t}\n\tcase types.BigInt, types.Int:\n\t\tv = t.Unix()\n\tdefault:\n\t\tv = t\n\t}\n\treturn\n}", "func (d Date) Format(ft string) string {\n\tt := epoch.Add(time.Hour * 24 * time.Duration(d))\n\tif ft == \"\" {\n\t\tft = \"02 Jan 2006\"\n\t}\n\treturn t.In(time.UTC).Format(ft)\n}", "func timeToUnix(t time.Time) int64 {\n\tif t.IsZero() {\n\t\treturn 0\n\t}\n\n\treturn t.Unix()\n}", "func (o *os) GetUnixTime() gdnative.Int {\n\to.ensureSingleton()\n\t//log.Println(\"Calling _OS.GetUnixTime()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"_OS\", \"get_unix_time\")\n\n\t// Call the parent method.\n\t// int\n\tretPtr := gdnative.NewEmptyInt()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewIntFromPointer(retPtr)\n\treturn ret\n}", "func ParseFormat(format string) string {\n\tformat = strings.ReplaceAll(format, \"YYYY\", \"2006\") // Long year\n\tformat = strings.ReplaceAll(format, \"YY\", \"06\") // Short year\n\tformat = strings.ReplaceAll(format, \"MM\", \"01\") // Month (2-digit)\n\tformat = strings.ReplaceAll(format, \"M\", \"1\") // Month (1-digit)\n\tformat = strings.ReplaceAll(format, \"DD\", \"02\") // Day (2-digit)\n\tformat = strings.ReplaceAll(format, \"D\", \"2\") // Day (1-digit)\n\n\tformat = strings.ReplaceAll(format, \"hh\", \"15\") // Hour (2-digit)\n\tformat = strings.ReplaceAll(format, \"mm\", \"04\") // Minute (2-digit)\n\tformat = strings.ReplaceAll(format, \"m\", \"4\") // Minute (1-digit)\n\tformat = strings.ReplaceAll(format, \"ss\", \"05\") // Second (2-digit)\n\tformat = strings.ReplaceAll(format, \"s\", \"5\") // Second (1-digit)\n\n\treturn format\n}", "func FormatTmsp(num int) string {\n\treturn \"date_format(?,'%Y-%m-%d %H:%i:%s.%f')\"\n}", "func formatTime(t timeofday.TimeOfDay, tmp []byte) []byte {\n\t// time.Time's AppendFormat does not recognize 2400, so special case it accordingly.\n\tif t == timeofday.Time2400 {\n\t\treturn []byte(pgTime2400Format)\n\t}\n\treturn t.ToTime().AppendFormat(tmp, pgTimeFormat)\n}", "func ParseTimeUnixUTC(in string) time.Time {\n\ti, err := strconv.ParseInt(in, 10, 64)\n\tif err != nil {\n\t\treturn time.Time{}\n\t}\n\tv := time.Unix(i, 0)\n\treturn v\n}", "func FormatTime(theTime time.Time, layout string) (r string) {\n\tif theTime.IsZero() {\n\t\treturn \"\"\n\t}\n\n\treturn theTime.Format(layout)\n}", "func (t UnixTime) 
String() string {\n\treturn time.Time(t).String()\n}", "func GetUnixTimestamp() uint64 {\n\treturn uint64(time.Now().UnixNano()) / uint64(time.Millisecond)\n}", "func FormatDate(t int64) string {\n\texpTime := time.Unix(t, 0)\n\texpTimeString := expTime.Format(time.RFC3339)\n\t// Remove the last 6 characters (-03:00)\n\texpTimeString = expTimeString[:len(expTimeString)-6]\n\n\treturn expTimeString\n}", "func formatStrftime(in string) string {\n\treplacements := map[string]string{\n\t\t\"%p\": \"PM\",\n\t\t\"%Y\": \"2006\",\n\t\t\"%y\": \"06\",\n\t\t\"%m\": \"01\",\n\t\t\"%d\": \"02\",\n\t\t\"%H\": \"15\",\n\t\t\"%M\": \"04\",\n\t\t\"%S\": \"05\",\n\t}\n\n\tout := in\n\n\tfor bad, good := range replacements {\n\t\tout = strings.ReplaceAll(out, bad, good)\n\t}\n\treturn out\n}", "func FormatSince(t time.Time) string {\n\tconst (\n\t\tDecisecond = 100 * time.Millisecond\n\t\tDay = 24 * time.Hour\n\t)\n\tts := time.Since(t)\n\tsign := time.Duration(1)\n\tif ts < 0 {\n\t\tsign = -1\n\t\tts = -ts\n\t}\n\tts += +Decisecond / 2\n\td := sign * (ts / Day)\n\tts = ts % Day\n\th := ts / time.Hour\n\tts = ts % time.Hour\n\tm := ts / time.Minute\n\tts = ts % time.Minute\n\ts := ts / time.Second\n\tts = ts % time.Second\n\tf := ts / Decisecond\n\ty := d / 365\n\treturn fmt.Sprintf(\"P%dY%dD%dH%dM%d.%dS\", y, d, h, m, s, f)\n}", "func fromUnixMilli(ms int64) time.Time {\n\treturn time.Unix(ms/int64(millisInSecond), (ms%int64(millisInSecond))*int64(nsInSecond))\n}", "func NewUnixUptimeFn() tact.GetDataFn {\n\treturn func(session *tact.Session) <-chan []byte {\n\t\treturn collector.SSHRex(session, upTimeCmd, upTimeParser)\n\t}\n}", "func (f DefaultFormatter) Format(event *Event) string {\n\tyear, month, day := event.Time.Date()\n\thour, minute, second := event.Time.Clock()\n\tlevelString := event.Level.String()\n\trightAlignedLevel := strings.Repeat(\" \", 8-len(levelString)) + levelString\n\tmsg := event.Msg\n\tif len(event.Args) > 0 {\n\t\tmsg = fmt.Sprintf(event.Msg, event.Args...)\n\t}\n\tlines := strings.Split(msg, \"\\n\")\n\tfor i, line := range lines {\n\t\tlines[i] = \"\\t\" + line\n\t}\n\tmsg = strings.Join(lines, \"\\n\")\n\treturn fmt.Sprintf(\n\t\t\"%d-%02d-%02d %02d:%02d:%02d: %s: %s: at %s in %s, line %d:\\n%s\\n\\n\",\n\t\tyear, month, day, hour, minute, second,\n\t\trightAlignedLevel, event.Name, event.FuncName,\n\t\tfilepath.Base(event.File), event.Line,\n\t\tstrings.TrimRightFunc(msg, unicode.IsSpace))\n}", "func FormatTimestamp(t time.Time) float64 {\n\ts := fmt.Sprintf(\"%10.7f\", float64(t.UnixNano())/1e9)\n\tts, _ := strconv.ParseFloat(s, 64)\n\treturn ts\n}", "func (ts TimeStamp) FormatDate() string {\n\treturn time.Unix(int64(ts), 0).String()[:10]\n}", "func (ev *Event) Format(format string) string {\n\treturn fmt.Sprintf(\"Start um %s - Name %s\\n\", ev.Start.Format(\"15:04\"), ev.Name)\n\t//return fmt.Sprintf(\"Start um %s - Name %s\\n%s\\n\", ev.Start.Format(\"15:04\"), ev.Name, ev.Info)\n}", "func unixMilli(msec int64) time.Time {\n\treturn time.Unix(msec/1e3, (msec%1e3)*1e6)\n}", "func (j *Job) formattedTime() string {\n\tparsedCronTimeStr := fmt.Sprintf(\"%d:%d\", j.Hours, j.Minutes)\n\tif j.Hours < 10 {\n\t\tparsedCronTimeStr = fmt.Sprintf(\"0%d:%d\", j.Hours, j.Minutes)\n\t}\n\n\tif j.Minutes < 10 {\n\t\tparsedCronTimeStr = fmt.Sprintf(\"%d:0%d\", j.Hours, j.Minutes)\n\t}\n\n\tif j.Minutes < 10 && j.Hours < 10 {\n\t\tparsedCronTimeStr = fmt.Sprintf(\"0%d:0%d\", j.Hours, j.Minutes)\n\t}\n\n\treturn parsedCronTimeStr\n}", "func (f *GlogFormatter) Format(context log.LogContext) 
[]byte {\n\tres := &bytes.Buffer{}\n\n\tfile := context.File\n\tslash := len(file) - 1\n\tfor ; slash >= 0; slash-- {\n\t\tif file[slash] == filepath.Separator {\n\t\t\tbreak\n\t\t}\n\t}\n\tif slash >= 0 {\n\t\tfile = file[slash+1:]\n\t}\n\n\t_, month, day := context.Time.Date()\n\thour, minute, second := context.Time.Clock()\n\tf.tmp[0] = log.UcShortestSeverityStrings[log.SeverityToIndex(context.Severity)][0]\n\tlog.TwoDigits(&f.tmp, 1, int(month))\n\tlog.TwoDigits(&f.tmp, 3, day)\n\tf.tmp[5] = ' '\n\tlog.TwoDigits(&f.tmp, 6, hour)\n\tf.tmp[8] = ':'\n\tlog.TwoDigits(&f.tmp, 9, minute)\n\tf.tmp[11] = ':'\n\tlog.TwoDigits(&f.tmp, 12, second)\n\tf.tmp[14] = '.'\n\tlog.NDigits(&f.tmp, 6, 15, context.Time.Nanosecond()/1000)\n\tf.tmp[21] = ' '\n\tlog.NDigits(&f.tmp, 5, 22, context.Pid)\n\tf.tmp[27] = ' '\n\tres.Write(f.tmp[:28])\n\tres.WriteString(file)\n\tf.tmp[0] = ':'\n\tn := log.Itoa(&f.tmp, 1, context.Line)\n\tf.tmp[n+1] = ']'\n\tf.tmp[n+2] = ' '\n\tres.Write(f.tmp[:n+3])\n\tmessage := \"\"\n\tif context.Format != nil {\n\t\tmessage = fmt.Sprintf(*context.Format, context.Args...)\n\t} else {\n\t\tmessage = fmt.Sprint(context.Args...)\n\t}\n\n\tres.WriteString(message)\n\n\tl := len(message)\n\tif l > 0 && message[l-1] != '\\n' {\n\t\tres.WriteRune('\\n')\n\t}\n\n\treturn res.Bytes()\n}", "func timeFormatter(t time.Time) string {\n\treturn time.Now().Format(time.RFC822)\n}", "func (t TimeInfo) Format(f string) string {\n\n\tr := []rune(f)\n\tvar s string\n\tfor _, v := range r {\n\t\tswitch v {\n\t\tcase 'Y':\n\t\t\ts += t.Year\n\t\tcase 'm':\n\t\t\ts += t.Month\n\t\tcase 'd':\n\t\t\ts += t.Day\n\t\tcase 'H':\n\t\t\ts += t.Hour\n\t\tcase 'i':\n\t\t\ts += t.Minute\n\t\tcase 's':\n\t\t\ts += t.Second\n\t\tdefault:\n\t\t\ts += string(v)\n\n\t\t}\n\t}\n\n\treturn s\n\n}" ]
[ "0.76288587", "0.6761688", "0.6435162", "0.622314", "0.6068956", "0.60416585", "0.5949711", "0.5738175", "0.56880945", "0.567122", "0.5648008", "0.5590446", "0.55742025", "0.55638146", "0.55632454", "0.55522686", "0.55406225", "0.55078954", "0.5498698", "0.54594755", "0.5437172", "0.54344773", "0.5402124", "0.5380922", "0.53705263", "0.5362692", "0.5349253", "0.53106886", "0.5309995", "0.5309207", "0.5262755", "0.5254386", "0.5243437", "0.5223598", "0.5223598", "0.52105445", "0.52009326", "0.5196711", "0.5194108", "0.515222", "0.5151668", "0.5140929", "0.5129826", "0.51259625", "0.51247114", "0.51024646", "0.51020247", "0.50907874", "0.50846535", "0.5075891", "0.5060332", "0.5049583", "0.50473624", "0.5034428", "0.50335866", "0.4997238", "0.49966362", "0.4984341", "0.49727604", "0.49640843", "0.49558717", "0.49552646", "0.49522984", "0.4946578", "0.49349982", "0.493427", "0.48931402", "0.48819423", "0.48805836", "0.48653775", "0.4864777", "0.484102", "0.48336366", "0.48304456", "0.48283702", "0.4814323", "0.4811796", "0.47909003", "0.47907963", "0.47905216", "0.47868508", "0.47830212", "0.47810352", "0.4759097", "0.4754772", "0.47458997", "0.47319517", "0.47259614", "0.47199726", "0.47064856", "0.47036642", "0.46893278", "0.46840718", "0.46759757", "0.46671718", "0.4658892", "0.46483064", "0.464565", "0.46419683", "0.4639712" ]
0.8631524
0
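The positive document above is self-contained except for the `timeFormat` global it references. A minimal, hypothetical harness follows; the layout assigned to `timeFormat` is an assumption (the record never shows it), and the sample epoch reuses the one from the record's `TestGetTimeFromUnixTime` negative:

```go
package main

import (
	"fmt"
	"time"
)

// timeFormat is assumed here; the record only references the global by name.
var timeFormat = "2006-01-02 15:04:05"

// formatUnix is the record's positive document, reproduced verbatim.
func formatUnix(unixTime int64) string {
	t := time.Unix(unixTime, 0)
	return t.Format(timeFormat)
}

func main() {
	// 1500000000 is 2017-07-14 02:40:00 UTC; time.Unix yields local time,
	// so the printed string depends on the machine's zone.
	fmt.Println(formatUnix(1500000000))
}
```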
PrepayBalanceKey turns an address into the key used to get the prepaid balance from the sds store
func PrepayBalanceKey(acc []byte) []byte { return append(PrepayBalancePrefix, acc...) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func initStateKeyAddr(\n\taccountState AccountState,\n\tprivateKey crypto.PrivateKey,\n\tinitBalance *big.Int,\n\tbc blockchain.Blockchain,\n\tsf factory.Factory,\n) (crypto.PrivateKey, string, error) {\n\tretKey := privateKey\n\tretAddr := \"\"\n\tswitch accountState {\n\tcase AcntCreate:\n\t\taddr := retKey.PublicKey().Address()\n\t\tif addr == nil {\n\t\t\treturn nil, \"\", errors.New(\"failed to get address\")\n\t\t}\n\t\tretAddr = addr.String()\n\n\tcase AcntExist:\n\t\taddr := retKey.PublicKey().Address()\n\t\tif addr == nil {\n\t\t\treturn nil, \"\", errors.New(\"failed to get address\")\n\t\t}\n\t\tretAddr = addr.String()\n\t\tctx := genesis.WithGenesisContext(context.Background(), bc.Genesis())\n\t\texistState, err := accountutil.AccountState(ctx, sf, addr)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tinitBalance.Set(existState.Balance)\n\tcase AcntNotRegistered:\n\t\tsk, err := crypto.GenerateKey()\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\taddr := sk.PublicKey().Address()\n\t\tif addr == nil {\n\t\t\treturn nil, \"\", errors.New(\"failed to get address\")\n\t\t}\n\t\tretAddr = addr.String()\n\t\tretKey = sk\n\tcase AcntBadAddr:\n\t\trand.Seed(time.Now().UnixNano())\n\t\tb := make([]byte, 41)\n\t\tfor i := range b {\n\t\t\tb[i] = byte(65 + rand.Intn(26))\n\t\t}\n\t\tretAddr = string(b)\n\t}\n\treturn retKey, retAddr, nil\n}", "func GenerateBargainKey(addr string) string {\n\treturn fmt.Sprintf(\"bargain_addr_%s\", addr)\n}", "func makeAddress(keyPair *keypair.KeyPair, testnet bool) *account.Account {\n\n\treturn &account.Account{\n\t\tAccountInterface: &account.ED25519Account{\n\t\t\tTest: testnet,\n\t\t\tPublicKey: keyPair.PublicKey[:],\n\t\t},\n\t}\n}", "func (round *presign1) prepare() error {\n\ti := round.PartyID().Index\n\n\txi := round.key.Xi\n\tks := round.key.Ks\n\tBigXs := round.key.BigXj\n\n\t// adding the key derivation delta to the xi's\n\t// Suppose x has shamir shares x_0, x_1, ..., x_n\n\t// So x + D has shamir shares x_0 + D, x_1 + D, ..., x_n + D\n\tmod := common.ModInt(round.Params().EC().Params().N)\n\txi = mod.Add(round.temp.keyDerivationDelta, xi)\n\tround.key.Xi = xi\n\n\tif round.Threshold()+1 > len(ks) {\n\t\treturn fmt.Errorf(\"t+1=%d is not satisfied by the key count of %d\", round.Threshold()+1, len(ks))\n\t}\n\tif wi, BigWs, err := PrepareForSigning(round.Params().EC(), i, len(ks), xi, ks, BigXs); err != nil {\n\t\treturn err\n\t} else {\n\t\tround.temp.w = wi\n\t\tround.temp.BigWs = BigWs\n\t}\n\n\treturn nil\n}", "func (rpcServer * RPCServer)transferKeysToPredecessor(){\n\t//open a read transaction\n\trpcServer.boltDB.View(func(tx *bolt.Tx) error {\n\t\tvar cursor *bolt.Cursor\n\t\tcursor = tx.Cursor()\n\t\t\n\t\tvar bucket *bolt.Bucket\n\t\t\n\n\t\t//traverse through all keys\n\t\tfor k, _ := cursor.First(); k != nil; k, _ = cursor.Next() {\n\t\t\tbucket = tx.Bucket(k)\n\t\t\t\n\t\t\t//traverse through all relation and value pairs\n\t\t\tbucket.ForEach(func(relation, value []byte) error {\n\t\t\t\t//create paramter - successor\n\t\t\t\n\t\t\t\t//add to array of interface\n\t\t\t\t\n\t\t\t\tparameterArray := make([]interface{},3)\n\t\t\t\tparameterArray[0] = string(k)\n\t\t\t\tparameterArray[1] = string(relation)\n\t\t\t\tparameterArray[2] = string(value)\n\t\t\t\t\n\t\t\t\t//if hash value less than predecessor value - then only insert\n\t\t\t\tkeyRelationHash := rpcServer.chordNode.GetHashFromKeyAndValue(string(k),string(relation));\n\t\t\t\tif keyRelationHash > 
rpcServer.chordNode.Predecessor{\n\t\t\t\t\treturn nil\n\t\t\t\t} \n\n\t\t\t\t//create json message\n\t\t\t\tjsonMessage := rpcclient.RequestParameters{}\n\t\t\t\tjsonMessage.Method = \"Insert\";\n\t\t\t\tjsonMessage.Params = parameterArray\n\t\t\t\t\n\t\t\t\tjsonBytes,err :=json.Marshal(jsonMessage)\n\t\t\t\tif err!=nil{\n\t\t\t\t\trpcServer.logger.Println(err)\n\t\t\t\t\treturn err\n\t\t\t\t} \n \n\t\t\t\trpcServer.logger.Println(string(jsonBytes))\n\n\t\t\t\tclientServerInfo,err := rpcServer.chordNode.PrepareClientServerInfo(rpcServer.chordNode.Predecessor)\n\t\t\t\tif err!=nil{\n\t\t\t\t\t\n\t\t\t\t\trpcServer.logger.Println(err)\n\t\t\t\t\treturn nil\n\t\t\t\t\t\n\t\t\t\t}\n\t\t\t\tclient := &rpcclient.RPCClient{}\n\t\t\t\terr, _ = client.RpcCall(clientServerInfo, string(jsonBytes))\n\t\t\t\t\n\t\t\t\tif err != nil {\n\t\t\t\t\trpcServer.logger.Println(err)\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t\n\t\t\t\treturn nil\n\t\t\t})\n\t\t}\n\t\treturn nil\n\t})\n\n}", "func GenerateSaveCoinKey(\n\tkeybase keyring.Keyring,\n\tkeyName, mnemonic string,\n\toverwrite bool,\n\talgo keyring.SignatureAlgo,\n) (sdk.AccAddress, string, error) {\n\texists := false\n\t_, err := keybase.Key(keyName)\n\tif err == nil {\n\t\texists = true\n\t}\n\n\t// ensure no overwrite\n\tif !overwrite && exists {\n\t\treturn sdk.AccAddress{}, \"\", fmt.Errorf(\"key already exists, overwrite is disabled\")\n\t}\n\n\tif exists {\n\t\tif err := keybase.Delete(keyName); err != nil {\n\t\t\treturn sdk.AccAddress{}, \"\", fmt.Errorf(\"failed to overwrite key\")\n\t\t}\n\t}\n\n\tvar (\n\t\trecord *keyring.Record\n\t\tsecret string\n\t)\n\n\t// generate or recover a new account\n\tif mnemonic != \"\" {\n\t\tsecret = mnemonic\n\t\trecord, err = keybase.NewAccount(keyName, mnemonic, keyring.DefaultBIP39Passphrase, sdk.GetConfig().GetFullBIP44Path(), algo)\n\t} else {\n\t\trecord, secret, err = keybase.NewMnemonic(keyName, keyring.English, sdk.GetConfig().GetFullBIP44Path(), keyring.DefaultBIP39Passphrase, algo)\n\t}\n\tif err != nil {\n\t\treturn sdk.AccAddress{}, \"\", err\n\t}\n\n\taddr, err := record.GetAddress()\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\treturn addr, secret, nil\n}", "func _dbKeyForHODLerPKIDCreatorPKIDToBalanceEntry(hodlerPKID *PKID, creatorPKID *PKID) []byte {\n\tkey := append([]byte{}, _PrefixHODLerPKIDCreatorPKIDToBalanceEntry...)\n\tkey = append(key, hodlerPKID[:]...)\n\tkey = append(key, creatorPKID[:]...)\n\treturn key\n}", "func PbkFromHex(pbk string) (ed25519.PublicKey, error) {\n\tk, err := hex.DecodeString(pbk)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ed25519.PublicKey(k), nil\n}", "func AddressToPubkey(address string) (pubkey []byte, err error) {\n\terr = errors.New(\"invalid address\")\n\tswitch len(address) {\n\tcase 64:\n\t\tif address[:4] != \"xrb_\" && address[:4] != \"ban_\" {\n\t\t\treturn\n\t\t}\n\t\taddress = address[4:]\n\tcase 65:\n\t\tif address[:5] != \"nano_\" {\n\t\t\treturn\n\t\t}\n\t\taddress = address[5:]\n\tdefault:\n\t\treturn\n\t}\n\tb32 := base32.NewEncoding(\"13456789abcdefghijkmnopqrstuwxyz\")\n\tif pubkey, err = b32.DecodeString(\"1111\" + address[:52]); err != nil {\n\t\treturn\n\t}\n\tpubkey = pubkey[3:]\n\tchecksum, err := checksum(pubkey)\n\tif err != nil {\n\t\treturn\n\t}\n\tif b32.EncodeToString(checksum) != address[52:] {\n\t\terr = errors.New(\"checksum mismatch\")\n\t}\n\treturn\n}", "func GenerateFundKey(addr string) string {\n\treturn fmt.Sprintf(\"fund_addr_%s\", addr)\n}", "func (client *KeyVaultClient) 
backupKeyCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, options *KeyVaultClientBackupKeyOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}/backup\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.3\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *KeyVaultClient) backupKeyCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, options *KeyVaultClientBackupKeyOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}/backup\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func GenerateCoinKey(addr string) string {\n\treturn fmt.Sprintf(\"coin_addr_%s\", addr)\n}", "func (w *Wallet) ToKey(address string) (keys.PublicKey, error) {\n\treturn keys.PublicKey{}, nil\n}", "func (p *P2C) Balance(key string) (string, error) {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\tif len(p.hosts) == 0 {\n\t\treturn \"\", liblb.ErrNoHost\n\t}\n\n\t// chosen host\n\tvar host string\n\n\tvar n1, n2 string\n\n\tif len(key) > 0 {\n\t\tn1, n2 = p.hash(key)\n\t} else {\n\t\tn1 = p.hosts[p.rndm.Intn(len(p.hosts))].name\n\t\tn2 = p.hosts[p.rndm.Intn(len(p.hosts))].name\n\t}\n\n\thost = n2\n\n\tif p.loadMap[n1].load <= p.loadMap[n2].load {\n\t\thost = n1\n\t}\n\n\tp.loadMap[host].load++\n\treturn host, nil\n}", "func newAddressPubKeyHash(pkHash []byte, netID [2]byte) (*AddressPubKeyHash,\n\terror) {\n\t// Check for a valid pubkey hash length.\n\tif len(pkHash) != ripemd160.Size {\n\t\treturn nil, errors.New(\"pkHash must be 20 bytes\")\n\t}\n\n\taddr := &AddressPubKeyHash{netID: netID}\n\tcopy(addr.hash[:], pkHash)\n\treturn addr, nil\n}", "func (honest *Honest) bootstrapKeys() {\n\n\thonest.Keys.CommitmentKey = extractCommitmentKey(honest.ncol)\n\thonest.Keys.PubKeyMap, honest.Keys.Skey, honest.Keys.PubKey = extractKeys(honest.id)\n\t// fmt.Println(honest.Keys.PubKeyMap)\n\t// fmt.Println(honest.Keys.Skey)\n\t// fmt.Println(honest.Keys.PubKey)\n\t// fmt.Println(honest.Keys.CommitmentKey)\n\n}", "func GenerateKey(net bitcoin.Network) (*wallet.Key, error) {\r\n\tkey, err := bitcoin.GenerateKey(net)\r\n\tif err != nil {\r\n\t\treturn nil, errors.Wrap(err, \"Failed to generate key\")\r\n\t}\r\n\r\n\tresult := wallet.Key{\r\n\t\tKey: key,\r\n\t}\r\n\r\n\tresult.Address, err = key.RawAddress()\r\n\tif err != nil {\r\n\t\treturn nil, errors.Wrap(err, \"Failed to create key address\")\r\n\t}\r\n\r\n\treturn &result, nil\r\n}", "func BitcoinAddressFromPubKey(pubKey PubKey) BitcoinAddress {\n\treturn 
BitcoinAddress{\n\t\tVersion: 0,\n\t\tKey: BitcoinPubKeyRipemd160(pubKey),\n\t}\n}", "func (pubKey PubKeyEd25519) Address() []byte { return binary.BinaryRipemd160(pubKey) }", "func keyToAddr(key *secp256k1.PrivateKey, net *chaincfg.Params) (dcrutil.Address, error) {\n\tpubKey := (*secp256k1.PublicKey)(&key.PublicKey)\n\tserializedKey := pubKey.SerializeCompressed()\n\tpubKeyAddr, err := dcrutil.NewAddressSecpPubKey(serializedKey, net)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn pubKeyAddr.AddressPubKeyHash(), nil\n}", "func keyPubAddr() (crypto.PrivKey, crypto.PubKey, sdk.AccAddress) {\n\tkeyCounter++\n\tseed := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(seed, keyCounter)\n\n\tkey := ed25519.GenPrivKeyFromSecret(seed)\n\tpub := key.PubKey()\n\taddr := sdk.AccAddress(pub.Address())\n\treturn key, pub, addr\n}", "func AddressFromBalancesStore(key []byte) sdk.AccAddress {\n\tkv.AssertKeyAtLeastLength(key, 1+v1auth.AddrLen)\n\taddr := key[:v1auth.AddrLen]\n\tkv.AssertKeyLength(addr, v1auth.AddrLen)\n\treturn sdk.AccAddress(addr)\n}", "func LoadPublicKeyPair(addr string) (*PublicKeyPair, error) {\n\tp := new(PublicKeyPair)\n\tp.spendKey = edwards25519.NewIdentityPoint()\n\tp.viewKey = edwards25519.NewIdentityPoint()\n\ta := base58.Decode(addr)\n\th := sha3.NewLegacyKeccak256()\n\th.Write(a[:len(a)-4])\n\ts := h.Sum(nil)\n\tif !utils.TestBytes(a[len(a)-4:], s[:4]) {\n\t\treturn nil, fmt.Errorf(\"address checksum fail\")\n\t}\n\tif _, err := p.spendKey.SetBytes(a[1:33]); err != nil {\n\t\treturn nil, err\n\t}\n\tif _, err := p.viewKey.SetBytes(a[33:65]); err != nil {\n\t\treturn nil, err\n\t}\n\treturn p, nil\n}", "func newPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {\n\treturn newPRSignedBy(keyType, \"\", nil, keyData, signedIdentity)\n}", "func (tx *Transaction) Rekey(rekeyToAddress string) error {\n\taddr, err := DecodeAddress(rekeyToAddress)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttx.RekeyTo = addr\n\treturn nil\n}", "func genPubkey() ([]byte, []byte) {\n\t_, pub := btcec.PrivKeyFromBytes(btcec.S256(), randomBytes(32))\n\tpubkey := pub.SerializeCompressed()\n\tpkHash := btcutil.Hash160(pubkey)\n\treturn pubkey, pkHash\n}", "func DeriveAddress(params *chaincfg.Params, xpub string, index uint32) (string, error) {\n\t// Parse the extended public key.\n\tacctKey, err := hdkeychain.NewKeyFromString(xpub)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Derive the appropriate branch key.\n\tbranchKey, err := acctKey.Child(udb.ExternalBranch)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tkey, err := branchKey.Child(index)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\taddr, err := key.Address(params)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn addr.EncodeAddress(), nil\n}", "func generateSpendableKey(seed modules.Seed, index uint64) spendableKey {\n\t// Generate the keys and unlock conditions.\n\tentropy := crypto.HashAll(seed, index)\n\tsk, pk := crypto.GenerateKeyPairDeterministic(entropy)\n\treturn spendableKey{\n\t\tPublicKey: pk,\n\t\tSecretKey: sk,\n\t}\n}", "func GenerateBargainTrackKey(addr string) string {\n\treturn fmt.Sprintf(\"bargain_track_addr_%s\", addr)\n}", "func KeyTestPubAddr() (cryptotypes.PrivKey, cryptotypes.PubKey, sdk.AccAddress) {\n\tkey := secp256k1.GenPrivKey()\n\tpub := key.PubKey()\n\taddr := sdk.AccAddress(pub.Address())\n\treturn key, pub, addr\n}", "func PrizeKey(id []byte) []byte {\n\treturn PrefixKey(PrizePrefix, id)\n}", "func genKeyAndSendCipher(kx 
*KX, pk *[sntrup4591761.PublicKeySize]byte, ek *[32]byte) (*[32]byte, error) {\n\tc, k, err := sntrup4591761.Encapsulate(rand.Reader, pk)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ek != nil {\n\t\terr = kx.writeWithKey(c[:], ek)\n\t} else {\n\t\t_, err = xdr.Marshal(kx.Conn, c)\n\t}\n\treturn k, err\n}", "func (d *AddressDeriver) singleDerive(change uint32, addressIndex uint32) string {\n\tkey, err := hdkeychain.NewKeyFromString(d.xpubs[0])\n\tPanicOnError(err)\n\n\tkey, err = key.Child(change)\n\tPanicOnError(err)\n\n\tkey, err = key.Child(addressIndex)\n\tPanicOnError(err)\n\n\tpubKey, err := key.Address(d.network.ChainConfig())\n\tPanicOnError(err)\n\n\treturn pubKey.String()\n}", "func ArpKey(iface, ipAddr string) string {\n\treturn models.Key(&ARPEntry{\n\t\tInterface: iface,\n\t\tIpAddress: ipAddr,\n\t})\n}", "func (k Keeper) set(ctx sdk.Context, address sdk.AccAddress, balance sdk.Int) (sdk.Int, error) {\n\tif k.ak.GetAccount(ctx, address) == nil {\n\t\tk.ak.SetAccount(ctx, k.ak.NewAccountWithAddress(ctx, address))\n\t}\n\tstore := ctx.KVStore(k.storeKey)\n\tencoded, err := k.cdc.MarshalBinaryLengthPrefixed(balance)\n\tif err != nil {\n\t\treturn sdk.Int{}, sdkerrors.Wrapf(sdkerrors.ErrJSONMarshal, err.Error())\n\t}\n\tstore.Set(address.Bytes(), encoded)\n\treturn balance, nil\n}", "func addNewPk(m *Message) {\n\tfor key, element := range m.Ledger.Accounts {\n\t\tif _, exists1 := ledger.Accounts[key]; !exists1 {\n\t\t\tledger.Accounts[key] = element\n\t\t\tsortKeyPair(key) // sorts the publicKey\n\t\t\t//fmt.Println(\"this the key: \",key)\n\t\t}\n\t}\n}", "func (a *managedAddress) ExportPubKey() string {\n\treturn hex.EncodeToString(a.pubKeyBytes())\n}", "func PubkeyToBananoAddress(pubkey []byte) (address string, err error) {\n\tif len(pubkey) != 32 {\n\t\treturn \"\", errors.New(\"invalid pubkey length\")\n\t}\n\tchecksum, err := checksum(pubkey)\n\tif err != nil {\n\t\treturn\n\t}\n\tpubkey = append([]byte{0, 0, 0}, pubkey...)\n\tb32 := base32.NewEncoding(\"13456789abcdefghijkmnopqrstuwxyz\")\n\treturn \"ban_\" + b32.EncodeToString(pubkey)[4:] + b32.EncodeToString(checksum), nil\n}", "func (pubKey PubKeyEd25519) Address() Address {\n\treturn Address(Sum(pubKey[:]))\n}", "func AddressFromPubKey(pubKey PubKey) Address {\n\taddr := Address{\n\t\tVersion: addressVersion,\n\t\tKey: pubKey.ToAddressHash(),\n\t}\n\taddr.setChecksum()\n\treturn addr\n}", "func GenerateAccountKey(addr string) string {\n\treturn fmt.Sprintf(\"account_addr_%s\", addr)\n}", "func KeyTestPubAddrSecp256R1(t *testing.T) (cryptotypes.PrivKey, cryptotypes.PubKey, sdk.AccAddress) {\n\tkey, err := secp256r1.GenPrivKey()\n\tassert.NilError(t, err)\n\tpub := key.PubKey()\n\taddr := sdk.AccAddress(pub.Address())\n\treturn key, pub, addr\n}", "func GetPkandValidatorPkUsingAddress(w http.ResponseWriter, req *http.Request) {\n\tnow, userIP := globalPkg.SetLogObj(req)\n\tlogStruct := logpkg.LogStruct{\"\", now, userIP, \"macAdress\", \"GetPublickeyUsingAddress\", \"Account\", \"\", \"\", \"\", 0}\n\n\taccountObj := accountdb.AccountStruct{}\n\n\tdecoder := json.NewDecoder(req.Body)\n\tdecoder.DisallowUnknownFields()\n\terr := decoder.Decode(&accountObj)\n\tif err != nil {\n\t\tglobalPkg.SendError(w, \"please enter your correct request \")\n\t\tglobalPkg.WriteLog(logStruct, \"failed to decode Object\", \"failed\")\n\t\treturn\n\t}\n\n\taccountobj := GetAccountByAccountPubicKey(accountObj.AccountPublicKey)\n\tif accountObj.AccountPublicKey == \"\" {\n\t\tglobalPkg.SendError(w, \"invalid address 
\")\n\t\tglobalPkg.WriteLog(logStruct, \"invalid address\", \"failed\")\n\t\treturn\n\t}\n\tif accountObj.AccountPassword != accountobj.AccountPassword {\n\t\tglobalPkg.SendError(w, \"incorrect password \")\n\t\tglobalPkg.WriteLog(logStruct, \"incorrect password\", \"failed\")\n\t\treturn\n\t}\n\n\tif accountObj.AccountName != accountobj.AccountName {\n\t\tglobalPkg.SendError(w, \"incorrect name\")\n\t\tglobalPkg.WriteLog(logStruct, \"incorrect name\", \"failed\")\n\t\treturn\n\t}\n\n\t//pk := FindpkByAddress(accountObj.AccountPublicKey)\n\n\tpkValidator := validator.CurrentValidator.ValidatorPublicKey\n\n\tdata := map[string]interface{}{\n\t\t//\"Public key for User\": pk.Publickey,\n\t\t\"validatorPublicKey\": pkValidator,\n\t}\n\tjsonObj, _ := json.Marshal(data)\n\tglobalPkg.SendResponse(w, jsonObj)\n\treturn\n}", "func (_ECC *ECCTransactor) ProposeAddKey(opts *bind.TransactOpts, key common.Address, keyType uint8) (*types.Transaction, error) {\n\treturn _ECC.contract.Transact(opts, \"proposeAddKey\", key, keyType)\n}", "func ConvertKey(sk *PrivateKey, pk EllipticPoint) *ecdsa.PrivateKey {\n\tpubKey := ecdsa.PublicKey{\n\t\tCurve: pk.C,\n\t\tX: pk.x,\n\t\tY: pk.y,\n\t}\n\n\tvar D *big.Int\n\n\tif sk != nil {\n\t\tD = new(big.Int)\n\t\tD.SetBytes(*sk.d)\n\t}\n\n\tprivKey := ecdsa.PrivateKey{\n\t\tPublicKey: pubKey,\n\t\tD: D,\n\t}\n\n\treturn &privKey\n}", "func AddressFromHexKey(privKeyHex string) (pubAddressHex string, err error) {\n\tprivKey, err := crypto.HexToECDSA(privKeyHex)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tpubAddressHex = crypto.PubkeyToAddress(privKey.PublicKey).Hex()\n\treturn\n}", "func Test_PubkeyFromSeckey(t *testing.T) {\n\t// http://www.righto.com/2014/02/bitcoins-hard-way-using-raw-bitcoin.html\n\tprivkey, _ := hex.DecodeString(`f19c523315891e6e15ae0608a35eec2e00ebd6d1984cf167f46336dabd9b2de4`)\n\tdesiredPubKey, _ := hex.DecodeString(`03fe43d0c2c3daab30f9472beb5b767be020b81c7cc940ed7a7e910f0c1d9feef1`)\n\tif pubkey := PubkeyFromSeckey(privkey); pubkey == nil {\n\t\tt.Fatal()\n\t} else if !bytes.Equal(pubkey, desiredPubKey) {\n\t\tt.Fatal()\n\t}\n}", "func PubkeyToAddress(pubkey []byte) common.Address {\n\treturn common.BytesToAddress(crypto.Keccak256(pubkey[1:])[12:])\n}", "func DoPK(ctx *cli.Context, privateKey *ecdsa.PrivateKey, granteePublicKey string, salt []byte) (sessionKey []byte, ae *AccessEntry, err error) {\n\tif granteePublicKey == \"\" {\n\t\treturn nil, nil, errors.New(\"need a grantee Public Key\")\n\t}\n\tb, err := hex.DecodeString(granteePublicKey)\n\tif err != nil {\n\t\tlog.Error(\"error decoding grantee public key\", \"err\", err)\n\t\treturn nil, nil, err\n\t}\n\n\tgranteePub, err := crypto.DecompressPubkey(b)\n\tif err != nil {\n\t\tlog.Error(\"error decompressing grantee public key\", \"err\", err)\n\t\treturn nil, nil, err\n\t}\n\n\tsessionKey, err = NewSessionKeyPK(privateKey, granteePub, salt)\n\tif err != nil {\n\t\tlog.Error(\"error getting session key\", \"err\", err)\n\t\treturn nil, nil, err\n\t}\n\n\tae, err = NewAccessEntryPK(hex.EncodeToString(crypto.CompressPubkey(&privateKey.PublicKey)), salt)\n\tif err != nil {\n\t\tlog.Error(\"error generating access entry\", \"err\", err)\n\t\treturn nil, nil, err\n\t}\n\n\treturn sessionKey, ae, nil\n}", "func (c *ProcConfig) EnsureKey() {\n\tif c.ProcKey.Null() {\n\t\tc.ProcKey = RandProcKey()\n\t}\n}", "func (_ResolverContract *ResolverContractTransactor) SetPubkey(opts *bind.TransactOpts, node [32]byte, x [32]byte, y [32]byte) (*types.Transaction, error) {\n\treturn 
_ResolverContract.contract.Transact(opts, \"setPubkey\", node, x, y)\n}", "func HexKeyToAddress(hexKey string) common.Address {\n\tkey, _ := HexToSM2(hexKey)\n\tpubBytes := SM2PubBytes(&key.PublicKey)\n\tsm3digest := sm3.Hash(pubBytes)\n\treturn common.BytesToAddress(sm3digest[12:])\n}", "func pubKeyAddrFromBytes(t *testing.T, pubKey []byte, params *chaincfg.Params) *btcutil.AddressPubKey {\n\tpubKeyAddr, err := btcutil.NewAddressPubKey(pubKey, params)\n\t_assert.NoError(t, err)\n\treturn pubKeyAddr\n}", "func GenerateSmartContractKey(addr string) string {\n\treturn fmt.Sprintf(\"smartcontract_addr_%s\", addr)\n}", "func (e AssignDestAddressRequestValidationError) Key() bool { return e.key }", "func (acc *Account) PublicKeyToAddress(pubkey string) (string, error) {\n\tpub, err := hex.DecodeString(pubkey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tvar key secp256k1.PubKeySecp256k1\n\tcopy(key[:], pub)\n\treturn AddrPrefix + base58.CheckEncode(key.Address(), byte(addrVersion)), nil\n}", "func overrideBip32Sequence(k *hdkeychain.ExtendedKey, addrIdx uint32) (*hdkeychain.ExtendedKey, error) {\n\t// Serialize the base58 key, and decode the resulting base58-check string.\n\tdecoded, version, err := base58.CheckDecode(k.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Ensure the BIP32 key has the new address-index (last derivation index)\n\tbinary.BigEndian.PutUint32(decoded[8:12], addrIdx)\n\n\tencoded := base58.CheckEncode(decoded, version)\n\tnewKey, err := hdkeychain.NewKeyFromString(encoded)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn newKey, nil\n}", "func (k Keeper) addPubkey(ctx sdk.Context, pubkey crypto.PubKey) {\n\taddr := pubkey.Address()\n\tk.setAddrPubkeyRelation(ctx, addr, pubkey)\n}", "func (_Contract *ContractTransactor) SetPubkey(opts *bind.TransactOpts, node [32]byte, x [32]byte, y [32]byte) (*types.Transaction, error) {\n\treturn _Contract.contract.Transact(opts, \"setPubkey\", node, x, y)\n}", "func (*produceRequest) Key() int16 { return 0 }", "func main() {\n\t// network := flag.String(\"network\", \"http://51.83.36.184:20002\", \"network chain\")\n\tamount := flag.Float64(\"amount\", 0.0001, \"amount ETH\")\n\t// privKeyAcc := flag.String(\"privateKey\",\"112t8rnsqDitXckbWMPo4wGbjwyPtYHywApPqfZVQNatrMzfDLERCmHTBPsHUZjhzFLxdVmQ6m6W5ppbK4PZCzWVjEBvi3a7SVrtVpd6GZSL\",\"privateKey Incognito account deposit\")\n\tpaymentKeyAcc := flag.String(\"paymentKey\", \"12S3Xv2N9KvGZivRESKUQWv6obrghwykAUxqc85nTcZQ9AJMxnJe4Ct97BjAm5vFJ9bhhaHXDCmGfbXEqbS766DyeMLLeYksDM1FmSg\", \"paymentkey Incognito account deposit\")\n\n\tflag.Parse()\n\t\n\tvar pt PortalV3Base\n\n\tpt.IncBurningAddrStr = \"12RxahVABnAVCGP3LGwCn8jkQxgw7z1x14wztHzn455TTVpi1wBq9YGwkRMQg3J4e657AbAnCvYCJSdA9czBUNuCKwGSRQt55Xwz8WA\"\n\tpt.IncPrivKeyStr = \"112t8rnsqDitXckbWMPo4wGbjwyPtYHywApPqfZVQNatrMzfDLERCmHTBPsHUZjhzFLxdVmQ6m6W5ppbK4PZCzWVjEBvi3a7SVrtVpd6GZSL\"\n\tpt.IncPaymentAddrStr = *paymentKeyAcc\n\n\tpt.BnbAddStr = \"6abd698ea7ddd1f98b1ecaaddab5db0453b8363ff092f0d8d7d4c6b1155fb693\"\n\tpt.BtcAddStr = \"ef5947f70ead81a76a53c7c8b7317dd5245510c665d3a13921dc9a581188728b\"\n\tpt.BnbRemoteAddStr = \"tbnb172pnrmd0409237jwlq5qjhw2s2r7lq6ukmaeke\"\n\tpt.BtcRemoteAddStr = \"mhpTRAPdmyB1PUvXR2yqaSBK8ZJhEQ8rEw\"\n\n\tpt.EtherAddressStr = \"0x0000000000000000000000000000000000000000\"\n\tpt.IncEtherTokenIDStr = \"ffd8d42dc40a8d166ea4848baf8b5f6e9fe0e9c30d60062eb7d44a8df9e00854\"\n\tpt.ETHPrivKeyStr = \"A5AE26C7154410DF235BC8669FFD27C0FC9D3068C21E469A4CC68165C68CD5CB\"\n\tpt.ETHOwnerAddrStr = 
\"cE40cE511A5D084017DBee7e3fF3e455ea32D85c\"\n\n\tpt.ETHHost = \"https://kovan.infura.io/v3/93fe721349134964aa71071a713c5cef\"\n\tpt.IncBridgeHost = \"http://51.79.76.38:8334\"\n\tpt.IncRPCHost = \"http://51.79.76.38:8334\"\n\n\tETHPrivKey, ETHClient, _ := ethInstance(pt.ETHPrivKeyStr, pt.ETHHost)\n\n\tpt.ETHClient = ETHClient\n\tpt.ETHPrivKey = ETHPrivKey\n\tpt.auth = bind.NewKeyedTransactor(ETHPrivKey)\n\n\tpt.Portalv3 = common.HexToAddress(\"0x6D53de7aFa363F779B5e125876319695dC97171E\")\n\tpt.portalV3Inst, _ = portalv3.NewPortalv3(pt.Portalv3, pt.ETHClient)\n\n\t// pt.USDTAddress = common.HexToAddress(\"0x3a829f4b97660d970428cd370c4e41cbad62092b\")\n\t// fmt.Printf(\"usdt address: %s\\n\", pt.USDTAddress.Hex())\n\t// pt.USDCAddress = common.HexToAddress(\"0x75b0622cec14130172eae9cf166b92e5c112faff\")\n\t// fmt.Printf(\"usdc address: %s\\n\", pt.USDCAddress.Hex())\n\n\t// fmt.Println(\"------------ deposit ETH-------------\")\n\n\tDepositingEther := float64(*amount)\n\t\n\ttxHash := pt.depositETH(\n\t\tDepositingEther,\n\t\tpt.IncPaymentAddrStr,\n\t)\n\ttime.Sleep(5 * time.Second)\n\t_, ethBlockHash, ethTxIdx, ethDepositProof, _ := getETHDepositProof(pt.ETHHost, txHash)\n\t// require.Equal(pt.T(), nil, err)\n\tfmt.Println(\"BlockHash : \", ethBlockHash)\n\tfmt.Println(\"TxIndex : \", ethTxIdx)\n\tfmt.Println(\"ProofStrs : \", ethDepositProof)\n\n\t// fmt.Println(\"------------ deposit USDT --------------\")\n\t// txHash = pt.depositERC20ToBridge(\n\t// \tbig.NewInt(0.01*1e6),\n\t// \tpt.USDTAddress,\n\t// \tpt.IncPaymentAddrStr,\n\t// )\n\n\t// _, ethBlockHash, ethTxIdx, ethDepositProof, _ = getETHDepositProof(pt.ETHHost, txHash)\n\t// // require.Equal(pg.T(), nil, err)\n\t// fmt.Println(\"depositProof usdt ---- : \", ethBlockHash, ethTxIdx, ethDepositProof)\n}", "func packKey(field []byte, data []byte) ([]byte, error) {\n\treturn packKeys(field, [][]byte{data})\n\t//w := new(bytes.Buffer)\n\t//err := serialization.WriteVarBytes(w, data)\n\t//if err != nil {\n\t//\treturn nil, errors.NewDetailErr(err, errors.ErrNoCode, \"[AuthContract] packKey failed\")\n\t//}\n\t//key := append(field, w.Bytes()...)\n\t//return key, nil\n}", "func (c *Constructor) newAddress(ctx context.Context) (string, error) {\n\tkp, err := keys.GenerateKeypair(c.curveType)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"%w unable to generate keypair\", err)\n\t}\n\n\taddress, _, err := c.helper.Derive(\n\t\tctx,\n\t\tc.network,\n\t\tkp.PublicKey,\n\t\tnil,\n\t)\n\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"%w: unable to derive address\", err)\n\t}\n\n\terr = c.helper.StoreKey(ctx, address, kp)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"%w: unable to store address\", err)\n\t}\n\n\tif err := c.handler.AddressCreated(ctx, address); err != nil {\n\t\treturn \"\", fmt.Errorf(\"%w: could not handle address creation\", err)\n\t}\n\n\treturn address, nil\n}", "func XRP_importKeyFromSeed(seed string, cryptoType string) crypto.Key {\n\tshash, err := crypto.NewRippleHashCheck(seed, crypto.RIPPLE_FAMILY_SEED)\n\tcheckErr(err)\n\tswitch cryptoType {\n\tcase \"ed25519\":\n\t\tkey, _ := crypto.NewEd25519Key(shash.Payload())\n\t\treturn key\n\tcase \"ecdsa\":\n\t\tkey, _ := crypto.NewECDSAKey(shash.Payload())\n\t\treturn key\n\tdefault:\n\t\treturn nil\n\t}\n}", "func Byt2PK(pkB []byte) (*ecdsa.PublicKey, error) {\n\tpk, err := x509.ParsePKIXPublicKey(pkB)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn pk.(*ecdsa.PublicKey), nil\n}", "func (w Wallet) Address() []byte {\n\tripemd160 := 
PublicKeyHash(w.PublicKey)\n\n\tversionedRimpemd160 := append([]byte{version}, ripemd160...)\n\tchecksum := CheckSumSlice(versionedRimpemd160)\n\n\tfullHash := append(versionedRimpemd160, checksum...)\n\taddress := Base58Encode(fullHash)\n\n\treturn address\n}", "func (_Gatekeeper *GatekeeperSession) Payout(_proof []byte, _root *big.Int, _from common.Address, _txNumber *big.Int, _value *big.Int) (*types.Transaction, error) {\n\treturn _Gatekeeper.Contract.Payout(&_Gatekeeper.TransactOpts, _proof, _root, _from, _txNumber, _value)\n}", "func (_ResolverContract *ResolverContractTransactorSession) SetPubkey(node [32]byte, x [32]byte, y [32]byte) (*types.Transaction, error) {\n\treturn _ResolverContract.Contract.SetPubkey(&_ResolverContract.TransactOpts, node, x, y)\n}", "func (kp *FromAddress) LibP2PPubKey() (*libp2pc.Ed25519PublicKey, error) {\n\tpmes := new(pb.PublicKey)\n\tpmes.Data = kp.publicKey()[:]\n\tpk, err := libp2pc.UnmarshalEd25519PublicKey(pmes.GetData())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tepk, ok := pk.(*libp2pc.Ed25519PublicKey)\n\tif !ok {\n\t\treturn nil, nil\n\t}\n\treturn epk, nil\n}", "func GenerateCoinKey(algo keyring.SignatureAlgo, cdc codec.Codec) (sdk.AccAddress, string, error) {\n\t// generate a private key, with mnemonic\n\tinfo, secret, err := keyring.NewInMemory(cdc).NewMnemonic(\n\t\t\"name\",\n\t\tkeyring.English,\n\t\tsdk.GetConfig().GetFullBIP44Path(),\n\t\tkeyring.DefaultBIP39Passphrase,\n\t\talgo,\n\t)\n\tif err != nil {\n\t\treturn sdk.AccAddress{}, \"\", err\n\t}\n\taddr, err := info.GetAddress()\n\tif err != nil {\n\t\treturn sdk.AccAddress{}, \"\", err\n\t}\n\treturn addr, secret, nil\n}", "func (dcr *ExchangeWallet) createSig(tx *wire.MsgTx, idx int, pkScript []byte, addr dcrutil.Address) (sig, pubkey []byte, err error) {\n\tsigTyper, ok := addr.(signatureTyper)\n\tif !ok {\n\t\treturn nil, nil, fmt.Errorf(\"invalid address type\")\n\t}\n\n\tpriv, pub, err := dcr.getKeys(addr)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tsigType := sigTyper.DSA()\n\tsig, err = txscript.RawTxInSignature(tx, idx, pkScript, txscript.SigHashAll, priv.Serialize(), sigType)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn sig, pub.SerializeCompressed(), nil\n}", "func main() {\r\n\tdir := os.Args[1]\r\n\tpassphrase := os.Args[2]\r\n\r\n\tfmt.Println(\"Add new account to key store: \",dir,\" passphrase: \",passphrase)\r\n\r\n\tks := keystore.NewKeyStore(dir, 262144, 1)\r\n\tif a,err := ks.NewAccount(passphrase); err!=nil {\r\n\t\tfmt.Println(err)\r\n\t} else {\r\n\t\tfmt.Println(a.Address.Hex())\r\n\t}\r\n}", "func CreateSwapRequest(args *SwapArguments) []byte {\n\tlog.Debug(\"swap args\", \"args\", args)\n\n\tconv := convert.NewConvert()\n\n\tpartyKey := GetAccountKey(args.Party)\n\tcounterPartyKey := GetAccountKey(args.CounterParty)\n\n\tsigners := GetSigners()\n\n\tfee := conv.GetCoin(args.Fee, \"OLT\")\n\tgas := conv.GetCoin(args.Gas, \"OLT\")\n\tamount := conv.GetCoin(args.Amount, args.Currency)\n\texchange := conv.GetCoin(args.Exchange, args.Excurrency)\n\n\tif conv.HasErrors() {\n\t\tConsole.Error(conv.GetErrors())\n\t\tos.Exit(-1)\n\t}\n\taccount := make(map[data.ChainType]string)\n\tcounterAccount := make(map[data.ChainType]string)\n\t//todo: change the correct bitcoin address\n\t//account[data.BITCOIN] = string(partyKey)\n\t//account[data.ETHEREUM] = ethereum.GetAddress().String()\n\taccount[data.BITCOIN] = \"bitcoin\"\n\taccount[data.ETHEREUM] = \"ethereum\"\n\n\tparty := action.Party{Key: partyKey, Accounts: 
account}\n\tcounterParty := action.Party{Key: counterPartyKey, Accounts: counterAccount}\n\n\tswap := &action.Swap{\n\t\tBase: action.Base{\n\t\t\tType: action.SWAP,\n\t\t\tChainId: app.ChainId,\n\t\t\tSigners: signers,\n\t\t\tOwner: partyKey,\n\t\t\tSequence: global.Current.Sequence,\n\t\t},\n\t\tParty: party,\n\t\tCounterParty: counterParty,\n\t\tFee: fee,\n\t\tGas: gas,\n\t\tAmount: amount,\n\t\tExchange: exchange,\n\t\tNonce: args.Nonce,\n\t}\n\n\treturn SignAndPack(action.SWAP, action.Transaction(swap))\n}", "func PubkeyToAddress(pubkey []byte) (address string, err error) {\n\tif len(pubkey) != 32 {\n\t\treturn \"\", errors.New(\"invalid pubkey length\")\n\t}\n\tchecksum, err := checksum(pubkey)\n\tif err != nil {\n\t\treturn\n\t}\n\tpubkey = append([]byte{0, 0, 0}, pubkey...)\n\tb32 := base32.NewEncoding(\"13456789abcdefghijkmnopqrstuwxyz\")\n\treturn \"nano_\" + b32.EncodeToString(pubkey)[4:] + b32.EncodeToString(checksum), nil\n}", "func getAddress(mn string) string {\n\tsk, err := mnemonic.ToPrivateKey(mn)\n\tif err != nil {\n\t\tfmt.Printf(\"error recovering account: %s\\n\", err)\n\t\treturn \"\"\n\t}\n\tpk := sk.Public()\n\tvar a types.Address\n\tcpk := pk.(ed25519.PublicKey)\n\tcopy(a[:], cpk[:])\n\tfmt.Printf(\"Address: %s\\n\", a.String())\n\taddress := a.String()\n\treturn address\n}", "func main() {\n\tp384 := elliptic.P384()\n\tpriv1, _ := ecdsa.GenerateKey(p384, rand.Reader)\n\n\tprivateKeyBytes, _ := x509.MarshalECPrivateKey(priv1)\n\n\tencodedBytes := hex.EncodeToString(privateKeyBytes)\n\tfmt.Println(\"Private key:\")\n\tfmt.Printf(\"%s\\n\", encodedBytes)\n\n\tprivateKeyBytesRestored, _ := hex.DecodeString(encodedBytes)\n\tpriv2, _ := x509.ParseECPrivateKey(privateKeyBytesRestored)\n\n\tpublicKeyBytes, _ := x509.MarshalPKIXPublicKey(&priv1.PublicKey)\n\tencodedPubBytes := hex.EncodeToString(publicKeyBytes)\n\tfmt.Println(\"Public key:\")\n\tfmt.Printf(\"%s\\n\", encodedPubBytes)\n\n\tdata := []byte(\"data\")\n\t// Signing by priv1\n\tr, s, _ := ecdsa.Sign(rand.Reader, priv1, data)\n\n\t// Verifying against priv2 (restored from priv1)\n\tif !ecdsa.Verify(&priv2.PublicKey, data, r, s) {\n\t\tfmt.Printf(\"Error\")\n\t\treturn\n\t}\n\n\tfmt.Printf(\"Key was restored from string successfully\\n\")\n}", "func (mt *memoryTable) PubKey(addr string) (cipher.PubKey, bool) {\n\tpk, ok := mt.reverse[addr]\n\treturn pk, ok\n}", "func (_ResolverContract *ResolverContractSession) SetPubkey(node [32]byte, x [32]byte, y [32]byte) (*types.Transaction, error) {\n\treturn _ResolverContract.Contract.SetPubkey(&_ResolverContract.TransactOpts, node, x, y)\n}", "func orderBookKey(orderID string) []byte {\n\treturn myposchain.ConcatKeys(OrderBookKeyPrefix, []byte{0x0}, []byte(orderID))\n}", "func pubkeyFromSeckey(seckey []byte) []byte {\n\tif len(seckey) != 32 {\n\t\tlog.Panic(\"seckey length invalid\")\n\t}\n\n\tif secp.SeckeyIsValid(seckey) != 1 {\n\t\tlog.Panic(\"always ensure seckey is valid\")\n\t\treturn nil\n\t}\n\n\tpubkey := secp.GeneratePublicKey(seckey)\n\tif pubkey == nil {\n\t\tlog.Panic(\"ERROR: impossible, secp.GeneratePublicKey should never fail\")\n\t\treturn nil\n\t}\n\tif len(pubkey) != 33 {\n\t\tlog.Panic(\"ERROR: impossible, invalid pubkey length\")\n\t}\n\n\tif ret := secp.PubkeyIsValid(pubkey); ret != 1 {\n\t\tlog.Panicf(\"ERROR: pubkey invald, ret=%d\", ret)\n\t\treturn nil\n\t}\n\n\tif ret := VerifyPubkey(pubkey); ret != 1 {\n\t\tlog.Printf(\"seckey=%s\\n\", hex.EncodeToString(seckey))\n\t\tlog.Printf(\"pubkey=%s\\n\", 
hex.EncodeToString(pubkey))\n\t\tlog.Panicf(\"ERROR: pubkey verification failed, for deterministic. ret=%d\", ret)\n\t\treturn nil\n\t}\n\n\treturn pubkey\n}", "func (_Gatekeeper *GatekeeperTransactor) Payout(opts *bind.TransactOpts, _proof []byte, _root *big.Int, _from common.Address, _txNumber *big.Int, _value *big.Int) (*types.Transaction, error) {\n\treturn _Gatekeeper.contract.Transact(opts, \"Payout\", _proof, _root, _from, _txNumber, _value)\n}", "func (sk *PrivateKey) Pack(buf *[PrivateKeySize]byte) {\n\tvar tmp [mode2.PrivateKeySize]byte\n\tsk.d.Pack(&tmp)\n\tcopy(buf[:mode2.PrivateKeySize], tmp[:])\n\tcopy(buf[mode2.PrivateKeySize:], sk.e.Seed())\n}", "func newSweepPkScript(wallet lnwallet.WalletController) ([]byte, error) {\n\tsweepAddr, err := wallet.NewAddress(lnwallet.WitnessPubKey, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn txscript.PayToAddrScript(sweepAddr)\n}", "func AddKey(s Server, password string, template *Key) (*Key, error) {\n\t// fill meta data about key\n\tnewkey := &Key{\n\t\tCreated: time.Now(),\n\t\tKDF: \"scrypt\",\n\t\tN: scryptN,\n\t\tR: scryptR,\n\t\tP: scryptP,\n\t}\n\n\thn, err := os.Hostname()\n\tif err == nil {\n\t\tnewkey.Hostname = hn\n\t}\n\n\tusr, err := user.Current()\n\tif err == nil {\n\t\tnewkey.Username = usr.Username\n\t}\n\n\t// generate random salt\n\tnewkey.Salt = make([]byte, scryptSaltsize)\n\tn, err := rand.Read(newkey.Salt)\n\tif n != scryptSaltsize || err != nil {\n\t\tpanic(\"unable to read enough random bytes for salt\")\n\t}\n\n\t// call scrypt() to derive user key\n\tnewkey.user, err = newkey.scrypt(password)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif template == nil {\n\t\t// generate new random master keys\n\t\tnewkey.master, err = newkey.newKeys()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\t// copy master keys from old key\n\t\tnewkey.master = template.master\n\t}\n\n\t// encrypt master keys (as json) with user key\n\tbuf, err := json.Marshal(newkey.master)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnewkey.Data = GetChunkBuf(\"key\")\n\tn, err = newkey.EncryptUser(newkey.Data, buf)\n\tnewkey.Data = newkey.Data[:n]\n\n\t// dump as json\n\tbuf, err = json.Marshal(newkey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// store in repository and return\n\tid, err := s.Create(backend.Key, buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnewkey.id = id\n\n\tFreeChunkBuf(\"key\", newkey.Data)\n\n\treturn newkey, nil\n}", "func (_HbSwap *HbSwapTransactor) TradePrep(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _HbSwap.contract.Transact(opts, \"tradePrep\")\n}", "func pubkeyFromSeckey(seckey []byte) []byte {\n\tif len(seckey) != 32 {\n\t\tlog.Panic(\"seckey length invalid\")\n\t}\n\n\tif secp.SeckeyIsValid(seckey) != 1 {\n\t\tlog.Panic(\"always ensure seckey is valid\")\n\t\treturn nil\n\t}\n\n\tvar pubkey []byte = secp.GeneratePublicKey(seckey) //always returns true\n\tif pubkey == nil {\n\t\tlog.Panic(\"ERROR: impossible, secp.BaseMultiply always returns true\")\n\t\treturn nil\n\t}\n\tif len(pubkey) != 33 {\n\t\tlog.Panic(\"ERROR: impossible, invalid pubkey length\")\n\t}\n\n\tif ret := secp.PubkeyIsValid(pubkey); ret != 1 {\n\t\tlog.Panic(\"ERROR: pubkey invald, ret=%s\", ret)\n\t\treturn nil\n\t}\n\n\tif ret := VerifyPubkey(pubkey); ret != 1 {\n\n\t\tlog.Printf(\"seckey= %s\", hex.EncodeToString(seckey))\n\t\tlog.Printf(\"pubkey= %s\", hex.EncodeToString(pubkey))\n\t\tlog.Panic(\"ERROR: pubkey verification failed, for deterministic. 
ret=%d\", ret)\n\t\treturn nil\n\t}\n\n\treturn pubkey\n}", "func AddressToPubKeyHash(s string) ([20]byte, error) {\n\tnetPkHashCheck := base58decode(s)\n\tif len(netPkHashCheck) != 25 {\n\t\treturn [20]byte{}, errors.New(\"netPkHashCheck has length different than 25\")\n\t}\n\n\tvar check [4]byte\n\tcopy(check[:], netPkHashCheck[21:])\n\tchecksum := hash.Hash256(netPkHashCheck[:21])\n\tif !bytes.Equal(check[:], checksum[:4]) {\n\t\treturn [20]byte{}, errors.New(\"invalid address: checksums don't match\")\n\t}\n\n\tvar pkHash160 [20]byte\n\tcopy(pkHash160[:], netPkHashCheck[1:21])\n\treturn pkHash160, nil\n}", "func (k *PublicKeySECP256K1R) Address() ids.ShortID {\n\tif k.addr.IsZero() {\n\t\taddr, err := ids.ToShortID(hashing.PubkeyBytesToAddress(k.Bytes()))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tk.addr = addr\n\t}\n\treturn k.addr\n}", "func LienByAddressDecodeKey(key []byte) sdk.AccAddress {\n\treturn sdk.AccAddress(key)\n}", "func (pubKey PubKeyEd25519) Address() []byte {\n\tw, n, err := new(bytes.Buffer), new(int64), new(error)\n\twire.WriteBinary(pubKey[:], w, n, err)\n\tif *err != nil {\n\t\tPanicCrisis(*err)\n\t}\n\t// append type byte\n\tencodedPubkey := append([]byte{1}, w.Bytes()...)\n\thasher := ripemd160.New()\n\thasher.Write(encodedPubkey) // does not error\n\treturn hasher.Sum(nil)\n}", "func Payment(souce_sk string, sourceAccount *horizon.Account, dest_pk string, amount string, asset txnbuild.Asset) error {\n source_kp, _ := keypair.Parse(souce_sk)\n\n paymentOp := txnbuild.Payment{\n Destination: dest_pk,\n Amount: amount,\n Asset: asset,\n }\n s := []txnbuild.Operation{&paymentOp}\n\n err := tools.Transaction(source_kp, sourceAccount, s)\n\n return err\n}", "func WithHBalancerKey(key int64) CallOption {\n\thval := uint32(0)\n\tif key > 0 {\n\t\tvar slicingUpdate [64]byte\n\t\tcopy(slicingUpdate[:], (*[8]byte)(unsafe.Pointer(&key))[:])\n\t\thval = crc32.ChecksumIEEE(slicingUpdate[:8])\n\t}\n\n\treturn beforeCall(func(o *callInfo) error {\n\t\to.hbKey = &hval\n\t\treturn nil\n\t})\n}", "func GetAddressFromPublicKey(publicKey []byte) string {\n\t//publicKeyHash := sha256.Sum256(publicKey)\n\n\tpk, err := crypto.PublicKeyFromBytes(publicKey)\n\tif err != nil {\n\t\tlog.Error(err)\n\t}\n\treturn pk.ToAddress()\n\n}", "func generateKey() {\n\tpassphrase := os.Getenv(passphraseEnvironmentVariable)\n\tif passphrase == \"\" {\n\t\tprintErrorAndExit(fmt.Errorf(\"skicka: SKICKA_PASSPHRASE \" +\n\t\t\t\"environment variable not set.\\n\"))\n\t}\n\n\t// Derive a 64-byte hash from the passphrase using PBKDF2 with 65536\n\t// rounds of SHA256.\n\tsalt := getRandomBytes(32)\n\thash := pbkdf2.Key([]byte(passphrase), salt, 65536, 64, sha256.New)\n\tif len(hash) != 64 {\n\t\tlog.Fatalf(\"incorrect key size returned by pbkdf2 %d\\n\", len(hash))\n\t}\n\n\t// We'll store the first 32 bytes of the hash to use to confirm the\n\t// correct passphrase is given on subsequent runs.\n\tpassHash := hash[:32]\n\t// And we'll use the remaining 32 bytes as a key to encrypt the actual\n\t// encryption key. 
(These bytes are *not* stored).\n\tkeyEncryptKey := hash[32:]\n\n\t// Generate a random encryption key and encrypt it using the key\n\t// derived from the passphrase.\n\tkey := getRandomBytes(32)\n\tiv := getRandomBytes(16)\n\tencryptedKey := encryptBytes(keyEncryptKey, iv, key)\n\n\tfmt.Printf(\"; Add the following lines to the [encryption] section\\n\")\n\tfmt.Printf(\"; of your ~/.skicka.config file.\\n\")\n\tfmt.Printf(\"\\tsalt=%s\\n\", hex.EncodeToString(salt))\n\tfmt.Printf(\"\\tpassphrase-hash=%s\\n\", hex.EncodeToString(passHash))\n\tfmt.Printf(\"\\tencrypted-key=%s\\n\", hex.EncodeToString(encryptedKey))\n\tfmt.Printf(\"\\tencrypted-key-iv=%s\\n\", hex.EncodeToString(iv))\n}", "func CreateP2PKey(dir, nickname, password, hrp string, scryptN, scryptP int) (types.Address, error) {\n\tprivateKey := ed25519.NewKey()\n\n\treturn saveAccountKey(dir, nickname, password, hrp, \"\", \"\", \"\", scryptN, scryptP, privateKey, false)\n}", "func (_Gatekeeper *GatekeeperTransactorSession) Payout(_proof []byte, _root *big.Int, _from common.Address, _txNumber *big.Int, _value *big.Int) (*types.Transaction, error) {\n\treturn _Gatekeeper.Contract.Payout(&_Gatekeeper.TransactOpts, _proof, _root, _from, _txNumber, _value)\n}", "func AddrKey() (key []byte) {\n\tkey = append(key, []byte(\"mavl-\"+issuanceE.IssuanceX+\"-addr\")...)\n\treturn key\n}", "func (r *Resolver) SetPubKey(opts *bind.TransactOpts, x [32]byte, y [32]byte) (*types.Transaction, error) {\n\tnameHash, err := NameHash(r.domain)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn r.Contract.SetPubkey(opts, nameHash, x, y)\n}", "func passphraseToKey(passphrase, engineId []byte) []byte {\n\th := sha1.New()\n\n\tpassphraseLength := len(passphrase)\n\n\t// Write 1 MB to the hash\n\trepeat, remain := oneMegabyte/passphraseLength, oneMegabyte%passphraseLength\n\n\tfor repeat > 0 {\n\t\th.Write(passphrase)\n\t\trepeat--\n\t}\n\n\tif remain > 0 {\n\t\th.Write(passphrase[:remain])\n\t}\n\n\tsum := h.Sum(nil)\n\n\th.Reset()\n\n\th.Write(sum)\n\th.Write(engineId)\n\th.Write(sum)\n\n\treturn h.Sum(nil)\n}" ]
[ "0.564435", "0.5613641", "0.5389401", "0.5386682", "0.53690535", "0.5327332", "0.5317667", "0.5300769", "0.5269204", "0.5228631", "0.5193929", "0.5192163", "0.5190385", "0.5186733", "0.51589626", "0.51504695", "0.5146894", "0.5137371", "0.5123204", "0.5114298", "0.50923884", "0.5070942", "0.5070594", "0.50360614", "0.50170255", "0.49919334", "0.4986463", "0.49780062", "0.49680415", "0.49599814", "0.49589574", "0.4927043", "0.49123108", "0.49103218", "0.4906464", "0.49052238", "0.4903372", "0.48978695", "0.48968357", "0.48897257", "0.48773223", "0.4853858", "0.4852001", "0.4850408", "0.48483214", "0.48444256", "0.48414406", "0.48270762", "0.48135462", "0.48120058", "0.4806959", "0.4805408", "0.47914708", "0.47913095", "0.47896653", "0.47894806", "0.47763735", "0.47755322", "0.4774014", "0.47725952", "0.47664312", "0.47632056", "0.47615615", "0.47608835", "0.47575018", "0.47344357", "0.47265372", "0.47234213", "0.470876", "0.4704163", "0.46993387", "0.46964848", "0.46957138", "0.4695372", "0.4690418", "0.46885526", "0.46877018", "0.46742544", "0.4669403", "0.46670207", "0.4662798", "0.46600187", "0.46558756", "0.46556064", "0.4654797", "0.4653076", "0.46517095", "0.4651427", "0.46512842", "0.46510515", "0.46507895", "0.46480343", "0.46468407", "0.464545", "0.4642451", "0.4641809", "0.46393853", "0.463925", "0.4639198", "0.46383712" ]
0.7228061
0
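Several of the negatives above (Wallet.Address, PubkeyToAddress, AddressToPubKeyHash) share one encoding pattern: hash the public key, prepend a version byte, append a short double-SHA256 checksum, then encode the result. A minimal stdlib-only Go sketch of that pattern, assuming a precomputed 20-byte hash and substituting hex for the Base58 step those snippets use:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// checksum returns the first 4 bytes of a double-SHA256 of payload,
// the integrity check the address snippets above append before encoding.
func checksum(payload []byte) []byte {
	first := sha256.Sum256(payload)
	second := sha256.Sum256(first[:])
	return second[:4]
}

// encodeAddress prepends a version byte to a public-key hash and appends
// the checksum. The snippets above Base58-encode this final payload; hex
// is used here only to keep the sketch free of external dependencies.
func encodeAddress(version byte, pubKeyHash []byte) string {
	payload := append([]byte{version}, pubKeyHash...)
	full := append(payload, checksum(payload)...)
	return hex.EncodeToString(full)
}

func main() {
	// Hypothetical 20-byte digest standing in for RIPEMD160(SHA256(pubkey)).
	h := sha256.Sum256([]byte("example pubkey"))
	fmt.Println(encodeAddress(0x00, h[:20]))
}
```

The 4-byte checksum is what lets a decoder such as AddressToPubKeyHash above reject a corrupted address before extracting the hash.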
FileStoreKey turns an address into the key used to get it from the account store
func FileStoreKey(sender []byte) []byte {
	return append(FileStoreKeyPrefix, sender...)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func AddressStoreKey(addr sdk.AccAddress) []byte {\n\treturn append(AddressStoreKeyPrefix, addr.Bytes()...)\n}", "func KeyByFilename(keyFname string) (*[32]byte, error) {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfp := filepath.Join(cwd, keyFname)\n\tb, err := ioutil.ReadFile(fp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn DecodeKey(string(b[:64])), nil\n}", "func ChainLinksStoreKey(user, chainName, address string) []byte {\n\treturn append(UserChainLinksPrefix(user), []byte(chainName+address)...)\n}", "func (k Keeper) getInviteStoreKey(user sdk.AccAddress) []byte {\n\treturn []byte(types.InviteStorePrefix + user.String())\n}", "func RelationshipsStoreKey(user, subspace, recipient string) []byte {\n\treturn append(UserRelationshipsSubspacePrefix(user, subspace), []byte(recipient)...)\n}", "func keyIDFromAddr(addr string, group *key.Group) *key.Identity {\n\tids := group.Identities()\n\tfor _, id := range ids {\n\t\tif id.Address() == addr {\n\t\t\treturn id\n\t\t}\n\t}\n\tfatal(\"Could not retrive the node you are trying to contact in the group file.\")\n\treturn nil\n}", "func KeyFile(dir string) string {\n\treturn mustWriteToFile(dir, key)\n}", "func AddrKey() (key []byte) {\n\tkey = append(key, []byte(\"mavl-\"+issuanceE.IssuanceX+\"-addr\")...)\n\treturn key\n}", "func ContactIdentityKey(id string) ([]byte, error) {\n\ts := textSecureStore\n\tidkeyfile := filepath.Join(s.identityDir, \"remote_\"+id)\n\tif !exists(idkeyfile) {\n\t\treturn nil, UnknownContactError{id}\n\t}\n\tb, err := s.readFile(idkeyfile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn append([]byte{5}, b...), nil\n}", "func AccountNumberStoreKey(accountNumber uint64) []byte {\n\treturn append(AccountNumberStoreKeyPrefix, sdk.Uint64ToBigEndian(accountNumber)...)\n}", "func ImportKeyStore(keyPath, name, passphrase string) (string, error) {\n\tkeyPath, err := filepath.Abs(keyPath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tkeyJSON, readError := ioutil.ReadFile(keyPath)\n\tif readError != nil {\n\t\treturn \"\", readError\n\t}\n\tif name == \"\" {\n\t\tname = generateName() + \"-imported\"\n\t\tfor store.DoesNamedAccountExist(name) {\n\t\t\tname = generateName() + \"-imported\"\n\t\t}\n\t} else if store.DoesNamedAccountExist(name) {\n\t\treturn \"\", fmt.Errorf(\"account %s already exists\", name)\n\t}\n\tkey, err := keystore.DecryptKey(keyJSON, passphrase)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tb32 := address.ToBech32(key.Address)\n\thasAddress := store.FromAddress(b32) != nil\n\tif hasAddress {\n\t\treturn \"\", fmt.Errorf(\"address %s already exists in keystore\", b32)\n\t}\n\tuDir, _ := homedir.Dir()\n\tnewPath := filepath.Join(uDir, common.DefaultConfigDirName, common.DefaultConfigAccountAliasesDirName, name, filepath.Base(keyPath))\n\terr = writeToFile(newPath, string(keyJSON))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn name, nil\n}", "func useKey(key *otr.PrivateKey) {\n\tprivateKey = *key\n\tfingerprint := string(key.PublicKey.Fingerprint())\n\tif _, ok := contacts[fingerprint]; !ok {\n\t\tcontacts[\"me\"] = fingerprint\n\t\tcontactsReverse[fingerprint] = \"me\"\n\t}\n}", "func getKey(keydir, keyfile string) (ssh.Signer, error) {\n\t/* Work out where the file should be */\n\tif !filepath.IsAbs(keyfile) {\n\t\tkeyfile = filepath.Join(keydir, keyfile)\n\t}\n\t/* Slurp the file */\n\tb, err := ioutil.ReadFile(keyfile)\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\t/* Turn it into a signer */\n\ts, err := 
ssh.ParsePrivateKey(b)\n\treturn s, err\n}", "func getKeyFromVault() ([]byte, error) {\n\tif key == nil {\n\t\t// TODO: read key from Vault\n\t\tkey = []byte(\"LOOKMEUPINEXTERNALSYSTEM\")\n\t}\n\treturn key, nil\n}", "func LienByAddressKey(addr sdk.AccAddress) []byte {\n\treturn addr.Bytes()\n}", "func NewKeyAndStoreToFile(passphrase string, keyDir string) (*Key, string) {\n\tkey := NewKey()\n\tkeyDataJSON, err := key.MarshalJSON(passphrase)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error encrypting key: %v\", err)\n\t}\n\tfileName, err := common.WriteDataToFile(keyDataJSON, filepath.Join(keyDir, createFileName(key.KeyPair.Address)))\n\tif err != nil {\n\t\tlog.Fatalf(\"Error writing keystore file: %v\", err)\n\t}\n\treturn key, fileName\n}", "func (c CLI) AddKeyToFileBackend(name, mnemonic, bip39Passphrase, cliDir string) (*string, error) {\n\tpasswordReader, passwordWriter, _ := os.Pipe()\n\n\tkb, err := keyring.New(sdk.KeyringServiceName(), \"file\", cliDir, passwordReader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thdpath := *hd.NewFundraiserParams(0, sdk.CoinType, 0)\n\n\t_, err = passwordWriter.WriteString(bip39Passphrase + \"\\n\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, err = passwordWriter.WriteString(bip39Passphrase + \"\\n\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfo, err := kb.NewAccount(name, mnemonic, bip39Passphrase, hdpath.String(), hd.Secp256k1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tko, err := keyring.Bech32KeyOutput(info)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout, err := yaml.Marshal(&[]keyring.KeyOutput{ko})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\toutYaml := string(out)\n\n\treturn &outYaml, nil\n}", "func (o DatastoreFileshareOutput) AccountKey() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DatastoreFileshare) pulumi.StringPtrOutput { return v.AccountKey }).(pulumi.StringPtrOutput)\n}", "func EkgKey(domain, id []byte) []byte {\n return createKey(EKG,domain,id)\n}", "func getHostKey(fn string) (ssh.Signer, error) {\n\t/* Read the key from the file */\n\tbs, err := ioutil.ReadFile(fn)\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\t/* Parse it */\n\treturn ssh.ParsePrivateKey(bs)\n}", "func main() {\r\n\tdir := os.Args[1]\r\n\tpassphrase := os.Args[2]\r\n\r\n\tfmt.Println(\"Add new account to key store: \",dir,\" passphrase: \",passphrase)\r\n\r\n\tks := keystore.NewKeyStore(dir, 262144, 1)\r\n\tif a,err := ks.NewAccount(passphrase); err!=nil {\r\n\t\tfmt.Println(err)\r\n\t} else {\r\n\t\tfmt.Println(a.Address.Hex())\r\n\t}\r\n}", "func keyBytes() []byte {\n\tkeyBytes, err := ioutil.ReadFile(*keyFile)\n\tapp.FatalIfError(err, \"unable to read key file\")\n\treturn keyBytes\n}", "func clientKey(filepath string) string {\n\tkey, err := ioutil.ReadFile(filepath)\n\tif err != nil {\n\t\tfmt.Println(\"Couldn't read key.pem:\", err)\n\t\tos.Exit(1)\n\t}\n\treturn string(key)\n}", "func AuthKey() []byte {\n\treturn store.AuthKey\n}", "func (userdata *User) RetrieveAccessToken(accessToken uuid.UUID) (fileKeyData FileKey, err error) {\r\n\tsharedInviteDS, fileKeyFound := userlib.DatastoreGet(accessToken)\r\n\tvar fileKeyNil FileKey\r\n\tif !fileKeyFound {\r\n\t\treturn fileKeyNil, errors.New(\"Access token did not find a shared file.\")\r\n\t}\r\n\r\n\tvar sharedInvite ShareInvite\r\n\terr = json.Unmarshal(sharedInviteDS, &sharedInvite)\r\n\r\n\tif err != nil {\r\n\t\treturn fileKeyNil, errors.New(\"Error unmarshaling shared file key.\")\r\n\t}\r\n\r\n\t//now verify that sharedInvite has not been tampered 
with\r\n\tvar senderKey userlib.PKEEncKey\r\n\tsenderKey, _ = userlib.KeystoreGet(sharedInvite.Sender + \"ds\")\r\n\r\n\terr = userlib.DSVerify(senderKey, sharedInvite.RSAFileKey, sharedInvite.Signature)\r\n\r\n\tif err != nil {\r\n\t\treturn fileKeyNil, errors.New(\"Failed to verify sender.\")\r\n\t}\r\n\r\n\t//now we can finally receive the fileKey after unmarshaling\r\n\t//trying to decrypt marshaled RSAFileKey\r\n\trsaFK_dec, err := userlib.PKEDec(userdata.PrivRSAKey, sharedInvite.RSAFileKey)\r\n\tif err != nil {\r\n\t\treturn fileKeyNil, errors.New(\"Failed to decrypt FileKeyMeta info.\")\r\n\t}\r\n\r\n\tvar rsaFK FileKeyMeta\r\n\terr = json.Unmarshal(rsaFK_dec, &rsaFK)\r\n\tif err != nil {\r\n\t\treturn fileKeyNil, errors.New(\"Error unmarshaling file key metadata.\")\r\n\t}\r\n\r\n\t//now lets retrieve the fileKey from the datastore and append that to our users filespace\r\n\tfileKey, fkFound := userlib.DatastoreGet(rsaFK.DSid)\r\n\r\n\tif !fkFound {\r\n\t\treturn fileKeyNil, errors.New(\"couldn't find shared file key\")\r\n\t}\r\n\r\n\t//authenticate HMAC, decrypt, depad, demarshal fileKey and add to users filespace\r\n\tlen_fk := len(fileKey) - userlib.HashSizeBytes\r\n\tif len_fk < 0 || len_fk > len(fileKey) || len(fileKey[:len_fk]) < userlib.HashSizeBytes {\r\n\t\t//automatically return error, file has been changed\r\n\t\treturn fileKeyNil, errors.New(\"File key data length has changed.\")\r\n\t}\r\n\r\n\tcomputedMac, _ := userlib.HMACEval(rsaFK.HMACkey, fileKey[:len_fk])\r\n\tif !userlib.HMACEqual(computedMac, fileKey[len_fk:]) {\r\n\t\treturn fileKeyNil, errors.New(\"File key struct has been tampered with in Datastore.\")\r\n\t}\r\n\t//decrypt\r\n\tfileKey_dec := userlib.SymDec(rsaFK.ENCkey, fileKey[:len_fk])\r\n\tfileKey_dec = PKCS(fileKey_dec, \"remove\")\r\n\terr = json.Unmarshal(fileKey_dec, &fileKeyData)\r\n\r\n\tif err != nil {\r\n\t\treturn fileKeyNil, errors.New(\"Error unmarshaling actual file key.\")\r\n\t}\r\n\r\n\treturn fileKeyData, nil\r\n}", "func (a BuildBlock) openKeyFile() *string {\n\tvar err error\n\tvar f []byte\n\tf, err = ioutil.ReadFile(a.runpath + a.Filename + \".key\")\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Cannot open key file: %s\\n\", err)\n\t\treturn nil\n\t}\n\tfile := string(f)\n\treturn &file\n}", "func KeyToPath(key string) (string) {\n\tpath, err := base64.StdEncoding.DecodeString(key)\n\tif err != nil {\n\t\treturn \"\"\n\t} else {\n\t\treturn string(path)\n\t}\n}", "func HexKeyToAddress(hexKey string) common.Address {\n\tkey, _ := HexToSM2(hexKey)\n\tpubBytes := SM2PubBytes(&key.PublicKey)\n\tsm3digest := sm3.Hash(pubBytes)\n\treturn common.BytesToAddress(sm3digest[12:])\n}", "func (acc *Account) Key() []byte {\n\treturn []byte(acc.Email)\n}", "func keyToAddr(key *secp256k1.PrivateKey, net *chaincfg.Params) (dcrutil.Address, error) {\n\tpubKey := (*secp256k1.PublicKey)(&key.PublicKey)\n\tserializedKey := pubKey.SerializeCompressed()\n\tpubKeyAddr, err := dcrutil.NewAddressSecpPubKey(serializedKey, net)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn pubKeyAddr.AddressPubKeyHash(), nil\n}", "func DirectKey(domain,id []byte) []byte {\n return createKey(DIRECT,domain,id)\n}", "func (k *Keeper) addKeyToKeeper(client cmd.HTTPClient, privKeyHex string) (string, error) {\n\tprivkey, err := crypto.HexToECDSA(utils.RemoveHexPrefix(privKeyHex))\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to decode priv key %s: %v\", privKeyHex, err)\n\t}\n\taddress := crypto.PubkeyToAddress(privkey.PublicKey).Hex()\n\tlog.Printf(\"importing keeper 
key %s\", address)\n\tkeyJSON, err := ethkey.FromPrivateKey(privkey).ToEncryptedJSON(defaultChainlinkNodePassword, utils.FastScryptParams)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to encrypt piv key %s: %v\", privKeyHex, err)\n\t}\n\timportUrl := url.URL{\n\t\tPath: \"/v2/keys/evm/import\",\n\t}\n\tquery := importUrl.Query()\n\n\tquery.Set(\"oldpassword\", defaultChainlinkNodePassword)\n\tquery.Set(\"evmChainID\", fmt.Sprint(k.cfg.ChainID))\n\n\timportUrl.RawQuery = query.Encode()\n\tresp, err := client.Post(importUrl.String(), bytes.NewReader(keyJSON))\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to import priv key %s: %v\", privKeyHex, err)\n\t}\n\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode >= 400 {\n\t\tbody, err := io.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"failed to read error response body: %s\", err)\n\t\t}\n\n\t\treturn \"\", fmt.Errorf(\"unable to create ocr2keeper job: '%v' [%d]\", string(body), resp.StatusCode)\n\t}\n\n\treturn address, nil\n}", "func (a tlsCredentials) getKeyFilename() string {\n\treturn a.keySecret.Key\n}", "func (s ServiceClientWrapper) ShowKey(name string, password string) (addr string, err error) {\n\t_, address, err := s.ServiceClient.Find(name, password)\n\treturn address.String(), err\n}", "func PathToKey(path string) (string) {\n\treturn base64.StdEncoding.EncodeToString([]byte(path))\n}", "func GetMapKey(UsernameHashed, PasswordHashed string) []byte {\r\n\treturn []byte(path.Join(keyPrefix4SubTree, UsernameHashed, PasswordHashed))\r\n}", "func GenerateAccountKey(addr string) string {\n\treturn fmt.Sprintf(\"account_addr_%s\", addr)\n}", "func GrantStoreKey(grantee, granter sdk.AccAddress, msgType string) []byte {\n\tm := conv.UnsafeStrToBytes(msgType)\n\tgranter = address.MustLengthPrefix(granter)\n\tgrantee = address.MustLengthPrefix(grantee)\n\n\tl := 1 + len(grantee) + len(granter) + len(m)\n\tkey := make([]byte, l)\n\tcopy(key, GrantPrefix)\n\tcopy(key[1:], granter)\n\tcopy(key[1+len(granter):], grantee)\n\tcopy(key[l-len(m):], m)\n\n\treturn key\n}", "func credKey(name string) string {\n\thash := sha1.Sum([]byte(name))\n\tfirst, second, rest := hash[:2], hash[2:4], hash[4:]\n\treturn credsPathPrefix + fmt.Sprintf(\"%x/%x/%x\", first, second, rest)\n}", "func GetAccountByAddress(addr common.Address, datadir string) (ks *keystore.KeyStore, a accounts.Account, err error) {\n\tks = keystore.NewKeyStore(datadir+\"/keystore\", 262144, 1)\n\tas := ks.Accounts()\n\tfor _, v := range as {\n\t\tif v.Address == addr {\n\t\t\ta = v\n\t\t\tbreak\n\t\t}\n\t}\n\tif len(addr) == 0 {\n\t\terr = fmt.Errorf(\"No key with address '%v'\", addr)\n\t}\n\treturn\n}", "func KeyStore(key *Key, organizationID uint, clusterName string) (secretID string, err error) {\n\tlog.Info(\"Store SSH Key to Bank Vaults\")\n\tvar createSecretRequest secret.CreateSecretRequest\n\tcreateSecretRequest.Type = secretTypes.SSHSecretType\n\tcreateSecretRequest.Name = clusterName\n\n\tcreateSecretRequest.Values = map[string]string{\n\t\tsecretTypes.User: key.User,\n\t\tsecretTypes.Identifier: key.Identifier,\n\t\tsecretTypes.PublicKeyData: key.PublicKeyData,\n\t\tsecretTypes.PublicKeyFingerprint: key.PublicKeyFingerprint,\n\t\tsecretTypes.PrivateKeyData: key.PrivateKeyData,\n\t}\n\n\tsecretID, err = secret.Store.Store(organizationID, &createSecretRequest)\n\n\tif err != nil {\n\t\tlog.Errorf(\"Error during store: %s\", err.Error())\n\t\treturn \"\", err\n\t}\n\n\tlog.Info(\"SSH Key stored.\")\n\treturn\n}", "func writeKeyToDir(keyDir string) 
keypool.WriteKeyCallbackFunc {\n\treturn func(keyid []byte, usage string, marshalledKey []byte) error {\n\t\t// Write key to file in keyDir\n\t\terr := ioutil.WriteFile(filepath.Join(keyDir, fmt.Sprintf(\"%x.pubkey\", keyid)), marshalledKey, 0600)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n}", "func getKey(keyFilePath, keyHandle string) (string, string, error) {\n\tvar rKey string\n\tvar rKeyInfo string\n\tvar rErr error\n\tif keyHandle == \"\" || encryptContainerImage {\n\t\tencryptContainerImage = true\n\t\tlogrus.Debugf(\"secureoverlay2: getting key for encryption: %s \", keyHandle)\n\t\tif keyFilePath != \"\" {\n\n\t\t\tunwrappedKey, err := exec.Command(\"wpm\", \"unwrap-key\", \"-i\", keyFilePath).CombinedOutput()\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", \"\", fmt.Errorf(\"secureoverlay2: Could not get unwrapped key from the wrapped key %v\", err)\n\t\t\t}\n\t\t\tif len(unwrappedKey) == 0 {\n\t\t\t\treturn \"\", \"\", fmt.Errorf(\"secureoverlay2: unwrapped key is empty\")\n\t\t\t}\n\t\t\tkey := string(unwrappedKey)\n\t\t\tkey = strings.TrimSuffix(key, \"\\n\")\n\t\t\tkeyInfo := strings.Split(keyFilePath, \"_\")\n\t\t\trKey, rKeyInfo, rErr = key, keyInfo[1], nil\n\t\t} else {\n\t\t\trKey, rKeyInfo, rErr = \"\", \"\", fmt.Errorf(\"secureoverlay2: keyFilePath empty\")\n\t\t}\n\n\t} else {\n\t\t//fetch the key for encrypting/decrypting the image\n\t\tlogrus.Debugf(\"secureoverlay2: getting key for decryption on : %s \", keyHandle)\n\t\trKey, rKeyInfo, rErr = getKmsKeyFromKeyCache(keyHandle)\n\t}\n\n\treturn rKey, rKeyInfo, rErr\n}", "func getKey(config *viper.Viper, keyType string) ([]byte, error) {\n\tkey := config.GetString(keyType + \"Key\")\n\tif key != \"\" {\n\t\treturn []byte(key), nil\n\t}\n\tif config.GetString(keyType+\"KeyFile\") == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing %s key in the token config (%sKey or %sKeyFile)\", keyType, keyType, keyType)\n\t}\n\treturn ioutil.ReadFile(prepareFileName(config.GetString(keyType + \"KeyFile\")))\n}", "func (fs *FileStore) Get(key string, r io.ReaderFrom) error {\n\tkey = fs.mangleKey(key, false)\n\tf, err := os.Open(filepath.Join(fs.baseDir, key))\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn ErrUnknownKey\n\t\t}\n\t\treturn fmt.Errorf(\"error opening key file: %w\", err)\n\t}\n\t_, err = r.ReadFrom(f)\n\tf.Close()\n\treturn err\n}", "func getKeyPath(name string) string {\n\treturn configDir + \"/hil-vpn-\" + name + \".key\"\n}", "func KeyFile(file string) ssh.AuthMethod {\n\tbuffer, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\tlog.Println(\"Failed to open key file, error: \", err)\n\t}\n\n\tkey, err := ssh.ParsePrivateKey(buffer)\n\tif err != nil {\n\t\tlog.Println(\"Failed to parse key file, error: \", err)\n\t}\n\n\treturn ssh.PublicKeys(key)\n}", "func writeKeyToFile(keyBytes []byte, saveFileTo string) error {\n\terr := ioutil.WriteFile(saveFileTo, keyBytes, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (key twofishKey) Key() []byte {\n\treturn key[:]\n}", "func readKey(db *bolt.DB, name string) ([]byte, error) {\n\tkey := make([]byte, 32)\n\terr := db.Update(func(tx *bolt.Tx) error {\n\t\tb, err := tx.CreateBucketIfNotExists([]byte(\"settings\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// return key if exists\n\t\tk := b.Get([]byte(name))\n\t\tif k != nil {\n\t\t\tcopy(key, k)\n\t\t\treturn nil\n\t\t}\n\t\t// if key not found, generate one\n\t\t_, err = rand.Read(key)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn 
b.Put([]byte(name), key)\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn key, nil\n}", "func key() string {\n\treturn \"USERPROFILE\"\n}", "func (k *Keychain) Key() (string, error) {\n\tkey, err := k.BinKey()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn hex.EncodeToString(key), nil\n}", "func (u *User) SaveKeyfile() {\n\tsaveKeyfile(u.Keys, u.Path, u.Pass)\n}", "func (kv *DisKV) fileGet(shard int, key string) (string, error) {\n\tfullname := kv.shardDir(shard) + \"/key-\" + kv.encodeKey(key)\n\tcontent, err := ioutil.ReadFile(fullname)\n\treturn string(content), err\n}", "func readKey(key string) (string, error) {\n\tvar env Config\n\tif err := envconfig.Process(\"\", &env); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdata, err := ioutil.ReadFile(filepath.Join(env.NSXSecretPath, key))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(data), nil\n}", "func (f *FileStore) getPathByKey(key string) string {\n\treturn filepath.Join(f.directoryPath, key)\n}", "func (f *FileStore) getPathByKey(key string) string {\n\treturn filepath.Join(f.directoryPath, key)\n}", "func (t *Thread) GetFileKey(block *repo.Block) (string, error) {\n\tkey, err := t.Decrypt(block.TargetKey)\n\tif err != nil {\n\t\tlog.Errorf(\"error decrypting key: %s\", err)\n\t\treturn \"\", err\n\t}\n\treturn string(key), nil\n}", "func getWallet(fileName string) *keystore.Key {\n\n\t// load local test wallet key, may need to pull ahead vs on-demand\n\twalletKeyJSON, err := ioutil.ReadFile(fileName)\n\n\tif err != nil {\n\t\tfmt.Printf(\"error loading the walletKey : %v\", err)\n\t}\n\t// decrypt wallet\n\twalletKey, err := keystore.DecryptKey(walletKeyJSON, os.Getenv(\"MAIN_WALLET_PW\"))\n\tif err != nil {\n\t\tfmt.Printf(\"walletKey err : %v\", err)\n\t}\n\treturn walletKey\n}", "func (sa ServiceAccount) Key() string {\n\treturn fmt.Sprintf(\"%s/%s\", sa.Namespace, sa.Name)\n}", "func AddressFromBalancesStore(key []byte) sdk.AccAddress {\n\tkv.AssertKeyAtLeastLength(key, 1+v1auth.AddrLen)\n\taddr := key[:v1auth.AddrLen]\n\tkv.AssertKeyLength(addr, v1auth.AddrLen)\n\treturn sdk.AccAddress(addr)\n}", "func getFollowingKey(me types.AccountKey, myFollowing types.AccountKey) []byte {\n\treturn append(getFollowingPrefix(me), myFollowing...)\n}", "func (s *Store) Import(privKey, passphrase string) (string, error) {\n\tprivBytes, err := hex.DecodeString(privKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tkey, err := ethcrypto.ToECDSA(privBytes)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\taccount, err := s.ks.ImportECDSA(key, passphrase)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn account.Address.Hex(), nil\n\n}", "func GetUsernameKey(address sdk.AccAddress) []byte {\n\treturn append(KeyPrefix(UsernameKey), []byte(address.String())...)\n}", "func SaveKey(pubkey, privkey, password, extradata []byte) (keyfile, mpubkey, mprivkey []byte, err error) {\n\tvar hasPass, out, secretdata []byte\n\tspriv := new([32]byte)\n\tspub := new([32]byte)\n\n\tmpubkey = pubkey\n\tmprivkey = privkey\n\tpwsalt := new([32]byte)\n\t_, err = io.ReadFull(rand.Reader, pwsalt[:])\n\tif err != nil {\n\t\treturn\n\t}\n\tif password == nil || len(password) == 0 {\n\t\thasPass = hasNoPassword\n\t\tspriv = pwsalt\n\t} else {\n\t\thasPass = hasPassword\n\t\tspriv = pwHash(pwsalt[:], password)\n\t}\n\tcurve25519.ScalarBaseMult(spub, spriv)\n\tnonce := new([24]byte)\n\trand.Read(nonce[:])\n\tsecretdata, _ = bytepack.Pack(secretdata, privkey, PrivateKey)\n\tif extradata != nil && len(extradata) > 0 
{\n\t\tsecretdata, _ = bytepack.Pack(secretdata, extradata, ExtraData)\n\t}\n\ttpubkey := new([32]byte)\n\tcopy(tpubkey[:], pubkey)\n\tout = box.Seal(out, secretdata, nonce, tpubkey, spriv)\n\tkeyfile, _ = bytepack.Pack(keyfile, pwsalt[:], SaltField)\n\tkeyfile, _ = bytepack.Pack(keyfile, hasPass[:], PasswordFlagField)\n\tkeyfile, _ = bytepack.Pack(keyfile, pubkey, MyPublicKey)\n\tkeyfile, _ = bytepack.Pack(keyfile, spub[:], CryptPublicKey)\n\tkeyfile, _ = bytepack.Pack(keyfile, nonce[:], Nonce)\n\tkeyfile, _ = bytepack.Pack(keyfile, out[:], NaCLBox)\n\treturn\n}", "func GetCandidateKey(address sdk.Address) []byte {\n\treturn append(CandidateKeyPrefix, address.Bytes()...)\n}", "func key(item storage.Key) []byte {\n\treturn []byte(item.Namespace() + separator + item.ID())\n}", "func MyIdentityKey() []byte {\n\treturn identityKey.PublicKey.Serialize()\n}", "func writeKeyFile() {\n // Write key and address to a toml file\n addr, pkey := keygen()\n f, err := os.Create(\"../src/config/setup_keys.toml\")\n if err != nil {\n log.Panic(\"Could not create or open setup_keys.toml\")\n }\n defer f.Close()\n\n var s = fmt.Sprintf(\"[battery]\\naddr = \\\"%s\\\"\\npkey = \\\"%s\\\"\", addr, pkey)\n _, err2 := f.WriteString(s)\n if err2 != nil {\n log.Panic(\"Could not write your address or pkey\")\n }\n return\n}", "func MakeKey(sk StorageKey, id string) []byte {\n\tkey := []byte(id)\n\tkey = append([]byte{byte(sk)}, key...)\n\treturn key\n}", "func (b *Backend) keyToDocumentID(key []byte) string {\n\t// URL-safe base64 will not have periods or forward slashes.\n\t// This should satisfy the Firestore requirements.\n\treturn base64.URLEncoding.EncodeToString(key)\n}", "func deriveContentKey(project *Project, bucket, key string) (*storj.Key, error) {\n\tencStore := project.access.encAccess.Store\n\tderivedKey, err := encryption.DeriveContentKey(bucket, paths.NewUnencrypted(key), encStore)\n\treturn derivedKey, err\n}", "func _dbKeyForPKIDToProfileEntry(pkid *PKID) []byte {\n\tprefixCopy := append([]byte{}, _PrefixPKIDToProfileEntry...)\n\tkey := append(prefixCopy, pkid[:]...)\n\treturn key\n}", "func convertKey(key []byte) string {\n\treturn fmt.Sprintf(\"%x\", key)\n}", "func (s *sidecar) writeKey(file string, data []byte) error {\n\tb := &pem.Block{\n\t\tType: \"PRIVATE KEY\",\n\t\tBytes: data,\n\t}\n\n\tf, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE|os.O_APPEND, keyFileMode)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = f.Write(pem.EncodeToMemory(b))\n\tif err != nil {\n\t\terr1 := f.Close()\n\t\tif err1 != nil {\n\t\t\treturn errors.Wrap(err1, err.Error())\n\t\t}\n\t\treturn err\n\t}\n\n\terr = f.Close()\n\treturn err\n}", "func orderBookKey(orderID string) []byte {\n\treturn myposchain.ConcatKeys(OrderBookKeyPrefix, []byte{0x0}, []byte(orderID))\n}", "func opaqueProviderKey(provider string, addr string) (key string) {\n\tkey = provider\n\tif addr != \"\" {\n\t\tkey = fmt.Sprintf(\"%s:%s\", addr, provider)\n\t}\n\treturn key\n}", "func (k Key) File() string {\n\tif k.Type == PrivateKey {\n\t\treturn PrivateKeyFile(k.Usage, k.Version)\n\t}\n\treturn PublicKeyFile(k.Usage, k.IA, k.Version)\n}", "func (t *Token) loadKeyFromFile(filename string) (*ecdsa.PrivateKey, error) {\n\tbytes, err := os.ReadFile(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn t.passKeyFromByte(bytes)\n}", "func (s storeRepository) key(repo, name string) string {\n\treturn fmt.Sprintf(\"%s/%s\", repo, name)\n}", "func GenerateSmartContractExtKey(addr string) string {\n\treturn 
fmt.Sprintf(\"smartcontract_ext_addr_%s\", addr)\n}", "func RemoteAddrKey() interface{} {\n\treturn remoteAddrKey\n}", "func (k *FileKeystore) Retrieve(key string) (*SecureString, error) {\n\tk.RLock()\n\tdefer k.RUnlock()\n\n\tsecret, ok := k.secrets[key]\n\tif !ok {\n\t\treturn nil, ErrKeyDoesntExists\n\t}\n\treturn NewSecureString(secret.Value), nil\n}", "func UserBlockStoreKey(blocker string, subspace string, blockedUser string) []byte {\n\treturn append(BlockerSubspacePrefix(blocker, subspace), []byte(blockedUser)...)\n}", "func (am *ACMEManager) storageSafeUserKey(ca, email, defaultFilename, extension string) string {\n\tif email == \"\" {\n\t\temail = emptyEmail\n\t}\n\temail = strings.ToLower(email)\n\tfilename := am.emailUsername(email)\n\tif filename == \"\" {\n\t\tfilename = defaultFilename\n\t}\n\tfilename = StorageKeys.Safe(filename)\n\treturn path.Join(am.storageKeyUserPrefix(ca, email), filename+extension)\n}", "func readKey(key string, path string) (string, error) {\n\tdata, err := ioutil.ReadFile(filepath.Join(path, key))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(data), nil\n}", "func (am *ACMEIssuer) loadAccountByKey(ctx context.Context, privateKeyPEM []byte) (acme.Account, error) {\n\taccountList, err := am.config.Storage.List(ctx, am.storageKeyUsersPrefix(am.CA), false)\n\tif err != nil {\n\t\treturn acme.Account{}, err\n\t}\n\tfor _, accountFolderKey := range accountList {\n\t\temail := path.Base(accountFolderKey)\n\t\tkeyBytes, err := am.config.Storage.Load(ctx, am.storageKeyUserPrivateKey(am.CA, email))\n\t\tif err != nil {\n\t\t\treturn acme.Account{}, err\n\t\t}\n\t\tif bytes.Equal(bytes.TrimSpace(keyBytes), bytes.TrimSpace(privateKeyPEM)) {\n\t\t\treturn am.loadAccount(ctx, am.CA, email)\n\t\t}\n\t}\n\treturn acme.Account{}, fs.ErrNotExist\n}", "func KeyFile(path string) Opt {\n\treturn func(p *params) { p.keyFile = path }\n}", "func (w *Wallet) ToKey(address string) (keys.PublicKey, error) {\n\treturn keys.PublicKey{}, nil\n}", "func (c *FilesBuilder) AddKeyPair(ctx context.Context, name string,\n\tf func(context.Context, *cke.Node) (cert, key []byte, err error)) error {\n\tvar mu sync.Mutex\n\tcertMap := make(map[string][]byte)\n\tkeyMap := make(map[string][]byte)\n\n\tenv := well.NewEnvironment(ctx)\n\tfor _, n := range c.nodes {\n\t\tn := n\n\t\tenv.Go(func(ctx context.Context) error {\n\t\t\tcertData, keyData, err := f(ctx, n)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tmu.Lock()\n\t\t\tcertMap[n.Address] = certData\n\t\t\tkeyMap[n.Address] = keyData\n\t\t\tmu.Unlock()\n\t\t\treturn nil\n\t\t})\n\t}\n\tenv.Stop()\n\terr := env.Wait()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.files = append(c.files, fileData{name + \".crt\", certMap})\n\tc.files = append(c.files, fileData{name + \".key\", keyMap})\n\treturn nil\n}", "func LoadKey(keyfile, password []byte) (pubkey, privkey, extradata []byte, err error) {\n\tvar fields map[int][]byte\n\tvar out []byte\n\tnonce := new([24]byte)\n\tspriv := new([32]byte)\n\trpub := new([32]byte)\n\tfields, err = bytepack.UnpackAll(keyfile)\n\tif err != nil {\n\t\treturn\n\t}\n\tif !bytepack.VerifyFields(fields, []int{SaltField, PasswordFlagField, MyPublicKey, CryptPublicKey, Nonce, NaCLBox}) {\n\t\terr = ErrMissingField\n\t\treturn\n\t}\n\tif bytes.Equal(fields[PasswordFlagField], hasNoPassword) {\n\t\tcopy(spriv[:], fields[SaltField])\n\t} else if password == nil || len(password) == 0 {\n\t\terr = ErrMissingPassword\n\t\treturn\n\t} else {\n\t\tspriv = pwHash(fields[SaltField], 
password)\n\t}\n\tcopy(nonce[:], fields[Nonce])\n\tcopy(rpub[:], fields[MyPublicKey])\n\tpubkey = fields[MyPublicKey]\n\txdata, ok := box.Open(out, fields[NaCLBox], nonce, rpub, spriv)\n\tif !ok {\n\t\terr = ErrWrongPassword\n\t\treturn\n\t}\n\tsecretfields, err := bytepack.UnpackAll(xdata)\n\tif err != nil {\n\t\treturn\n\t}\n\tif !bytepack.VerifyFields(secretfields, []int{PrivateKey}) {\n\t\terr = ErrMissingField\n\t\treturn\n\t}\n\tprivkey = secretfields[PrivateKey]\n\tif bytepack.VerifyFields(secretfields, []int{ExtraData}) {\n\t\textradata = secretfields[ExtraData]\n\t\treturn\n\t}\n\textradata = nil\n\treturn\n}", "func getKeyFromKeyCache(keyHandle string) (string, string, error) {\n\n\tctxkey := ctx.Value(keyHandle)\n\tif ctxkey == nil || ctxkey == \"\" {\n\t\tconn, err := net.Dial(\"unix\", RPCSocketFilePath)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", fmt.Errorf(\"secureoverlay2: Failed to dial workload-agent wlagent.sock\")\n\t\t}\n\t\tclient := rpc.NewClient(conn)\n\t\tdefer client.Close()\n\t\tvar outKey KeyInfo\n\t\tvar args = KeyInfo{\n\t\t\tKeyID: keyHandle,\n\t\t}\n\t\terr = client.Call(\"VirtualMachine.FetchKey\", &args, &outKey)\n\t\tif err != nil {\n\t\t\tlogrus.Tracef(\"%+v\", err)\n\t\t\treturn \"\", \"\", fmt.Errorf(\"secureoverlay2: rpc call workload-agent fetch-key: Client call failed\")\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", fmt.Errorf(\"Could not fetch the key from workload-agent\")\n\t\t}\n\t\tif len(outKey.Key) == 0 {\n\t\t\treturn \"\", \"\", fmt.Errorf(\"Empty key received from workload-agent\")\n\t\t}\n\t\tunwrappedKey := base64.StdEncoding.EncodeToString(outKey.Key)\n\t\tctx = context.WithValue(context.TODO(), keyHandle, unwrappedKey)\n\t\treturn unwrappedKey, \"\", nil\n\t}\n\treturn fmt.Sprintf(\"%v\", ctx.Value(keyHandle)), \"\", nil\n\n}", "func EncryptedKeyFile(file, password string) ssh.AuthMethod {\n\tbuffer, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\tlog.Println(\"Failed to open key file, error: \", err)\n\t}\n\n\tkey, err := ssh.ParsePrivateKeyWithPassphrase(buffer, []byte(password))\n\tif err != nil {\n\t\tlog.Println(\"Failed to parse key file, error: \", err)\n\t}\n\n\treturn ssh.PublicKeys(key)\n}", "func (f *File) makeKey(subkeys ...interface{}) (val string) {\n\n\tkeys := make([]string, len(subkeys)+1)\n\n\tkeys[0] = string(f.PathHash)\n\n\tfor i, value := range subkeys {\n\t\tkeys[i+1] = fmt.Sprintf(\"%s\", value)\n\t}\n\n\treturn strings.Join(keys, KEY_SEPARATOR)\n}", "func (f *fileStore) SaveKeyPair(p *Pair) error {\n\tif err := Save(f.privateKeyFile, p, true); err != nil {\n\t\treturn err\n\t}\n\tfmt.Printf(\"Saved the key : %s at %s\\n\", p.Public.Addr, f.publicKeyFile) //nolint\n\treturn Save(f.publicKeyFile, p.Public, false)\n}", "func keyify(key string) []byte {\n\th := fnv.New64a()\n\th.Write([]byte(key))\n\tb := make([]byte, 0, 8)\n\treturn h.Sum(b)\n}", "func (r *KeyRing) Key(id string) (*Key, error) {\n\t// NB: GnuPG allows any of the following to be used:\n\t// - Hex ID (we support)\n\t// - Email (we support)\n\t// - Substring match on OpenPGP User Name (we support if first two fail)\n\t// - Fingerprint\n\t// - OpenPGP User Name (\"Name (Comment) <email>\")\n\t// - Partial email\n\t// - Subject DN (x509)\n\t// - Issuer DN (x509)\n\t// - Keygrip (40 hex digits)\n\n\thexID, err := strconv.ParseInt(id, 16, 64)\n\tif err == nil {\n\t\tk := r.entities.KeysById(uint64(hexID))\n\t\tl := len(k)\n\t\tif l > 1 {\n\t\t\treturn nil, fmt.Errorf(\"required one key, got %d\", l)\n\t\t}\n\t\tif l == 1 {\n\t\t\treturn 
&Key{entity: k[0].Entity, PassphraseFetcher: r.PassphraseFetcher}, nil\n\t\t}\n\t\t// Else fallthrough and try a string-based lookup\n\t}\n\n\t// If we get here, there was no key found when looking by hex ID.\n\t// So we try again by string name in the email field. We also do weak matching\n\t// at the same time.\n\tweak := map[[20]byte]*openpgp.Entity{}\n\tfor _, e := range r.entities {\n\t\tfor _, ident := range e.Identities {\n\t\t\t// XXX Leave this commented section\n\t\t\t// It is not clear whether we should skip identities that were not self-signed\n\t\t\t// with the Sign flag on. Since the entity is at a higher level than the identity,\n\t\t\t// it seems like we are more interested in the entity's capability than the\n\t\t\t// identity the user requested, and we can always walk the subkeys to see if\n\t\t\t// any of those are allowed to sign. So I am leaving this commented.\n\t\t\t//if !ident.SelfSignature.FlagSign {\n\t\t\t//\tcontinue\n\t\t\t//}\n\t\t\tif ident.UserId.Email == id {\n\t\t\t\treturn &Key{entity: e, PassphraseFetcher: r.PassphraseFetcher}, nil\n\t\t\t}\n\t\t\tif strings.Contains(ident.Name, id) {\n\t\t\t\tweak[e.PrimaryKey.Fingerprint] = e\n\t\t\t}\n\t\t}\n\t}\n\n\tswitch len(weak) {\n\tcase 0:\n\t\treturn nil, errors.New(\"key not found\")\n\tcase 1:\n\t\tfor _, first := range weak {\n\t\t\treturn &Key{entity: first, PassphraseFetcher: r.PassphraseFetcher}, nil\n\t\t}\n\t}\n\treturn nil, errors.New(\"multiple matching keys found\")\n}", "func (u User) Key() []byte {\n\treturn []byte(u.Username)\n}", "func (o FluxConfigurationBlobStorageOutput) AccountKey() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FluxConfigurationBlobStorage) *string { return v.AccountKey }).(pulumi.StringPtrOutput)\n}", "func (f *File) Key(key string) *File {\n\tf.key = key\n\treturn f\n}" ]
[ "0.63449895", "0.61992145", "0.60549915", "0.5879409", "0.5833465", "0.58311737", "0.5829052", "0.57962936", "0.57795167", "0.5744459", "0.573525", "0.5691633", "0.56843644", "0.56740224", "0.5651475", "0.5645965", "0.5644781", "0.5644164", "0.5636174", "0.56352246", "0.56232715", "0.5593557", "0.55745333", "0.55691785", "0.55651814", "0.5561834", "0.5549958", "0.5540391", "0.55327046", "0.5500048", "0.54952765", "0.5476472", "0.5466202", "0.5453119", "0.5449751", "0.54407763", "0.5436062", "0.541174", "0.54101723", "0.5381675", "0.5379456", "0.5370657", "0.53550434", "0.53547066", "0.5345763", "0.5342727", "0.53282213", "0.53224826", "0.5318468", "0.53026575", "0.52874404", "0.52853304", "0.5246307", "0.5244859", "0.524423", "0.52251595", "0.52251595", "0.5212785", "0.52117676", "0.52093", "0.5204275", "0.52012396", "0.5189525", "0.5187287", "0.5175884", "0.5157958", "0.5157035", "0.5142912", "0.51378006", "0.5131658", "0.51295286", "0.51239747", "0.5121753", "0.51208496", "0.51124847", "0.5110387", "0.5109175", "0.5106551", "0.51043624", "0.5103633", "0.5102309", "0.50966007", "0.50960433", "0.5092104", "0.50907904", "0.5089966", "0.5086617", "0.5080965", "0.50766194", "0.5072947", "0.50719684", "0.5069177", "0.5068053", "0.5066534", "0.50653", "0.50636286", "0.50575227", "0.50564164", "0.5050929", "0.50397414" ]
0.6400484
0
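To make the record concrete: the document builds a store key by prefixing raw address bytes, and several of its negatives (AddressStoreKey, GrantStoreKey, AddressFromBalancesStore) apply the same prefix-and-strip pattern in both directions. A runnable sketch, with the prefix value assumed since the record does not show it:

```go
package main

import (
	"bytes"
	"fmt"
)

// FileStoreKeyPrefix is assumed to be a single tag byte; the record
// only shows that it exists, not its value.
var FileStoreKeyPrefix = []byte{0x01}

// FileStoreKey mirrors the record's document: prefix + raw address bytes.
func FileStoreKey(sender []byte) []byte {
	return append(FileStoreKeyPrefix, sender...)
}

// AddressFromFileStoreKey strips the prefix again, the inverse direction
// that AddressFromBalancesStore takes in the negatives above.
func AddressFromFileStoreKey(key []byte) []byte {
	return bytes.TrimPrefix(key, FileStoreKeyPrefix)
}

func main() {
	addr := []byte("sender-address-bytes") // hypothetical address bytes
	key := FileStoreKey(addr)
	fmt.Printf("key:  %x\n", key)
	fmt.Printf("addr: %s\n", AddressFromFileStoreKey(key))
}
```

One design note: with a one-byte prefix literal (capacity 1), appending any non-empty address forces append to allocate a fresh slice, so callers never alias the shared prefix.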
skip gitInformation or gitStatus; gitInformation is not cheap, and we may want to avoid it in some cases, e.g. Chromium.
func skip(key, path string, user *user.User, conf *config.Config) bool {
	for _, exclude := range vcsSetting(key, conf) {
		if strings.HasPrefix(path, exclude) {
			return true
		}
	}
	return false
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func HugoNoGitInfo() error {\n\tldflags = noGitLdflags\n\treturn Hugo()\n}", "func (r *Repo) AssertNotTracked() error {\n\tplatCfg, err := config.Platform()\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\tremote, err := platCfg.GitRemoteName()\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\tout, err := gitCMD(\"remote\", \"show\", \"-n\")\n\tif err != nil {\n\t\treturn errors.Annotate(err, out)\n\t}\n\n\tparts := strings.Split(out, \"\\n\")\n\tfor _, p := range parts {\n\t\tif p == remote {\n\t\t\treturn errors.Errorf(\"%s git remote already exists\", r)\n\t\t}\n\t}\n\treturn nil\n}", "func gitInfo(dir string) (string, time.Time) {\n\tfname := dir + \"/.git/HEAD\"\n\tbuf, err := os.ReadFile(fname)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tbuf = bytes.TrimSpace(buf)\n\tvar githash string\n\tif len(buf) == 40 {\n\t\tgithash = string(buf[:40])\n\t} else if bytes.HasPrefix(buf, []byte(\"ref: \")) {\n\t\tfname = dir + \"/.git/\" + string(buf[5:])\n\t\tbuf, err = os.ReadFile(fname)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tgithash = string(buf[:40])\n\t} else {\n\t\tlog.Fatalf(\"githash cannot be recovered from %s\", fname)\n\t}\n\tloadTime := time.Now()\n\treturn githash, loadTime\n}", "func isNeedIgnoreGitlab(eventType string, payload api.WebhookGitlab) bool {\n\tif api.GitlabWebhookPush == eventType {\n\t\tif payload[\"checkout_sha\"] == nil {\n\t\t\treturn true\n\t\t}\n\t} else if api.GitlabWebhookPullRequest == eventType {\n\t\tattributes := payload[\"object_attributes\"].(map[string]interface{})\n\t\tif attributes[\"state\"] != api.PRActionOpened {\n\t\t\treturn true\n\t\t}\n\t} else if api.GitlabWebhookRelease == eventType {\n\t\t// Create a version in UI, it will create a tag in repo automatically.\n\t\t// Need to ignore it.\n\t\tif isAutoCreateReleaseTagGitlab(payload) {\n\t\t\treturn true\n\t\t}\n\t\tif payload[\"checkout_sha\"] == nil {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func skipVerify() *exec.Cmd {\n\treturn exec.Command(\n\t\t\"git\",\n\t\t\"config\",\n\t\t\"--global\",\n\t\t\"http.sslVerify\",\n\t\t\"false\",\n\t)\n}", "func TestDiff_skipGitSrc(t *testing.T) {\n\ts := t.TempDir()\n\td := t.TempDir()\n\n\terr := os.Mkdir(filepath.Join(s, \"a1\"), 0700)\n\tassert.NoError(t, err)\n\terr = os.WriteFile(\n\t\tfilepath.Join(s, \"a1\", \"f.yaml\"), []byte(`a`), 0600)\n\tassert.NoError(t, err)\n\n\t// files that just happen to start with .git should not be ignored.\n\terr = os.WriteFile(\n\t\tfilepath.Join(s, \".gitlab-ci.yml\"), []byte(`a`), 0600)\n\tassert.NoError(t, err)\n\n\t// git should be ignored\n\terr = os.Mkdir(filepath.Join(s, \".git\"), 0700)\n\tassert.NoError(t, err)\n\terr = os.WriteFile(\n\t\tfilepath.Join(s, \".git\", \"f.yaml\"), []byte(`a`), 0600)\n\tassert.NoError(t, err)\n\n\terr = os.Mkdir(filepath.Join(d, \"a1\"), 0700)\n\tassert.NoError(t, err)\n\terr = os.WriteFile(\n\t\tfilepath.Join(d, \"a1\", \"f.yaml\"), []byte(`a`), 0600)\n\tassert.NoError(t, err)\n\n\terr = os.WriteFile(\n\t\tfilepath.Join(d, \".gitlab-ci.yml\"), []byte(`a`), 0600)\n\tassert.NoError(t, err)\n\n\tdiff, err := Diff(s, d)\n\tassert.NoError(t, err)\n\tassert.Empty(t, diff.List())\n}", "func (b *taskBuilder) usesGit() {\n\tb.cache(CACHES_GIT...)\n\tif b.matchOs(\"Win\") || b.matchExtraConfig(\"Win\") {\n\t\tb.cipd(specs.CIPD_PKGS_GIT_WINDOWS_AMD64...)\n\t} else if b.matchOs(\"Mac\") || b.matchExtraConfig(\"Mac\") {\n\t\tb.cipd(specs.CIPD_PKGS_GIT_MAC_AMD64...)\n\t} else {\n\t\tb.cipd(specs.CIPD_PKGS_GIT_LINUX_AMD64...)\n\t}\n}", "func 
main() {\n\trepoPath := filepath.Join(os.Getenv(\"GOPATH\"), \"src/github.com/libgit2/git2go\")\n\tgitRepo, err := git.OpenRepository(repoPath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tcommitOid, err := gitRepo.Head()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tblob, _ := gitRepo.LookupBlob(commitOid.Target())\n\tlog.Println(blob)\n\t// commit, err := gitRepo.LookupCommit(commitOid)\n\tcommit, err := gitRepo.LookupCommit(commitOid.Target())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tcommitTree, err := commit.Tree()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\toptions, err := git.DefaultDiffOptions()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\toptions.IdAbbrev = 40\n\toptions.InterhunkLines = 0\n\toptions.Flags = git.DiffIncludeUntracked\n\tvar parentTree *git.Tree\n\tif commit.ParentCount() > 0 {\n\t\tparentTree, err = commit.Parent(0).Tree()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\tgitDiff, err := gitRepo.DiffTreeToTree(parentTree, commitTree, &options)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfindOpts, err := git.DefaultDiffFindOptions()\n\tfindOpts.Flags = git.DiffFindBreakRewrites\n\terr = gitDiff.FindSimilar(&findOpts)\n\n\t// Show all file patch diffs in a commit.\n\tfiles := make([]string, 0)\n\thunks := make([]git.DiffHunk, 0)\n\tlines := make([]git.DiffLine, 0)\n\tpatches := make([]string, 0)\n\terr = gitDiff.ForEach(func(file git.DiffDelta, progress float64) (git.DiffForEachHunkCallback, error) {\n\t\tpatch, err := gitDiff.Patch(len(patches))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer patch.Free()\n\t\tpatchStr, err := patch.String()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpatches = append(patches, patchStr)\n\n\t\tfiles = append(files, file.OldFile.Path)\n\t\treturn func(hunk git.DiffHunk) (git.DiffForEachLineCallback, error) {\n\t\t\thunks = append(hunks, hunk)\n\t\t\treturn func(line git.DiffLine) error {\n\t\t\t\tlines = append(lines, line)\n\t\t\t\treturn nil\n\t\t\t}, nil\n\t\t}, nil\n\t}, git.DiffDetailLines)\n\n\tlog.Println(\"files: \", files, \"\\n\")\n\tlog.Println(\"hunks: \", hunks, \"\\n\")\n\tlog.Println(\"lines: \", lines, \"\\n\")\n\tlog.Println(\"patches: \", patches, \"\\n\")\n}", "func setNoImportAndCloneRepo(config *gctsDeployOptions, cloneRepoOptions *gctsCloneRepositoryOptions, httpClient piperhttp.Sender, telemetryData *telemetry.CustomData) error {\n\tlog.Entry().Infof(\"Setting VCS_NO_IMPORT to true\")\n\tnoImportConfig := setConfigKeyBody{\n\t\tKey: \"VCS_NO_IMPORT\",\n\t\tValue: \"X\",\n\t}\n\tsetConfigKeyErr := setConfigKey(config, httpClient, &noImportConfig)\n\tif setConfigKeyErr != nil {\n\t\tlog.Entry().WithError(setConfigKeyErr).Error(\"step execution failed at Set Config key for VCS_NO_IMPORT\")\n\t\treturn setConfigKeyErr\n\t}\n\tcloneErr := cloneRepository(cloneRepoOptions, telemetryData, httpClient)\n\n\tif cloneErr != nil {\n\t\tlog.Entry().WithError(cloneErr).Error(\"step execution failed at Clone Repository\")\n\t\treturn cloneErr\n\t}\n\treturn nil\n}", "func GetNewGitIgnoreTemplate(cc CodeConfig) string {\n\treturn `\n# See http://help.github.com/ignore-files/ for more about ignoring files.\n#\n# If you find yourself ignoring temporary files generated by your text editor\n# or operating system, you probably want to add a global ignore instead:\n# git config --global core.excludesfile '~/.gitignore_global'\n\n# Ignore tags\n/tags\n\n# Ignore tmp\n/tmp\n\n# Ignore test coverage files\n*.coverprofile\n*coverage.out\n\n# Ignore swap files\n*.swp\n*.swo\n\n# Ignore 
config files\n# /config.json`\n}", "func prepareGitCommand(command string, formating ...interface{}) *exec.Cmd {\n\tif formating != nil {\n\t\tcommand = fmt.Sprintf(command, formating...)\n\t}\n\targs := strings.Split(command, \" \")\n\tcmd := exec.Command(\"git\", args...)\n\tcmd.Stderr = os.Stderr\n\treturn cmd\n}", "func GitUntracked(path string) int {\n\targs := []string{\"ls-files\", \"--others\", \"--exclude-standard\"}\n\toutput := common.GitRun(path, args, true)\n\treturn len(strings.Split(string(output), \"\\n\")) - 1\n}", "func (c *tpmManagerBinary) nonsensitiveStatusIgnoreCache(ctx context.Context) ([]byte, error) {\n\treturn c.call(ctx, \"status\", \"--nonsensitive\", \"--ignore_cache\")\n}", "func gitState(repoExisted bool, out string) repoState {\n\tif !repoExisted {\n\t\treturn stateNew\n\t}\n\tif lines := strings.Split(out, \"\\n\"); len(lines) > 2 {\n\t\treturn stateChanged\n\t}\n\treturn stateUnchanged\n}", "func cloneIgnoreFilesIfNotExist() {\n\tif exists(ignoreFileDir) {\n\t\treturn\n\t}\n\n\tfmt.Println(\"Cloning gitginore files. This may take a while...\")\n\n\tif err := os.Mkdir(workDir, 0777); err != nil {\n\t\tif !os.IsExist(err) {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tcmd := exec.Command(\"git\", \"clone\", \"git@github.com:github/gitignore.git\", ignoreFileDir)\n\tif err := cmd.Run(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println(\"Done.\")\n}", "func GitRepositoryNotReady(repository GitRepository, reason, message string) GitRepository {\n\tmeta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionFalse, reason, message)\n\treturn repository\n}", "func MocksForFindGit(ctx context.Context, cmd *exec.Command) error {\n\tif strings.Contains(cmd.Name, \"git\") && len(cmd.Args) == 1 && cmd.Args[0] == \"--version\" {\n\t\t_, err := cmd.CombinedOutput.Write([]byte(\"git version 99.99.1\"))\n\t\treturn err\n\t}\n\treturn nil\n}", "func (l *GitLocation) GitInfo() testdefinition.GitInfo {\n\treturn l.gitInfo\n}", "func GitRepositoryProgressing(repository GitRepository) GitRepository {\n\trepository.Status.ObservedGeneration = repository.Generation\n\trepository.Status.URL = \"\"\n\trepository.Status.Conditions = []metav1.Condition{}\n\tmeta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, \"reconciliation in progress\")\n\treturn repository\n}", "func (repo *Repo) scanUncommitted() error {\n\t// load up alternative config if possible, if not use manager's config\n\tif repo.Manager.Opts.RepoConfig {\n\t\tcfg, err := repo.loadRepoConfig()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\trepo.config = cfg\n\t}\n\n\tif err := repo.setupTimeout(); err != nil {\n\t\treturn err\n\t}\n\n\tr, err := repo.Head()\n\tif err == plumbing.ErrReferenceNotFound {\n\t\t// possibly an empty repo, or maybe its not, either way lets scan all the files in the directory\n\t\treturn repo.scanEmpty()\n\t} else if err != nil {\n\t\treturn err\n\t}\n\n\tscanTimeStart := time.Now()\n\n\tc, err := repo.CommitObject(r.Hash())\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Staged change so the Commit details do not yet exist. 
Insert empty defaults.\n\tc.Hash = plumbing.Hash{}\n\tc.Message = \"***STAGED CHANGES***\"\n\tc.Author.Name = \"\"\n\tc.Author.Email = \"\"\n\tc.Author.When = time.Unix(0, 0).UTC()\n\n\tprevTree, err := c.Tree()\n\tif err != nil {\n\t\treturn err\n\t}\n\twt, err := repo.Worktree()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstatus, err := wt.Status()\n\tfor fn, state := range status {\n\t\tvar (\n\t\t\tprevFileContents string\n\t\t\tcurrFileContents string\n\t\t\tfilename string\n\t\t)\n\n\t\tif state.Staging != git.Untracked {\n\t\t\tif state.Staging == git.Deleted {\n\t\t\t\t// file in staging has been deleted, aka it is not on the filesystem\n\t\t\t\t// so the contents of the file are \"\"\n\t\t\t\tcurrFileContents = \"\"\n\t\t\t} else {\n\t\t\t\tworkTreeBuf := bytes.NewBuffer(nil)\n\t\t\t\tworkTreeFile, err := wt.Filesystem.Open(fn)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif _, err := io.Copy(workTreeBuf, workTreeFile); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcurrFileContents = workTreeBuf.String()\n\t\t\t\tfilename = workTreeFile.Name()\n\t\t\t}\n\n\t\t\t// get files at HEAD state\n\t\t\tprevFile, err := prevTree.File(fn)\n\t\t\tif err != nil {\n\t\t\t\tprevFileContents = \"\"\n\n\t\t\t} else {\n\t\t\t\tprevFileContents, err = prevFile.Contents()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif filename == \"\" {\n\t\t\t\t\tfilename = prevFile.Name\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tdmp := diffmatchpatch.New()\n\t\t\tdiffs := dmp.DiffCleanupSemantic(dmp.DiffMain(prevFileContents, currFileContents, false))\n\t\t\tvar diffContents string\n\t\t\tfor _, d := range diffs {\n\t\t\t\tif d.Type == diffmatchpatch.DiffInsert {\n\t\t\t\t\tdiffContents += fmt.Sprintf(\"%s\\n\", d.Text)\n\t\t\t\t}\n\t\t\t}\n\t\t\trepo.CheckRules(&Bundle{\n\t\t\t\tContent: diffContents,\n\t\t\t\tFilePath: filename,\n\t\t\t\tCommit: c,\n\t\t\t\tscanType: uncommittedScan,\n\t\t\t})\n\t\t}\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\trepo.Manager.RecordTime(manager.ScanTime(howLong(scanTimeStart)))\n\treturn nil\n}", "func TestDiff_skipGitDest(t *testing.T) {\n\ts := t.TempDir()\n\td := t.TempDir()\n\n\terr := os.Mkdir(filepath.Join(s, \"a1\"), 0700)\n\tassert.NoError(t, err)\n\terr = os.WriteFile(\n\t\tfilepath.Join(s, \"a1\", \"f.yaml\"), []byte(`a`), 0600)\n\tassert.NoError(t, err)\n\n\terr = os.Mkdir(filepath.Join(d, \"a1\"), 0700)\n\tassert.NoError(t, err)\n\terr = os.WriteFile(\n\t\tfilepath.Join(d, \"a1\", \"f.yaml\"), []byte(`a`), 0600)\n\tassert.NoError(t, err)\n\n\t// git should be ignored\n\terr = os.Mkdir(filepath.Join(d, \".git\"), 0700)\n\tassert.NoError(t, err)\n\terr = os.WriteFile(\n\t\tfilepath.Join(d, \".git\", \"f.yaml\"), []byte(`a`), 0600)\n\tassert.NoError(t, err)\n\n\tdiff, err := Diff(s, d)\n\tassert.NoError(t, err)\n\tassert.Empty(t, diff.List())\n}", "func IsLocalNonBareGitRepository(fs fs.FileSystem, dir string) (bool, error) {\n\t_, err := fs.Stat(filepath.Join(dir, \".git\"))\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}", "func (v Repository) IsBare() bool {\n\treturn v.workDir == \"\"\n}", "func (o *RunOptions) Git() gits.Gitter {\n\tif o.Gitter == nil {\n\t\to.Gitter = gits.NewGitCLI()\n\t}\n\treturn o.Gitter\n}", "func (self *WorkingTreeCommands) DiscardAnyUnstagedFileChanges() error {\n\tcmdArgs := NewGitCmd(\"checkout\").Arg(\"--\", \".\").\n\t\tToArgv()\n\n\treturn self.cmd.New(cmdArgs).Run()\n}", "func (self *WorkingTreeCommands) 
Exclude(filename string) error {\n\treturn self.os.AppendLineToFile(\".git/info/exclude\", filename)\n}", "func GitStatus() ([]string, error) {\n\tvar (\n\t\terr error\n\t\tmodfiles []string\n\t)\n\n\t// Git status command\n\t// gsArgs := []string{\"diff\", \"--staged\", \"--name-status\"}\n\t// gitStatus := exec.Command(\"git\", gsArgs...)\n\tgsArgs := []string{\"status\", \"--short\"}\n\tgitStatus := exec.Command(\"git\", gsArgs...)\n\n\t// Get stdout and trim the empty last index\n\tfileStatus, err := gitStatus.CombinedOutput()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfsSplit := strings.Split(string(fileStatus), \"\\n\")\n\n\tfor _, status := range fsSplit {\n\n\t\ts := strings.Fields(status)\n\t\tif len(s) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t// With the \"git status --short\" command the staged files are at 0 index.\n\t\t// Unstaged: \" M filename\" has a space before the M.\n\t\t// Staged: \"M filename\" has no space before the M.\n\t\t//\n\t\t// If there is a space we do not want to include it in the commit.\n\t\tif unicode.IsSpace(rune(status[0])) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// With the \"git status --short\" command the staged files are at 0 index.\n\t\t// Unstaged: \"?? directory/\"\n\t\t//\n\t\t// If there is a question mark that means that it is untracked and unstaged.\n\t\tif unicode.IsPunct(rune(status[0])) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If the file was deleted there is no reason to read that file.\n\t\tif strings.Contains(s[0], \"D\") {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Only get the file path.\n\t\tmodfiles = append(modfiles, s[len(s)-1])\n\t}\n\n\treturn modfiles, err\n}", "func GitMerge(pr *github.PullRequest, message string) error {\n\n\tfilepath := git.GetRepositoryFilePath(pr.Head.Repository.FullName)\n\tremoteRepositoryURL := git.GenerateCloneURL(pr.Head.Repository.FullName)\n\n\tif !git.Exists(filepath) {\n\t\tif _, err := git.Clone(remoteRepositoryURL); err != nil {\n\t\t\tpr.PostComment(\"I could not pull \" + pr.Head.Repository.FullName + \" from GitHub.\")\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := git.Fetch(filepath); err != nil {\n\t\tgit.Prune(filepath)\n\t\tpr.PostComment(\"I could not fetch the latest changes from GitHub. Please try again in a few minutes.\")\n\t\treturn err\n\t}\n\n\tif err := git.Checkout(filepath, pr.Head.Ref); err != nil {\n\t\tpr.PostComment(\"I could not checkout \" + pr.Head.Ref + \" locally.\")\n\t\treturn err\n\t}\n\n\tif err := git.Reset(filepath, path.Join(\"origin\", pr.Head.Ref)); err != nil {\n\t\tpr.PostComment(\"I could not checkout \" + pr.Head.Ref + \" locally.\")\n\t\treturn err\n\t}\n\n\tif err := git.Config(filepath, \"user.name\", git.GetName()); err != nil {\n\t\tpr.PostComment(\"I could run git config for user.name on the server.\")\n\t\treturn err\n\t}\n\n\tif err := git.Config(filepath, \"user.email\", git.GetEmail()); err != nil {\n\t\tpr.PostComment(\"I could run git config for user.email on the server.\")\n\t\treturn err\n\t}\n\n\tif err := git.Rebase(filepath, path.Join(\"origin\", pr.Base.Ref)); err != nil {\n\t\tpr.PostComment(\"I could not rebase \" + pr.Head.Ref + \" with \" + pr.Base.Ref + \". 
There are conflicts.\")\n\t\treturn err\n\t}\n\n\tif err := git.Push(filepath, pr.Head.Ref); err != nil {\n\t\tpr.PostComment(\"I could not push the changes to \" + pr.Base.Ref + \".\")\n\t\treturn err\n\t}\n\n\tif err := git.Checkout(filepath, pr.Base.Ref); err != nil {\n\t\tpr.PostComment(\"I could not checkout \" + pr.Base.Ref + \" locally.\")\n\t\treturn err\n\t}\n\n\tif err := git.Fetch(filepath); err != nil {\n\t\tgit.Prune(filepath)\n\t\tpr.PostComment(\"I could not fetch the latest changes from GitHub. Please try again in a few minutes.\")\n\t\treturn err\n\t}\n\n\tif err := git.Reset(filepath, path.Join(\"origin\", pr.Base.Ref)); err != nil {\n\t\tpr.PostComment(\"I could not checkout \" + pr.Base.Ref + \" locally.\")\n\t\treturn err\n\t}\n\n\tif err := git.Merge(filepath, pr.Head.Ref, message); err != nil {\n\n\t\tpr.PostComment(\"I could not merge \" + pr.Head.Ref + \" into \" + pr.Base.Ref + \".\" + \"\\nNext time use your fingers for more than just picking your nose.\")\n\t\treturn err\n\t}\n\n\tif err := git.Push(filepath, pr.Base.Ref); err != nil {\n\t\tpr.PostComment(\"I could not push the changes to \" + pr.Base.Ref + \".\")\n\t\treturn err\n\t}\n\n\tpr.PostComment(\"I just merged \" + pr.Head.Ref + \" into \" + pr.Base.Ref+ \"\\nProbably you heard this before:\\nI did it, but I did not enjoy it...\")\n\treturn nil\n}", "func LocalNonBareGitRepositoryIsEmpty(fs fs.FileSystem, dir string) (bool, error) {\n\tgitPath := filepath.Join(dir, \".git\")\n\n\tfi, err := fs.Stat(gitPath)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif !fi.IsDir() {\n\t\tgitPath, err = followGitSubmodule(fs, gitPath)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t}\n\n\t// Search for any file in .git/{objects,refs}. We don't just search the\n\t// base .git directory because of the hook samples that are normally\n\t// generated with `git init`\n\tfound := false\n\tfor _, dir := range []string{\"objects\", \"refs\"} {\n\t\terr := fs.Walk(filepath.Join(gitPath, dir), func(path string, info os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif !info.IsDir() {\n\t\t\t\tfound = true\n\t\t\t}\n\n\t\t\tif found {\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\n\t\t\treturn nil\n\t\t})\n\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tif found {\n\t\t\treturn false, nil\n\t\t}\n\t}\n\n\treturn true, nil\n}", "func isNeedIgnore(eventType string, payload api.WebhookGithub) bool {\n\tif api.GithubWebhookPush == eventType {\n\t\t// Create a version in UI, it will create a tag in repo automatically.\n\t\t// Need to ignore it.\n\t\tif isAutoCreateReleaseTag(payload) {\n\t\t\treturn true\n\t\t}\n\t} else if api.GithubWebhookPullRequest == eventType {\n\t\tif payload[api.GithubWebhookFlagAction] != api.PRActionOpened &&\n\t\t\tpayload[api.GithubWebhookFlagAction] != api.PRActionSynchronize {\n\t\t\treturn true\n\t\t}\n\t} else {\n\t\t// Do Nothing.\n\t}\n\n\treturn false\n}", "func StatusLong(c *Client, files []File, untracked StatusUntrackedMode, lineprefix string) (string, error) {\n\t// If no head commit: \"no changes yet\", else branch info\n\t// Changes to be committed: dgit diff-index --cached HEAD\n\t// Unmerged: git ls-files -u\n\t// Changes not staged: dgit diff-files\n\t// Untracked: dgit ls-files -o\n\tvar ret string\n\tindex, _ := c.GitDir.ReadIndex()\n\thasStaged := false\n\n\tvar lsfiles []File\n\tif len(files) == 0 {\n\t\tlsfiles = []File{File(c.WorkDir)}\n\t} else {\n\t\tlsfiles = files\n\t}\n\t// Start by getting a list of unmerged 
and keeping them in a map, so\n\t// that we can exclude them from the non-\"unmerged\"\n\tunmergedMap := make(map[File]bool)\n\tunmerged, err := LsFiles(c, LsFilesOptions{Unmerged: true}, lsfiles)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor _, f := range unmerged {\n\t\tfname, err := f.PathName.FilePath(c)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tunmergedMap[fname] = true\n\t}\n\n\tvar staged []HashDiff\n\thasCommit := false\n\tif head, err := c.GetHeadCommit(); err != nil {\n\t\t// There is no head commit to compare against, so just say\n\t\t// everything in the cache (which isn't unmerged) is new\n\t\tstaged, err := LsFiles(c, LsFilesOptions{Cached: true}, lsfiles)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tvar stagedMsg string\n\t\tif len(staged) > 0 {\n\t\t\thasStaged = true\n\t\t\tfor _, f := range staged {\n\t\t\t\tfname, err := f.PathName.FilePath(c)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\tif _, ok := unmergedMap[fname]; ok {\n\t\t\t\t\t// There's a merge conflict, it'l show up in \"Unmerged\"\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tstagedMsg += fmt.Sprintf(\"%v\\tnew file:\\t%v\\n\", lineprefix, fname)\n\t\t\t}\n\t\t}\n\n\t\tif stagedMsg != \"\" {\n\t\t\tret += fmt.Sprintf(\"%vChanges to be committed:\\n\", lineprefix)\n\t\t\tret += fmt.Sprintf(\"%v (use \\\"git rm --cached <file>...\\\" to unstage)\\n\", lineprefix)\n\t\t\tret += fmt.Sprintf(\"%v\\n\", lineprefix)\n\t\t\tret += stagedMsg\n\t\t\tret += fmt.Sprintf(\"%v\\n\", lineprefix)\n\t\t}\n\t} else {\n\t\thasCommit = true\n\t\tstaged, err = DiffIndex(c, DiffIndexOptions{Cached: true}, index, head, files)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\t// Staged\n\tif len(staged) > 0 {\n\t\thasStaged = true\n\n\t\tstagedMsg := \"\"\n\t\tfor _, f := range staged {\n\t\t\tfname, err := f.Name.FilePath(c)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\tif _, ok := unmergedMap[fname]; ok {\n\t\t\t\t// There's a merge conflict, it'l show up in \"Unmerged\"\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif f.Src == (TreeEntry{}) {\n\t\t\t\tstagedMsg += fmt.Sprintf(\"%v\\tnew file:\\t%v\\n\", lineprefix, fname)\n\t\t\t} else if f.Dst == (TreeEntry{}) {\n\t\t\t\tstagedMsg += fmt.Sprintf(\"%v\\tdeleted:\\t%v\\n\", lineprefix, fname)\n\t\t\t} else {\n\t\t\t\tstagedMsg += fmt.Sprintf(\"%v\\tmodified:\\t%v\\n\", lineprefix, fname)\n\t\t\t}\n\t\t}\n\t\tif stagedMsg != \"\" {\n\t\t\tret += fmt.Sprintf(\"%vChanges to be committed:\\n\", lineprefix)\n\t\t\tret += fmt.Sprintf(\"%v (use \\\"git reset HEAD <file>...\\\" to unstage)\\n\", lineprefix)\n\t\t\tret += fmt.Sprintf(\"%v\\n\", lineprefix)\n\t\t\tret += stagedMsg\n\t\t\tret += fmt.Sprintf(\"%v\\n\", lineprefix)\n\t\t}\n\t}\n\n\t// We already did the LsFiles for the unmerged, so just iterate over\n\t// them.\n\tif len(unmerged) > 0 {\n\t\tret += fmt.Sprintf(\"%vUnmerged paths:\\n\", lineprefix)\n\t\tret += fmt.Sprintf(\"%v (use \\\"git reset HEAD <file>...\\\" to unstage)\\n\", lineprefix)\n\t\tret += fmt.Sprintf(\"%v (use \\\"git add <file>...\\\" to mark resolution)\\n\", lineprefix)\n\t\tret += fmt.Sprintf(\"%v\\n\", lineprefix)\n\n\t\tfor i, f := range unmerged {\n\t\t\tfname, err := f.PathName.FilePath(c)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\tswitch f.Stage() {\n\t\t\tcase Stage1:\n\t\t\t\tswitch unmerged[i+1].Stage() {\n\t\t\t\tcase Stage2:\n\t\t\t\t\tif i >= len(unmerged)-2 {\n\t\t\t\t\t\t// Stage3 is missing, we've reached the end of the index.\n\t\t\t\t\t\tret 
+= fmt.Sprintf(\"%v\\tdeleted by them:\\t%v\\n\", lineprefix, fname)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tswitch unmerged[i+2].Stage() {\n\t\t\t\t\tcase Stage3:\n\t\t\t\t\t\t// There's a stage1, stage2, and stage3. If they weren't all different, read-tree would\n\t\t\t\t\t\t// have resolved it as a trivial stage0 merge.\n\t\t\t\t\t\tret += fmt.Sprintf(\"%v\\tboth modified:\\t%v\\n\", lineprefix, fname)\n\t\t\t\t\tdefault:\n\t\t\t\t\t\t// Stage3 is missing, but we haven't reached the end of the index.\n\t\t\t\t\t\tret += fmt.Sprintf(\"%v\\tdeleted by them:\\t%v\\n\", lineprefix, fname)\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\tcase Stage3:\n\t\t\t\t\t// Stage2 is missing\n\t\t\t\t\tret += fmt.Sprintf(\"%v\\tdeleted by us:\\t%v\\n\", lineprefix, fname)\n\t\t\t\t\tcontinue\n\t\t\t\tdefault:\n\t\t\t\t\tpanic(\"Unhandled index\")\n\t\t\t\t}\n\t\t\tcase Stage2:\n\t\t\t\tif i == 0 || unmerged[i-1].Stage() != Stage1 {\n\t\t\t\t\t// If this is a Stage2, and the previous wasn't Stage1,\n\t\t\t\t\t// then we know the next one must be Stage3 or read-tree\n\t\t\t\t\t// would have handled it as a trivial merge.\n\t\t\t\t\tret += fmt.Sprintf(\"%v\\tboth added:\\t%v\\n\", lineprefix, fname)\n\t\t\t\t}\n\t\t\t\t// If the previous was Stage1, it was handled by the previous\n\t\t\t\t// loop iteration.\n\t\t\t\tcontinue\n\t\t\tcase Stage3:\n\t\t\t\t// There can't be just a Stage3 or read-tree would\n\t\t\t\t// have resolved it as Stage0. All cases were handled\n\t\t\t\t// by Stage1 or Stage2\n\t\t\t\tcontinue\n\t\t\tdefault:\n\t\t\t\t// If ls-files -u returned something other than\n\t\t\t\t// Stage1-3, there's an unrelated bug somewhere.\n\t\t\t\tpanic(\"Invalid unmerged stage\")\n\t\t\t}\n\t\t}\n\t\tret += fmt.Sprintf(\"%v\\n\", lineprefix)\n\t}\n\t// Not staged changes\n\tnotstaged, err := DiffFiles(c, DiffFilesOptions{}, lsfiles)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\thasUnstaged := false\n\tif len(notstaged) > 0 {\n\t\thasUnstaged = true\n\t\tnotStagedMsg := \"\"\n\t\tfor _, f := range notstaged {\n\t\t\tfname, err := f.Name.FilePath(c)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\tif _, ok := unmergedMap[fname]; ok {\n\t\t\t\t// There's a merge conflict, it'l show up in \"Unmerged\"\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif f.Src == (TreeEntry{}) {\n\t\t\t\tnotStagedMsg += fmt.Sprintf(\"%v\\tnew file:\\t%v\\n\", lineprefix, fname)\n\t\t\t} else if f.Dst == (TreeEntry{}) {\n\t\t\t\tnotStagedMsg += fmt.Sprintf(\"%v\\tdeleted:\\t%v\\n\", lineprefix, fname)\n\t\t\t} else {\n\t\t\t\tnotStagedMsg += fmt.Sprintf(\"%v\\tmodified:\\t%v\\n\", lineprefix, fname)\n\t\t\t}\n\t\t}\n\t\tif notStagedMsg != \"\" {\n\t\t\tret += fmt.Sprintf(\"%vChanges not staged for commit:\\n\", lineprefix)\n\t\t\tret += fmt.Sprintf(\"%v (use \\\"git add <file>...\\\" to update what will be committed)\\n\", lineprefix)\n\t\t\tret += fmt.Sprintf(\"%v (use \\\"git checkout -- <file>...\\\" to discard changes in working directory)\\n\", lineprefix)\n\t\t\tret += fmt.Sprintf(\"%v\\n\", lineprefix)\n\t\t\tret += notStagedMsg\n\t\t\tret += fmt.Sprintf(\"%v\\n\", lineprefix)\n\t\t}\n\t}\n\n\thasUntracked := false\n\tif untracked != StatusUntrackedNo {\n\t\tlsfilesopts := LsFilesOptions{\n\t\t\tOthers: true,\n\t\t\tExcludeStandard: true, // Configurable some day\n\t\t}\n\t\tif untracked == StatusUntrackedNormal {\n\t\t\tlsfilesopts.Directory = true\n\t\t}\n\n\t\tuntracked, err := LsFiles(c, lsfilesopts, lsfiles)\n\t\tif len(untracked) > 0 {\n\t\t\thasUntracked = true\n\t\t}\n\t\tif err != nil 
{\n\t\t\treturn \"\", err\n\t\t}\n\t\tif len(untracked) > 0 {\n\t\t\tret += fmt.Sprintf(\"%vUntracked files:\\n\", lineprefix)\n\t\t\tret += fmt.Sprintf(\"%v (use \\\"git add <file>...\\\" to include in what will be committed)\\n\", lineprefix)\n\t\t\tret += fmt.Sprintf(\"%v\\n\", lineprefix)\n\n\t\t\tfor _, f := range untracked {\n\t\t\t\tfname, err := f.PathName.FilePath(c)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\t\t\t\tif fname.IsDir() {\n\t\t\t\t\tret += fmt.Sprintf(\"%v\\t%v/\\n\", lineprefix, fname)\n\t\t\t\t} else {\n\t\t\t\t\tret += fmt.Sprintf(\"%v\\t%v\\n\", lineprefix, fname)\n\t\t\t\t}\n\t\t\t}\n\t\t\tret += fmt.Sprintf(\"%v\\n\", lineprefix)\n\t\t}\n\t} else {\n\t\tif hasUnstaged {\n\t\t\tret += fmt.Sprintf(\"%vUntracked files not listed (use -u option to show untracked files)\\n\", lineprefix)\n\t\t}\n\t}\n\tvar summary string\n\tswitch {\n\tcase hasStaged && hasUntracked && hasCommit:\n\tcase hasStaged && hasUntracked && !hasCommit:\n\tcase hasStaged && !hasUntracked && hasCommit && !hasUnstaged:\n\tcase hasStaged && !hasUntracked && hasCommit && hasUnstaged:\n\t\tif untracked != StatusUntrackedNo {\n\t\t\tsummary = `no changes added to commit (use \"git add\" and/or \"git commit -a\")`\n\t\t}\n\tcase hasStaged && !hasUntracked && !hasCommit:\n\tcase !hasStaged && hasUntracked && hasCommit:\n\t\tfallthrough\n\tcase !hasStaged && hasUntracked && !hasCommit:\n\t\tsummary = `nothing added to commit but untracked files present (use \"git add\" to track)`\n\tcase !hasStaged && !hasUntracked && hasCommit && !hasUnstaged:\n\t\tsummary = \"nothing to commit, working tree clean\"\n\tcase !hasStaged && !hasUntracked && hasCommit && hasUnstaged:\n\t\tsummary = `no changes added to commit (use \"git add\" and/or \"git commit -a\")`\n\tcase !hasStaged && !hasUntracked && !hasCommit:\n\t\tsummary = `nothing to commit (create/copy files and use \"git add\" to track)`\n\tdefault:\n\t}\n\tif summary != \"\" {\n\t\tret += lineprefix + summary + \"\\n\"\n\t}\n\treturn ret, nil\n}", "func (git *Git) Prepare() {\n\tdir := pRegExp.ReplaceAllString(git.Name, \"-\")\n\n\th := sha1.New()\n\th.Write([]byte(git.Source[\"reference\"]))\n\n\tref := hex.EncodeToString(h.Sum(nil))\n\tref = ref[:6]\n\n\ttemp := dir + \"-\" + git.Version + \"-\" + ref\n\tgit.SourcePath = path.Join(git.DistDir, strings.Replace(temp, \"/\", \"-\", -1))\n\tgit.PathExist = false\n\n\t_, err := os.Stat(git.SourcePath)\n\tif err == nil {\n\t\tgit.PathExist = true\n\t}\n}", "func getCheckoutMissingStatus(snapshotID string) (*google_rpc_status.Status, error) {\n\tinputRoot, err := bazel.DigestFromSnapshotID(snapshotID)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to convert SnapshotID %s to Digest: %s\", snapshotID, err)\n\t}\n\treturn getFailedPreconditionStatus([]*remoteexecution.Digest{inputRoot})\n}", "func (g *GitLocal) DiscoverUpstreamGitURL(gitConf string) (string, error) {\n\treturn g.GitCLI.DiscoverUpstreamGitURL(gitConf)\n}", "func (g *GitLocal) GetCommitsNotOnAnyRemote(dir string, branch string) ([]GitCommit, error) {\n\treturn g.GitCLI.GetCommitsNotOnAnyRemote(dir, branch)\n}", "func (j *DSGit) GetGitOps(ctx *Ctx, thrN int) (ch chan error, err error) {\n\tworker := func(c chan error, url string) (e error) {\n\t\tdefer func() {\n\t\t\tif c != nil {\n\t\t\t\tc <- e\n\t\t\t}\n\t\t}()\n\t\tvar (\n\t\t\tsout string\n\t\t\tserr string\n\t\t)\n\t\tcmdLine := []string{GitOpsCommand, url}\n\t\tvar env map[string]string\n\t\tif GitOpsNoCleanup {\n\t\t\tenv = map[string]string{\"SKIP_CLEANUP\": 
\"1\"}\n\t\t}\n\t\tsout, serr, e = ExecCommand(ctx, cmdLine, \"\", env)\n\t\tif e != nil {\n\t\t\tif GitOpsFailureFatal {\n\t\t\t\tPrintf(\"error executing %v: %v\\n%s\\n%s\\n\", cmdLine, e, sout, serr)\n\t\t\t} else {\n\t\t\t\tPrintf(\"WARNING: error executing %v: %v\\n%s\\n%s\\n\", cmdLine, e, sout, serr)\n\t\t\t\te = nil\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\ttype resultType struct {\n\t\t\tLoc int `json:\"loc\"`\n\t\t\tPls []RawPLS `json:\"pls\"`\n\t\t}\n\t\tvar data resultType\n\t\te = jsoniter.Unmarshal([]byte(sout), &data)\n\t\tif e != nil {\n\t\t\tif GitOpsFailureFatal {\n\t\t\t\tPrintf(\"error unmarshaling from %v\\n\", sout)\n\t\t\t} else {\n\t\t\t\tPrintf(\"WARNING: error unmarshaling from %v\\n\", sout)\n\t\t\t\te = nil\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tj.Loc = data.Loc\n\t\tfor _, f := range data.Pls {\n\t\t\tfiles, _ := strconv.Atoi(f.Files)\n\t\t\tblank, _ := strconv.Atoi(f.Blank)\n\t\t\tcomment, _ := strconv.Atoi(f.Comment)\n\t\t\tcode, _ := strconv.Atoi(f.Code)\n\t\t\tj.Pls = append(\n\t\t\t\tj.Pls,\n\t\t\t\tPLS{\n\t\t\t\t\tLanguage: f.Language,\n\t\t\t\t\tFiles: files,\n\t\t\t\t\tBlank: blank,\n\t\t\t\t\tComment: comment,\n\t\t\t\t\tCode: code,\n\t\t\t\t},\n\t\t\t)\n\t\t}\n\t\treturn\n\t}\n\tif thrN <= 1 {\n\t\treturn nil, worker(nil, j.URL)\n\t}\n\tch = make(chan error)\n\tgo func() { _ = worker(ch, j.URL) }()\n\treturn ch, nil\n}", "func TestVoltBuildGitNoVimRepos(t *testing.T) {\n\ttestBuildMatrix(t, voltBuildGitNoVimRepos)\n}", "func EnsureGitIsFromCIPD(ctx context.Context) error {\n\tgit, _, _, err := FindGit(ctx)\n\tif err != nil {\n\t\treturn skerr.Wrap(err)\n\t}\n\tif !IsFromCIPD(git) {\n\t\treturn skerr.Fmt(\"Git does not appear to be obtained via CIPD: %s\", git)\n\t}\n\treturn nil\n}", "func iRunGitcleanInThatRepo() error {\n\terr := RunGitClean(gitWorkingDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (ss *Sources) gitFetch(spec v1.SourceSpec) (string, error) {\n\tp := ss.repoPath(spec)\n\t_, err := os.Stat(p)\n\tif os.IsNotExist(err) {\n\t\t// Clone new repo.\n\t\td, _ := filepath.Split(p)\n\t\terr = os.MkdirAll(d, 0750)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\t_, _, err = exe.Run(ss.Log, &exe.Opt{Dir: d}, \"\", \"git\", \"clone\", urlWithToken(spec.URL, spec.Token))\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\t_, _, err = exe.Run(ss.Log, &exe.Opt{Dir: p}, \"\", \"git\", \"checkout\", spec.Ref)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tss.Log.Info(\"GIT-clone\", \"url\", spec.URL, \"ref\", spec.Ref)\n\t} else {\n\t\t// Pull existing repo content.\n\t\t_, _, err = exe.Run(ss.Log, &exe.Opt{Dir: p}, \"\", \"git\", \"pull\", \"origin\", spec.Ref)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tss.Log.V(2).Info(\"GIT-pull\", \"url\", spec.URL, \"ref\", spec.Ref)\n\t}\n\n\t// Get hash.\n\th, _, err := exe.Run(ss.Log, &exe.Opt{Dir: p}, \"\", \"git\", \"rev-parse\", spec.Ref)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\th = strings.TrimRight(h, \"\\n\\r\")\n\tif len(h) == 0 {\n\t\treturn \"\", fmt.Errorf(\"expected git hash\")\n\t}\n\n\treturn h, nil\n}", "func (j *DSGit) CustomFetchRaw() bool {\n\treturn false\n}", "func (h *stiGit) GetInfo(repo string) *SourceInfo {\n\tgit := func(arg ...string) string {\n\t\tcommand := exec.Command(\"git\", arg...)\n\t\tcommand.Dir = repo\n\t\tout, err := command.CombinedOutput()\n\t\tif err != nil {\n\t\t\tlog.V(1).Infof(\"Error executing 'git %#v': %s (%v)\", arg, out, err)\n\t\t\treturn \"\"\n\t\t}\n\t\treturn 
strings.TrimSpace(string(out))\n\t}\n\treturn &SourceInfo{\n\t\tLocation: git(\"config\", \"--get\", \"remote.origin.url\"),\n\t\tRef: git(\"rev-parse\", \"--abbrev-ref\", \"HEAD\"),\n\t\tCommitID: git(\"rev-parse\", \"--verify\", \"HEAD\"),\n\t\tAuthorName: git(\"--no-pager\", \"show\", \"-s\", \"--format=%an\", \"HEAD\"),\n\t\tAuthorEmail: git(\"--no-pager\", \"show\", \"-s\", \"--format=%ae\", \"HEAD\"),\n\t\tCommitterName: git(\"--no-pager\", \"show\", \"-s\", \"--format=%cn\", \"HEAD\"),\n\t\tCommitterEmail: git(\"--no-pager\", \"show\", \"-s\", \"--format=%ce\", \"HEAD\"),\n\t\tDate: git(\"--no-pager\", \"show\", \"-s\", \"--format=%ad\", \"HEAD\"),\n\t\tMessage: git(\"--no-pager\", \"show\", \"-s\", \"--format=%<(80,trunc)%s\", \"HEAD\"),\n\t}\n}", "func (st *buildStatus) useKeepGoingFlag() bool {\n\t// For now, keep going for post-submit builders on release branches,\n\t// because we prioritize seeing more complete test results over failing fast.\n\t// Later on, we may start doing this all post-submit builders on all branches.\n\t// See golang.org/issue/14305.\n\t//\n\t// TODO(golang.org/issue/36181): A more ideal long term solution is one that reports\n\t// a failure fast, but still keeps going to make all other test results available.\n\treturn !st.isTry() && strings.HasPrefix(st.branch(), \"release-branch.go\")\n}", "func (g *git) gitPull() {\n\n\twTree, err := g.Repo.Worktree()\n\tif err != nil {\n\t\tlog.Fatalf(\"[Pull] Failed get work tree: %s\\n\", err.Error())\n\t}\n\n\terr = wTree.Pull(&go_git.PullOptions{\n\t\tReferenceName: plumbing.ReferenceName(g.Branch),\n\t\tAuth: g.Auth,\n\t\tSingleBranch: true,\n\t\tProgress: os.Stdout,\n\t\t//Force: true,\n\t})\n\n\tif err == nil {\n\t\tlog.Printf(\"[Pull] Success!\\n\")\n\t} else {\n\t\tswitch err {\n\t\tcase go_git.ErrUnstagedChanges:\n\t\t\tif g.Force {\n\t\t\t\tlog.Printf(\"[Pull] Info: %s (local repository changed).\\nRule for forced pull - %v. Force pulling...\", err.Error(), g.Force)\n\t\t\t\terr := wTree.Reset(&go_git.ResetOptions{\n\t\t\t\t\tMode: go_git.ResetMode(1),\n\t\t\t\t})\n\t\t\t\tif err == nil {\n\t\t\t\t\tlog.Printf(\"[Pull] Success!\\n\")\n\t\t\t\t} else {\n\t\t\t\t\tlog.Printf(\"[Pull] Error: %s\\n\", err.Error())\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"[Pull] Error: %s (local repository changed).\\nRule for forced pull - %v. 
Can`t force pull.\\n\", err.Error(), g.Force)\n\t\t\t}\n\t\tcase go_git.NoErrAlreadyUpToDate:\n\t\t\tlog.Printf(\"[Pull] Nothing to pull: %s\\n\", err.Error())\n\t\tdefault:\n\t\t\tlog.Printf(\"[Pull] Error: %s\\n\", err.Error())\n\t\t}\n\t}\n\n\t// show last commit\n\tref, err := g.Repo.Head()\n\tif err != nil {\n\t\tlog.Printf(\"Failed get reference where HEAD is pointing to: %s\\n\", err.Error())\n\t}\n\tcommit, err := g.Repo.CommitObject(ref.Hash())\n\tif err != nil {\n\t\tlog.Printf(\"[Pull] Can`t show last commit: %s\\n\", err.Error())\n\t} else {\n\t\tlog.Printf(\"[Pull] Last commit: %s\\n\", commit)\n\t}\n}", "func TestUnknownLanguageNeedsFork(t *testing.T) {\n\tpath := fmt.Sprintf(\"%s/klone-e2e-unknown\", local.Home())\n\tt.Logf(\"Test path: %s\", path)\n\trepo, err := GitServer.GetRepoByOwner(GitServer.OwnerName(), \"klone-e2e-unknown\")\n\tif err != nil && !strings.Contains(err.Error(), \"404 Not Found\") {\n\t\tt.Fatalf(\"Unable to attempt to search for repo: %v\", err)\n\t}\n\tif repo != nil && repo.Owner() == GitServer.OwnerName() {\n\t\t_, err := GitServer.DeleteRepo(\"klone-e2e-unknown\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Unable to delete repo: %v\", err)\n\t\t}\n\t}\n\terr = IdempotentKlone(path, \"Nivenly/klone-e2e-unknown\")\n\tif err != nil {\n\t\tt.Fatalf(\"Error kloning: %v\", err)\n\t}\n\tr, err := git.PlainOpen(path)\n\tif err != nil {\n\t\tt.Fatalf(\"Error opening path: %v\", err)\n\t}\n\tremotes, err := r.Remotes()\n\tif err != nil {\n\t\tt.Fatalf(\"Error reading remotes: %v\", err)\n\t}\n\toriginOk, upstreamOk := false, false\n\tfor _, remote := range remotes {\n\t\trspl := strings.Split(remote.String(), \"\\t\")\n\t\tif len(rspl) < 3 {\n\t\t\tt.Fatalf(\"Invalid remote string: %s\", remote.String())\n\t\t}\n\t\tname := rspl[0]\n\t\turl := rspl[1]\n\t\tif strings.Contains(name, \"origin\") && strings.Contains(url, fmt.Sprintf(\"git@github.com:%s/klone-e2e-unknown.git\", GitServer.OwnerName())) {\n\t\t\toriginOk = true\n\t\t}\n\t\tif strings.Contains(name, \"upstream\") && strings.Contains(url, fmt.Sprintf(\"git@github.com:%s/klone-e2e-unknown.git\", \"Nivenly\")) {\n\t\t\tupstreamOk = true\n\t\t}\n\t}\n\tif originOk == false {\n\t\tt.Fatal(\"Error detecting remote [origin]\")\n\t}\n\tif upstreamOk == false {\n\t\tt.Fatal(\"Error detecting remote [upstream]\")\n\t}\n}", "func (a *ApplyImpl) SkipDiffOnInstall() bool {\n\treturn a.ApplyOptions.SkipDiffOnInstall\n}", "func GitIgnore(wd string, thatPath string) error {\n\tif IsIgnored(wd, thatPath) {\n\t\treturn nil\n\t}\n\n\tgitignorePath := path.Join(wd, \".gitignore\")\n\t/* #nosec */\n\tgitignore, err := os.OpenFile(\n\t\tgitignorePath,\n\t\tos.O_APPEND|os.O_CREATE|os.O_WRONLY,\n\t\t0o600,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer utils.Close(gitignore)\n\n\tcontent := fmt.Sprintf(\"\\n%s\", thatPath)\n\tif _, err = gitignore.WriteString(content); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func describeRepository(flags *pflag.FlagSet, image string) error {\n\torg, _, err := dockerhub.GetFlags(flags)\n\tif err != nil {\n\t\tcolor.Red(\"Error: %s\", err)\n\t}\n\n\trepoInfo, err := dockerhub.NewClient(org, \"\").DescribeRepository(image)\n\tif err != nil {\n\t\tcolor.Red(\"Error: %s\", err)\n\t}\n\n\tcolor.Blue(\"User: \" + repoInfo.User +\n\t\t\"\\nName: \" + repoInfo.Name +\n\t\t\"\\nNamespace: \" + repoInfo.Namespace +\n\t\t\"\\nRepositoryType: \" + repoInfo.RepositoryType +\n\t\t\"\\nStatus: \" + fmt.Sprintf(\"%d\", repoInfo.Status) +\n\t\t\"\\nDescription: \" + 
repoInfo.Description +\n\t\t\"\\nIsPrivate: \" + fmt.Sprintf(\"%t\", repoInfo.IsPrivate) +\n\t\t\"\\nIsAutomated: \" + fmt.Sprintf(\"%t\", repoInfo.IsAutomated) +\n\t\t\"\\nCanEdit: \" + fmt.Sprintf(\"%t\", repoInfo.CanEdit) +\n\t\t\"\\nStarCount: \" + fmt.Sprintf(\"%d\", repoInfo.StarCount) +\n\t\t\"\\nPullCount: \" + fmt.Sprintf(\"%d\", repoInfo.PullCount) +\n\t\t\"\\nLastUpdated: \" + fmt.Sprint(repoInfo.LastUpdated) +\n\t\t\"\\nIsMigrated: \" + fmt.Sprintf(\"%t\", repoInfo.IsMigrated) +\n\t\t\"\\nCollaboratorCount: \" + fmt.Sprintf(\"%d\", repoInfo.CollaboratorCount) +\n\t\t\"\\nAffiliation: \" + repoInfo.Affiliation +\n\t\t\"\\nHubUser: \" + repoInfo.HubUser)\n\n\treturn nil\n}", "func AddGitSuffixIfNecessary(url string) string {\n\tif url == \"\" || strings.HasSuffix(strings.ToLower(url), \".git\") {\n\t\treturn url\n\t}\n\tlog.Italicf(\"Adding .git to %s\", url)\n\treturn url + \".git\"\n}", "func (conf *Conf) OverwriteFromGit(repo *Repository) (err error) {\n\tbuf := bytes.NewBuffer(nil)\n\terr = repo.Git(context.Background(), nil, buf, \"config\", \"--get-regexp\", \"^bits\")\n\tif err != nil {\n\t\treturn nil //no bits conf, nothing to do\n\t}\n\n\ts := bufio.NewScanner(buf)\n\tfor s.Scan() {\n\t\tfields := strings.Fields(s.Text())\n\t\tif len(fields) < 2 {\n\t\t\treturn fmt.Errorf(\"unexpected configuration returned from git: %v\", s.Text())\n\t\t}\n\n\t\tswitch fields[0] {\n\t\tcase \"bits.deduplication-scope\":\n\t\t\tscope, err := strconv.ParseUint(fields[1], 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unexpected format for configured dedup scope '%v', expected a base10 number\", fields[1])\n\t\t\t}\n\n\t\t\tconf.DeduplicationScope = scope\n\t\tcase \"bits.aws-s3-bucket-name\":\n\t\t\tconf.AWSS3BucketName = fields[1]\n\t\tcase \"bits.aws-access-key-id\":\n\t\t\tconf.AWSAccessKeyID = fields[1]\n\t\tcase \"bits.aws-secret-access-key\":\n\t\t\tconf.AWSSecretAccessKey = fields[1]\n\t\t}\n\t}\n\n\treturn nil\n}", "func (a *Forj) doDriverClean(d *drivers.Driver) error {\n\tif d.Plugin.Result == nil {\n\t\treturn fmt.Errorf(\"No driver data to cleanup. Result is empty\")\n\t}\n\tif len(d.Plugin.Result.Data.Files) == 0 {\n\t\tgotrace.Trace(\"No files to add/commit returned by the driver.\")\n\t\treturn nil\n\t}\n\tgotrace.Trace(\"----- Do GIT tasks in the INFRA repository.\")\n\n\t// Add source files\n\tif err := d.GitCleanPluginFiles(a.moveTo); err != nil {\n\t\treturn fmt.Errorf(\"Issue to add driver '%s' generated files. %s\", a.CurrentPluginDriver.Name, err)\n\t}\n\n\t// Check about uncontrolled files. Existing if one uncontrolled file is found\n\tif status := git.GetStatus(); status.Err != nil {\n\t\treturn fmt.Errorf(\"Issue to check git status. %s\", status.Err)\n\t} else {\n\t\tif num := status.CountUntracked(); num > 0 {\n\t\t\tlog.Print(\"Following files created by the plugin are not controlled by the plugin. You must fix it manually and contact the plugin maintainer to fix this issue.\")\n\t\t\tlog.Printf(\"files: %s\", strings.Join(status.Untracked(), \", \"))\n\t\t\treturn fmt.Errorf(\"Unable to complete commit process. 
'%d' Uncontrolled files found\", num)\n\t\t}\n\t}\n\treturn nil\n}", "func (self *WorkingTreeCommands) Ignore(filename string) error {\n\treturn self.os.AppendLineToFile(\".gitignore\", filename)\n}", "func GitVersion() string { return gitVersion }", "func checkRepository(c *Config, repos string, url string, lastTime time.Time, fd *os.File) error {\n\tdest := filepath.Join(c.Clone, repos)\n\n\t// clone the repository\n\trepository, err := git.PlainClone(dest, false, &git.CloneOptions{\n\t\tURL: url,\n\t\tProgress: os.Stdout,\n\t\tAuth: &http.BasicAuth{\n\t\t\tUsername: c.Username,\n\t\t\tPassword: c.Passwd,\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\t// record the local repository path\n\tcleanPaths = append(cleanPaths, dest)\n\n\t// retrieving the branch by HEAD\n\tref, err := repository.Head()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// retrieving the commit object\n\tcommit, err := repository.CommitObject(ref.Hash())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// retrieve the tree from the commit and file list\n\ttree, err := commit.Tree()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresult := map[string]time.Time{}\n\ttree.Files().ForEach(func(f *object.File) error {\n\t\t// only check the config files\n\t\tif strings.Contains(f.Name, c.Pattern) {\n\t\t\t// retrieve the commit log for the config files\n\t\t\titer, err := repository.Log(&git.LogOptions{\n\t\t\t\tFrom: ref.Hash(),\n\t\t\t\tOrder: git.LogOrderCommitterTime,\n\t\t\t\tFileName: &f.Name,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// check if the config file is changed\n\t\t\titer.ForEach(func(c *object.Commit) error {\n\t\t\t\t// pkg/misc/conf/business.yml in repository(alert-enricher) has changed in time(Tue Dec 10 13:07:28 2019 +0800)\n\t\t\t\tif c.Author.When.Sub(lastTime) > 0 {\n\t\t\t\t\tt, ok := result[f.Name]\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tresult[f.Name] = c.Author.When\n\t\t\t\t\t} else {\n\t\t\t\t\t\tif c.Author.When.Sub(t) > 0 {\n\t\t\t\t\t\t\tresult[f.Name] = c.Author.When\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t})\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tfor key, val := range result {\n\t\tline := fmt.Sprintf(\"%s in repository(%s) has changed in latest time(%s)\\n\", key, repos, val.Format(object.DateFormat))\n\t\tif _, err := fd.WriteString(line); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif len(result) > 0 {\n\t\tfd.WriteString(\"\\n\")\n\t}\n\n\treturn nil\n}", "func handleRepo(config *Config, sshURL string) {\n\tlog.Println(\"Repo clone\", sshURL)\n\n\ttempCloneName := getTempRepoName(sshURL)\n\n\tsyscall.Chdir(config.TempDir)\n\tos.RemoveAll(\"./\" + tempCloneName)\n\tdefer func() {\n\t\tsyscall.Chdir(config.TempDir)\n\t\tos.RemoveAll(\"./\" + tempCloneName)\n\t}()\n\n\t_, err_clone := exec.Command(config.GitCMD, \"clone\", \"--branch\", config.Branch, sshURL, tempCloneName).Output()\n\tif err_clone != nil {\n\t\tlog.Println(\"Repo cannot be cloned\", sshURL, err_clone)\n\t\treturn\n\t}\n\n\tout_grep, err_grep := execCmdWithOutput(config.GrepCMD, \"-rl\", \"--exclude-dir\", \".git\", config.ReplaceFrom, tempCloneName)\n\tif err_grep != nil {\n\t\tlog.Panic(err_grep)\n\t\treturn\n\t}\n\n\tout_grep_trimmed := strings.Trim(out_grep, \"\\n\\r\\t \")\n\tif out_grep_trimmed == \"\" {\n\t\tlog.Println(\"No match\")\n\t\treturn\n\t}\n\n\tfiles := strings.Split(out_grep_trimmed, \"\\n\")\n\tfor _, fileName := range files {\n\t\thandleFile(config, fileName)\n\t}\n\n\t// Make git operations safe - they have to be called from the 
directory\n\tmutexInRepoOp.Lock()\n\tsyscall.Chdir(\"./\" + tempCloneName)\n\tdiff, err_diff := execCmdWithOutput(config.GitCMD, \"diff\")\n\tif err_diff != nil {\n\t\tlog.Panic(err_diff)\n\t\treturn\n\t}\n\tlog.Println(diff)\n\n\tif flagCommit {\n\t\tlog.Println(\"Committing changes\")\n\t\t_, err_commit := execCmdWithOutput(config.GitCMD, \"commit\", \"-a\", \"-m\", config.CommitMessage)\n\t\tif err_commit != nil {\n\t\t\tlog.Panic(err_commit)\n\t\t\treturn\n\t\t}\n\n\t\tlog.Println(\"Push to remote\")\n\t\t_, err_push := execCmdWithOutput(config.GitCMD, \"push\", \"origin\", config.Branch)\n\t\tif err_push != nil {\n\t\t\tlog.Panic(err_push)\n\t\t\treturn\n\t\t}\n\t\tlog.Println(\"Commit and push succeed\")\n\t}\n\n\tsyscall.Chdir(config.TempDir)\n\tmutexInRepoOp.Unlock()\n}", "func FindGit(ctx context.Context) (string, int, int, error) {\n\tmtx.Lock()\n\tdefer mtx.Unlock()\n\tif git == \"\" {\n\t\tgitPath, err := osexec.LookPath(\"git\")\n\t\tif err != nil {\n\t\t\treturn \"\", 0, 0, skerr.Wrapf(err, \"Failed to find git\")\n\t\t}\n\t\tmaj, min, err := Version(ctx, gitPath)\n\t\tif err != nil {\n\t\t\treturn \"\", 0, 0, skerr.Wrapf(err, \"Failed to obtain git version\")\n\t\t}\n\t\tsklog.Infof(\"Git is %s; version %d.%d\", gitPath, maj, min)\n\t\tisFromCIPD := IsFromCIPD(gitPath)\n\t\tisFromCIPDVal := 0\n\t\tif isFromCIPD {\n\t\t\tisFromCIPDVal = 1\n\t\t}\n\t\tmetrics2.GetInt64Metric(\"git_from_cipd\").Update(int64(isFromCIPDVal))\n\t\tgit = gitPath\n\t\tgitVersionMajor = maj\n\t\tgitVersionMinor = min\n\t}\n\treturn git, gitVersionMajor, gitVersionMinor, nil\n}", "func Status() string {\n\treturn run.Capture(\"git status\")\n}", "func mustMakeGitStore(ctx context.Context, fsc *frontendServerConfig, appName string) *bt_gitstore.BigTableGitStore {\n\tif fsc.Local {\n\t\tappName = bt.TestingAppProfile\n\t}\n\tbtConf := &bt_gitstore.BTConfig{\n\t\tInstanceID: fsc.BTInstance,\n\t\tProjectID: fsc.BTProjectID,\n\t\tTableID: fsc.GitBTTable,\n\t\tAppProfile: appName,\n\t}\n\n\tgitStore, err := bt_gitstore.New(ctx, btConf, fsc.GitRepoURL)\n\tif err != nil {\n\t\tsklog.Fatalf(\"Error instantiating gitstore: %s\", err)\n\t}\n\n\treturn gitStore\n}", "func (self *Repository) Ignore(path string) error { return nil }", "func (self *Repository) Ignore(path string) error { return nil }", "func setupBareGitRepo(t *testing.T) *repositories.GitRepository {\n\tt.Helper()\n\tassert := assert.New(t)\n\n\trepoDir, err := ioutil.TempDir(\"\", \"rb-gateway-bare-repo-\")\n\tassert.Nil(err)\n\n\t_, err = git.PlainInit(repoDir, true)\n\tassert.Nil(err)\n\n\treturn &repositories.GitRepository{\n\t\tRepositoryInfo: repositories.RepositoryInfo{\n\t\t\tName: \"upstream\",\n\t\t\tPath: repoDir,\n\t\t},\n\t}\n}", "func (e *ExternalService) excludeGitoliteRepos(rs ...*Repo) error {\n\tif len(rs) == 0 {\n\t\treturn nil\n\t}\n\n\treturn e.config(\"gitolite\", func(v interface{}) (string, interface{}, error) {\n\t\tc := v.(*schema.GitoliteConnection)\n\t\tset := make(map[string]bool, len(c.Exclude))\n\t\tfor _, ex := range c.Exclude {\n\t\t\tif ex.Name != \"\" {\n\t\t\t\tset[ex.Name] = true\n\t\t\t}\n\t\t}\n\n\t\tfor _, r := range rs {\n\t\t\trepo, ok := r.Metadata.(*gitolite.Repo)\n\t\t\tif ok && repo.Name != \"\" && !set[repo.Name] {\n\t\t\t\tc.Exclude = append(c.Exclude, &schema.ExcludedGitoliteRepo{Name: repo.Name})\n\t\t\t\tset[repo.Name] = true\n\t\t\t}\n\t\t}\n\n\t\treturn \"exclude\", c.Exclude, nil\n\t})\n}", "func (f *FileList) ConfigGit() {\n\tpaw.Logger.Debug()\n\n\tf.git = NewGitStatus(f.root)\n\tgs := 
f.git.GetStatus()\n\tif f.git.NoGit || len(f.store) < 1 || gs == nil {\n\t\treturn\n\t}\n\n\t// 1. check: if dir is GitIgnored, then marks all subfiles with GitIgnored.\n\tfor _, dir := range f.dirs {\n\t\tif len(f.store[dir]) < 2 {\n\t\t\tcontinue\n\t\t}\n\t\tfm := f.store[dir]\n\t\tfd := fm[0]\n\t\tdrp := fd.RelPath\n\t\tisMarkIgnored := false\n\t\tisUntracked := false\n\t\tif xy, ok := gs[drp]; ok {\n\t\t\tif isXY(xy, GitIgnored) {\n\t\t\t\tisMarkIgnored = true\n\t\t\t}\n\t\t\tif isXY(xy, GitUntracked) {\n\t\t\t\tisUntracked = true\n\t\t\t}\n\t\t}\n\t\tif isMarkIgnored || isUntracked {\n\t\t\tfor _, file := range fm[1:] {\n\t\t\t\trp := file.RelPath\n\t\t\t\t// 1. continue...\n\t\t\t\tgs[rp] = &GitFileStatus{\n\t\t\t\t\tStaging: gs[drp].Staging,\n\t\t\t\t\tWorktree: gs[drp].Worktree,\n\t\t\t\t\tExtra: file.BaseName,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t// 2. if any of subfiles of dir (including root) has any change of git status, set GitChanged to dir\n\tfor _, dir := range f.dirs {\n\t\tfiles := f.GetFiles(dir)\n\t\tif files == nil || len(files) < 2 {\n\t\t\tcontinue\n\t\t}\n\t\tfd := files[0]\n\t\txs, ys := f.getSubXYs(files[1:], gs)\n\t\tif len(xs) > 0 || len(ys) > 0 {\n\t\t\trp := fd.RelPath\n\t\t\tgs[rp] = &GitFileStatus{\n\t\t\t\tStaging: getSC(xs),\n\t\t\t\tWorktree: getSC(ys),\n\t\t\t\tExtra: fd.BaseName + \"/\",\n\t\t\t}\n\t\t}\n\t}\n\n\tf.git.Dump(\"ConfigGit: modified\")\n}", "func TestCommitExcludes(t *testing.T) {\n\ttestclient := New(\"\")\n\t_, err := setupLocalRepoWithDirRemote(testclient)\n\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to initialise local and remote repositories: %s\", err.Error())\n\t}\n\n\tvar fsize int64 = 1024 * 1024 // 1 MiB files, greater than annex.minsize\n\n\tfnames := []string{\n\t\t\"bigmarkdown.md\",\n\t\t\"bigpython.py\",\n\t\t\"somegitfile.git\",\n\t\t\"plaintextfile.txt\",\n\t}\n\n\tfor _, fn := range fnames {\n\t\terr = createFile(fn, fsize)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"[%s] file creation failed: %s\", fn, err.Error())\n\t\t}\n\t}\n\n\taddchan := make(chan git.RepoFileStatus)\n\tgo git.AnnexAdd([]string{\".\"}, addchan)\n\tfor range addchan {\n\t}\n\n\terr = git.Commit(\"Test commit\")\n\tif err != nil {\n\t\tt.Fatalf(\"Commit failed: %s\", err.Error())\n\t}\n\n\tgitobjs, err := git.LsTree(\"HEAD\", nil)\n\tif err != nil {\n\t\tt.Fatalf(\"git ls-tree failed: %s\", err.Error())\n\t}\n\tif len(gitobjs) != len(fnames) {\n\t\tt.Fatalf(\"Expected %d git objects, got %d\", len(fnames), len(gitobjs))\n\t}\n\n\t// all file sizes in git should be fsize\n\tfor _, fn := range fnames {\n\t\tcontents, err := git.CatFileContents(\"HEAD\", fn)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Couldn't read git file contents for %s\", fn)\n\t\t}\n\t\tif int64(len(contents)) != fsize {\n\t\t\tt.Fatalf(\"Git file content size doesn't match original file size: %d (expected %d)\", len(contents), fsize)\n\t\t}\n\t}\n}", "func main() {\n\n // lang := os.Getenv(\"LANG\")\n // fmt.Println(\"Lang:\", lang)\n // known languages: en_US.UTF-8\n\n\tcurrentFolder, err := os.Getwd()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\trepositoryRoot, err := ff.FindRepository(currentFolder)\n\n\tif err == nil {\n\t\tif err := os.Chdir(repositoryRoot); err != nil {\n\t\t\tlog.Fatalf(\"Could not change to repository root %q: %v\", repositoryRoot, err)\n\t\t}\n\t}\n\n\trepositoryExists := !os.IsNotExist(err)\n\tif !repositoryExists {\n\t\tfmt.Println(color.RedString(\"No repository found\"))\n\t}\n\n\tconf, _ := ff.ReadConfiguration(repositoryRoot)\n\n\tlogger := 
genLog(repositoryRoot)\n\n\tif !repositoryExists {\n\t\tguidedRepositoryCreation(logger, conf)\n\t} else {\n\t\tconfIndented, _ := json.MarshalIndent(conf, \"\", \" \")\n\t\tif _, err := os.Stat(\".git/ff.conf.json\"); os.IsNotExist(err) {\n\t\t\t_ = ioutil.WriteFile(\".git/ff.conf.json\", confIndented, 0644)\n\t\t}\n\t}\n\n\tDevBranchName := conf.Branches.Historical.Development\n\n\tcntxt := ff.Context{\n\t\tRepositoryRoot: repositoryRoot,\n\t\tCurrentStep: &ff.CheckTagStep{},\n\t\tLogger: logger,\n\t\tDevBranchName: DevBranchName,\n\t\tConf: conf,\n\t}\n\n\tcntxt.EnterStep()\n\n\tfor cntxt.CurrentStep.Execute(&cntxt) {\n\t\tcntxt.EnterStep()\n\t}\n}", "func (j *DSGit) FetchRaw(ctx *Ctx) (err error) {\n\tPrintf(\"%s should use generic FetchRaw()\\n\", j.DS)\n\treturn\n}", "func (suiteSJ *TestSuiteForGitWorkSJ) Test_GitClone_WithBadUrl() {\n\tsuiteSJ.singleJob.GitClone(badUrlProvided)\n\terr := Ls(PathToDirectoryForTest + suiteSJ.singleJob.commit.RepoName)\n\tif err == nil {\n\t\tsuiteSJ.FailNow(\"Git clone was successful but it should not be\")\n\t}\n}", "func (e *ExternalService) excludeGitLabRepos(rs ...*Repo) error {\n\tif len(rs) == 0 {\n\t\treturn nil\n\t}\n\n\treturn e.config(\"gitlab\", func(v interface{}) (string, interface{}, error) {\n\t\tc := v.(*schema.GitLabConnection)\n\t\tset := make(map[string]bool, len(c.Exclude)*2)\n\t\tfor _, ex := range c.Exclude {\n\t\t\tif ex.Id != 0 {\n\t\t\t\tset[strconv.Itoa(ex.Id)] = true\n\t\t\t}\n\n\t\t\tif ex.Name != \"\" {\n\t\t\t\tset[strings.ToLower(ex.Name)] = true\n\t\t\t}\n\t\t}\n\n\t\tfor _, r := range rs {\n\t\t\tp, ok := r.Metadata.(*gitlab.Project)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tname := p.PathWithNamespace\n\t\t\tid := strconv.Itoa(p.ID)\n\n\t\t\tif !set[name] && !set[id] {\n\t\t\t\tc.Exclude = append(c.Exclude, &schema.ExcludedGitLabProject{\n\t\t\t\t\tName: name,\n\t\t\t\t\tId: p.ID,\n\t\t\t\t})\n\n\t\t\t\tif id != \"\" {\n\t\t\t\t\tset[id] = true\n\t\t\t\t}\n\n\t\t\t\tif name != \"\" {\n\t\t\t\t\tset[name] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn \"exclude\", c.Exclude, nil\n\t})\n}", "func Git(cmd string, args ...string) (res *exec.Cmd, stdout, stderr *bytes.Buffer) {\n\tcmdArgs := make([]string, 1)\n\tcmdArgs[0] = cmd\n\tcmdArgs = append(cmdArgs, args...)\n\tres = exec.Command(gitCmd, cmdArgs...)\n\tstdout, stderr = new(bytes.Buffer), new(bytes.Buffer)\n\tres.Stdout, res.Stderr = stdout, stderr\n\treturn\n}", "func GitUnignore(wd string, thatPath string) error {\n\tif !IsIgnored(wd, thatPath) {\n\t\treturn nil\n\t}\n\n\tgitignorePath := path.Join(wd, \".gitignore\")\n\tif utils.FileExists(gitignorePath) {\n\t\t/* #nosec */\n\t\tgitignore, err := os.OpenFile(gitignorePath, os.O_RDONLY, 0o644)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tscanner := bufio.NewScanner(gitignore)\n\t\tthatPath = strings.Trim(thatPath, \" \")\n\n\t\tlines := make([]string, 0)\n\t\tfor scanner.Scan() {\n\t\t\tif err := scanner.Err(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tline := scanner.Text()\n\t\t\tif strings.Trim(line, \" \") != thatPath {\n\t\t\t\tlines = append(lines, line)\n\t\t\t}\n\t\t}\n\t\tutils.Close(gitignore)\n\n\t\tcontents := []byte(strings.Join(lines, \"\\n\"))\n\n\t\tioutil.WriteFile(gitignorePath, contents, 0o600)\n\t}\n\n\treturn nil\n}", "func hookPreCommit(args []string) {\n\t// We used to bail in detached head mode, but it's very common\n\t// to be modifying things during git rebase -i and it's annoying\n\t// that those new commits made don't get the gofmt check.\n\t// Let's try 
keeping the hook on and see what breaks.\n\t/*\n\t\tb := CurrentBranch()\n\t\tif b.DetachedHead() {\n\t\t\t// This is an internal commit such as during git rebase.\n\t\t\t// Don't die, and don't force gofmt.\n\t\t\treturn\n\t\t}\n\t*/\n\n\thookGofmt()\n}", "func fetchInternal(bin, name, repoPath string) error {\n\targs := []string{\"fetch\", \"--prune\", name, \"+refs/*:refs/*\"}\n\tcmd := exec.Command(bin, args...)\n\tcmd.Dir = repoPath\n\tcmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}\n\terr := cmd.Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func noDash(r *Repo) { r.showOnDashboard = false }", "func (adminAPIOp) SkipVerification() bool { return true }", "func SetGitCredsReadOnlyInternal(vm *gce.Instance) *gce.Instance {\n\tnewGSDownloads := make([]*gce.GSDownload, 0, len(vm.GSDownloads)+2)\n\tfor _, gsd := range vm.GSDownloads {\n\t\tif !util.In(gsd.Dest, []string{\"~/.gitcookies\", \"~/.netrc\"}) {\n\t\t\tnewGSDownloads = append(newGSDownloads, gsd)\n\t\t}\n\t}\n\tvm.GSDownloads = append(newGSDownloads, &gce.GSDownload{\n\t\tSource: GS_URL_NETRC_READONLY_INTERNAL,\n\t\tDest: \"~/.netrc\",\n\t\tMode: \"600\",\n\t})\n\treturn vm\n}", "func _detectRemoteURL_GoGit(path string) (string, error) {\n\tgitRepo, err := git.PlainOpen(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tremote, err := gitRepo.Remote(\"origin\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn remote.Config().URLs[0], nil\n}", "func (f *FakeGit) GetInfo(repo string) *git.SourceInfo {\n\treturn &git.SourceInfo{\n\t\tRef: \"master\",\n\t\tCommitID: \"1bf4f04\",\n\t\tLocation: \"file:///foo\",\n\t}\n}", "func followGitSubmodule(fs fs.FileSystem, gitPath string) (string, error) {\n\tf, err := os.Open(gitPath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer f.Close()\n\n\tsc := bufio.NewScanner(f)\n\tif sc.Scan() {\n\t\ts := sc.Text()\n\n\t\tif strings.HasPrefix(s, \"gitdir: \") {\n\t\t\tnewGitPath := s[8:]\n\n\t\t\tif !filepath.IsAbs(newGitPath) {\n\t\t\t\tnewGitPath = filepath.Join(filepath.Dir(gitPath), newGitPath)\n\t\t\t}\n\n\t\t\tfi, err := fs.Stat(newGitPath)\n\t\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\tif os.IsNotExist(err) || !fi.IsDir() {\n\t\t\t\treturn \"\", fmt.Errorf(\"gitdir link in .git file %q is invalid\", gitPath)\n\t\t\t}\n\t\t\treturn newGitPath, nil\n\t\t}\n\t}\n\n\treturn \"\", fmt.Errorf(\"unable to parse .git file %q\", gitPath)\n}", "func GetSCMArguments(projectDir string) []string {\n\tvar info []string\n\n\t// open repository from local path\n\tlog.Debug(\"Using git repository at \" + projectDir)\n\trepository, repositoryErr := git.PlainOpen(projectDir)\n\tif repositoryErr != nil {\n\t\tlog.Warn(\"No repository!\")\n\t\treturn getDefaultInfo()\n\t}\n\n\t// get current reference\n\tref, refErr := repository.Head()\n\tif refErr != nil {\n\t\tlog.Warn(\"Empty repository!\")\n\t\treturn getDefaultInfo()\n\t}\n\tlog.Debug(\"Git Ref \" + ref.String())\n\n\t// repository kind and remote\n\tinfo = append(info, \"NCI_REPOSITORY_KIND=git\")\n\tremote, remoteErr := repository.Remote(\"origin\")\n\tlog.Debug(\"Git Remote \" + remote.String())\n\tif remoteErr == nil && remote != nil && remote.Config() != nil && len(remote.Config().URLs) > 0 {\n\t\tinfo = append(info, \"NCI_REPOSITORY_REMOTE=\"+remote.Config().URLs[0])\n\t} else {\n\t\tinfo = append(info, \"NCI_REPOSITORY_REMOTE=local\")\n\t}\n\n\t// pass\n\tif strings.HasPrefix(ref.Name().String(), \"refs/heads/\") {\n\t\t// branch\n\t\tbranchName := 
ref.Name().String()[11:]\n\t\tinfo = append(info, \"NCI_COMMIT_REF_TYPE=branch\")\n\t\tinfo = append(info, \"NCI_COMMIT_REF_NAME=\"+branchName)\n\t\tinfo = append(info, \"NCI_COMMIT_REF_SLUG=\"+GetSlug(branchName))\n\t} else if ref.Name().String() == \"HEAD\" {\n\t\t// detached HEAD, look into the reflog to determinate the true branch\n\t\tgitRefLogFile := projectDir + \"/.git/logs/HEAD\"\n\t\tlastLine := readLastLine(gitRefLogFile)\n\t\tlog.Debug(\"RefLog - LastLine: \" + lastLine)\n\n\t\tpattern := regexp.MustCompile(`.*checkout: moving from (?P<FROM>.*) to (?P<TO>.*)$`)\n\t\tmatch := pattern.FindStringSubmatch(lastLine)\n\t\tlog.Debug(\"Found a reflog entry showing that there was a checkout based on \" + match[1] + \" to \" + match[2])\n\n\t\tif len(match[2]) == 40 {\n\t\t\t// checkout out a specific commit, use origin branch as reference\n\t\t\tinfo = append(info, \"NCI_COMMIT_REF_TYPE=branch\")\n\t\t\tinfo = append(info, \"NCI_COMMIT_REF_NAME=\"+match[1])\n\t\t\tinfo = append(info, \"NCI_COMMIT_REF_SLUG=\"+GetSlug(match[1]))\n\t\t} else {\n\t\t\t// checkout of a tag or other named reference\n\t\t\tinfo = append(info, \"NCI_COMMIT_REF_TYPE=tag\")\n\t\t\tinfo = append(info, \"NCI_COMMIT_REF_NAME=\"+match[2])\n\t\t\tinfo = append(info, \"NCI_COMMIT_REF_SLUG=\"+GetSlug(match[2]))\n\t\t}\n\t} else {\n\t\tpanic(\"Unsupported!\")\n\t}\n\n\t// release name (=slug, but without leading v for tags)\n\tinfo = append(info, \"NCI_COMMIT_REF_RELEASE=\"+strings.TrimLeft(GetEnvironment(info, \"NCI_COMMIT_REF_SLUG\"), \"v\"))\n\n\t// commit info\n\tinfo = append(info, \"NCI_COMMIT_SHA=\"+ref.Hash().String())\n\tinfo = append(info, \"NCI_COMMIT_SHA_SHORT=\"+ref.Hash().String()[0:8])\n\n\tcIter, _ := repository.Log(&git.LogOptions{From: ref.Hash()})\n\tfirstCommit := true\n\tcIter.ForEach(func(commit *object.Commit) error {\n\t\tcommitinfo := strings.Split(commit.Message, \"\\n\")\n\n\t\t// only set for first commit\n\t\tif firstCommit {\n\t\t\tinfo = append(info, \"NCI_COMMIT_TITLE=\"+commitinfo[0])\n\t\t\tif len(commitinfo) >= 3 {\n\t\t\t\tinfo = append(info, \"NCI_COMMIT_DESCRIPTION=\"+strings.Join(commitinfo[2:], \"\\n\"))\n\t\t\t} else {\n\t\t\t\tinfo = append(info, \"NCI_COMMIT_DESCRIPTION=\")\n\t\t\t}\n\n\t\t\tfirstCommit = false\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn info\n}", "func gitAuth() *http.BasicAuth {\n\n\tvar auth *http.BasicAuth\n\n\t// The username can be anything for HTTPS Git operations\n\tgitUsername := \"fanal-aquasecurity-scan\"\n\n\t// We first check if a GitHub token was provided\n\tgithubToken := os.Getenv(\"GITHUB_TOKEN\")\n\tif githubToken != \"\" {\n\t\tauth = &http.BasicAuth{\n\t\t\tUsername: gitUsername,\n\t\t\tPassword: githubToken,\n\t\t}\n\t\treturn auth\n\t}\n\n\t// Otherwise we check if a GitLab token was provided\n\tgitlabToken := os.Getenv(\"GITLAB_TOKEN\")\n\tif gitlabToken != \"\" {\n\t\tauth = &http.BasicAuth{\n\t\t\tUsername: gitUsername,\n\t\t\tPassword: gitlabToken,\n\t\t}\n\t\treturn auth\n\t}\n\n\t// If no token was provided, we simply return a nil,\n\t// which will make the request to be unauthenticated\n\treturn nil\n\n}", "func affectedTestsNoWork(\n\tctx context.Context,\n\trunner ninjaRunner,\n\tcontextSpec *fintpb.Context,\n\tallTests []build.Test,\n\ttargets []string,\n) (affectedTestsResult, error) {\n\tresult := affectedTestsResult{\n\t\tlogs: map[string]string{},\n\t}\n\n\t// Map from \"... 
is dirty\" line printed by Ninja to affected test\n\ttestsByDirtyLine := map[string][]string{}\n\t// Map from test path (if defined) to test name\n\ttestsByPath := map[string]string{}\n\t// Map from path to BUILD.gn file defining the test to the test\n\ttestsByBuildGn := map[string][]string{}\n\n\tfor _, test := range allTests {\n\t\t// Ignore any tests that shouldn't be considered affected.\n\t\tlabelNoToolchain := strings.Split(test.Label, \"(\")[0]\n\t\tif contains(neverAffectedTestLabels, labelNoToolchain) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// For host tests we use the executable path.\n\t\tif test.Path != \"\" {\n\t\t\ttestsByPath[test.Path] = test.Name\n\t\t}\n\n\t\tfor _, packageManifest := range test.PackageManifests {\n\t\t\tdirtyLine := dirtyLineForPackageManifest(packageManifest)\n\t\t\ttestsByDirtyLine[dirtyLine] = append(testsByDirtyLine[dirtyLine], test.Name)\n\t\t}\n\n\t\tbuildGnPath := buildGnPathForLabel(test.Label)\n\t\ttestsByBuildGn[buildGnPath] = append(testsByBuildGn[buildGnPath], test.Name)\n\t\tif test.PackageLabel != \"\" {\n\t\t\tbuildGnPath = buildGnPathForLabel(test.PackageLabel)\n\t\t\ttestsByBuildGn[buildGnPath] = append(testsByBuildGn[buildGnPath], test.Name)\n\t\t}\n\t}\n\n\tvar gnFiles, nonGNFiles []string\n\tfor _, f := range contextSpec.ChangedFiles {\n\t\text := filepath.Ext(f.Path)\n\t\tif ext == \".gn\" || ext == \".gni\" {\n\t\t\tgnFiles = append(gnFiles, f.Path)\n\t\t} else {\n\t\t\tnonGNFiles = append(nonGNFiles, f.Path)\n\t\t}\n\t}\n\n\tvar affectedTests []string\n\tfor _, gnFile := range gnFiles {\n\t\tgnFile = strings.TrimPrefix(gnFile, \"build/secondary/\")\n\t\tmatch, ok := testsByBuildGn[gnFile]\n\t\tif ok {\n\t\t\taffectedTests = append(affectedTests, match...)\n\t\t}\n\t}\n\n\t// Our Ninja graph is set up in such a way that touching any GN files\n\t// triggers an action to regenerate the entire graph. So if GN files were\n\t// modified and we touched them then the following dry run results are not\n\t// useful for determining affected tests.\n\ttouchNonGNResult, err := touchFiles(makeAbsolute(contextSpec.CheckoutDir, nonGNFiles))\n\tif err != nil {\n\t\treturn result, err\n\t}\n\tdefer resetTouchFiles(touchNonGNResult)\n\tstdout, stderr, err := ninjaDryRun(ctx, runner, targets)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\tninjaOutput := strings.Join([]string{stdout, stderr}, \"\\n\\n\")\n\n\tfor _, line := range strings.Split(ninjaOutput, \"\\n\") {\n\t\tmatch, ok := testsByDirtyLine[line]\n\t\tif ok {\n\t\t\t// Matched an expected line\n\t\t\taffectedTests = append(affectedTests, match...)\n\t\t} else {\n\t\t\t// Look for actions that reference host test path. Different types\n\t\t\t// of host tests have different actions, but they all mention the\n\t\t\t// final executable path.\n\t\t\t// fxbug.dev(85524): tokenize with shlex in case test paths include\n\t\t\t// whitespace.\n\t\t\tfor _, maybeTestPath := range strings.Split(line, \" \") {\n\t\t\t\tmaybeTestPath = strings.Trim(maybeTestPath, `\"`)\n\t\t\t\ttestName, ok := testsByPath[maybeTestPath]\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\taffectedTests = append(affectedTests, testName)\n\t\t\t}\n\t\t}\n\t}\n\n\t// For determination of \"no work to do\", we want to consider all files,\n\t// *including* GN files. 
If no GN files are affected, then we already have\n\t// the necessary output from the first ninja dry run, so we can skip doing\n\t// the second dry run that includes GN files.\n\tif len(gnFiles) > 0 {\n\t\tresult.logs[\"ninja dry run output (no GN files)\"] = ninjaOutput\n\n\t\t// Since we only did a Ninja dry run, the non-GN files will still be\n\t\t// considered dirty, so we need only touch the GN files.\n\t\ttouchGNResult, err := touchFiles(makeAbsolute(contextSpec.CheckoutDir, gnFiles))\n\t\tif err != nil {\n\t\t\treturn result, err\n\t\t}\n\t\tdefer resetTouchFiles(touchGNResult)\n\t\tvar stdout, stderr string\n\t\tstdout, stderr, err = ninjaDryRun(ctx, runner, targets)\n\t\tif err != nil {\n\t\t\treturn result, err\n\t\t}\n\t\tninjaOutput = strings.Join([]string{stdout, stderr}, \"\\n\\n\")\n\t}\n\tresult.logs[\"ninja dry run output\"] = ninjaOutput\n\tresult.noWork = strings.Contains(ninjaOutput, noWorkString)\n\tresult.affectedTests = removeDuplicates(affectedTests)\n\n\treturn result, nil\n}", "func IsGit(path string) bool {\n\tif strings.HasSuffix(path, \".git\") || strings.HasPrefix(path, \"git@\") {\n\t\treturn true\n\t}\n\turl, err := url.Parse(path)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif url.Scheme == \"\" {\n\t\turl.Scheme = \"https\"\n\t}\n\tresp, err := http.Head(url.String() + \".git\")\n\tif err != nil {\n\t\treturn false\n\t}\n\tfor _, status := range []int{200, 301, 302, 401} {\n\t\tif resp.StatusCode == status {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func GetGitRepositoryDefaultBranch(url string) (string, error) {\n\terr := C.git_libgit2_init()\n\tif err < 0 {\n\t\treturn \"\", errors.New(\"failed to initialize libgit2\")\n\t}\n\tvar odb *C.git_odb\n\terr = C.git_odb_new(&odb)\n\tif err != 0 {\n\t\treturn \"\", errors.New(\"failed to create git_odb\")\n\t}\n\tvar repo *C.git_repository\n\terr = C.git_repository_wrap_odb(&repo, odb)\n\tif err != 0 {\n\t\treturn \"\", errors.New(\"failed to wrap odb into repository\")\n\t}\n\tvar remote *C.git_remote\n\terr = C.git_remote_create_anonymous(&remote, repo, C.CString(url))\n\tif err != 0 {\n\t\treturn \"\", errors.New(\"failed to create anonymous remote\")\n\t}\n\terr = C.git_remote_connect(remote, C.GIT_DIRECTION_FETCH, nil, nil, nil)\n\tif err != 0 {\n\t\treturn \"\", errors.New(\"failed to connect to remote (fetch)\")\n\t}\n\tvar remote_heads **C.git_remote_head\n\tvar remote_heads_size C.ulong\n\terr = C.git_remote_ls(&remote_heads, &remote_heads_size, remote)\n\tif err != 0 {\n\t\treturn \"\", errors.New(\"failed to list remote heads\")\n\t}\n\tvar remote_heads2 []*C.git_remote_head\n\tsh := (*reflect.SliceHeader)(unsafe.Pointer(&remote_heads2))\n\tsh.Data = uintptr(unsafe.Pointer(remote_heads))\n\tsh.Len = int(remote_heads_size)\n\tsh.Cap = int(remote_heads_size)\n\tfound := \"\"\n\tfor _, remote_head := range remote_heads2 {\n\t\tif remote_head.symref_target != nil {\n\t\t\t// s := C.GoString(C.git_oid_tostr_s(&remote_head.oid))\n\t\t\th := C.GoString(remote_head.name)\n\t\t\tif h != \"HEAD\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsr := C.GoString(remote_head.symref_target)\n\t\t\tsr = strings.TrimPrefix(sr, \"refs/heads/\")\n\t\t\tlog.Printf(\"[%s] Found default branch name = %s\\n\", h, sr)\n\t\t\tfound = sr\n\t\t}\n\t}\n\tC.git_remote_free(remote)\n\tC.git_repository_free(repo)\n\tC.git_odb_free(odb)\n\tC.git_libgit2_shutdown()\n\n\treturn found, nil\n}", "func init() {\n\tif debug {\n\t\tlog.SetLogLevel(log.Debug)\n\t}\n\tversion = fmt.Sprintf(\"%d.%d.%d\", major, minor, patch)\n\tclean := 
(gitstatus == \"0\")\n\t// The docker build will pass the git tag to the build, if it is clean\n\t// from a tag it will look like v0.7.0\n\tif tag != \"v\"+version || !clean {\n\t\tlog.Debugf(\"tag is %v, clean is %v marking as pre release\", tag, clean)\n\t\tversion += \"-pre\"\n\t}\n\tif !clean {\n\t\tbuildInfo += \"-dirty\"\n\t\tlog.Debugf(\"gitstatus is %q, marking buildinfo as dirty: %v\", gitstatus, buildInfo)\n\t}\n\tlongVersion = version + \" \" + buildInfo + \" \" + runtime.Version()\n}", "func (cmd ConfigCmd) RequiresRepo() bool {\n\treturn false\n}", "func Git(argv []string, cmdr cmd.Commander) error {\n\tusage := executable.Render(`\nValid commands for git:\n\ngit:remote Adds git remote of application to repository\ngit:remove Removes git remote of application from repository\n\nUse '{{.Name}} help [command]' to learn more.\n`)\n\n\tswitch argv[0] {\n\tcase \"git:remote\":\n\t\treturn gitRemote(argv, cmdr)\n\tcase \"git:remove\":\n\t\treturn gitRemove(argv, cmdr)\n\tcase \"git\":\n\t\tfmt.Print(usage)\n\t\treturn nil\n\tdefault:\n\t\tPrintUsage(cmdr)\n\t\treturn nil\n\t}\n}", "func (r *Repo) NonInstalledVersion(\n\tcommit string) (*newtutil.RepoVersion, error) {\n\n\tver, err := r.inferVersion(commit)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ver, nil\n}", "func (p project) gitClone() error {\n\tif p.SkipClone {\n\t\treturn nil\n\t}\n\tcmd := fmt.Sprintf(\"git clone -b %s %s %s\", p.Branch, p.Repo, localRepos+p.Name)\n\treturn doExec(cmd, \"\")\n}", "func SkipNetworkTest() string {\n\tfor _, env := range []string{\"MG_DOMAIN\", \"MG_API_KEY\", \"MG_EMAIL_TO\", \"MG_PUBLIC_API_KEY\"} {\n\t\tif os.Getenv(env) == \"\" {\n\t\t\treturn fmt.Sprintf(\"'%s' missing from environment skipping...\", env)\n\t\t}\n\t}\n\treturn \"\"\n}", "func Fetch() {\n\terr := RunCommand(\"git\", \"fetch\", \"--prune\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func TestRepository_LibGit2_Concurrency(t *testing.T) {\n\tp := runtime.GOMAXPROCS(0)\n\tif p == 1 {\n\t\tt.Skip(\"no point in testing concurrency with GOMAXPROCS=1\")\n\t}\n\n\torigRepo := makeGitRepositoryLibGit2(t,\n\t\t\"echo hello > a\",\n\t\t\"git add a\",\n\t\t\"GIT_COMMITTER_NAME=a [email protected] GIT_COMMITTER_DATE=2006-01-02T15:04:05Z git commit --allow-empty -m foo --author='a <[email protected]>' --date 2006-01-02T15:04:05Z\",\n\t\t\"GIT_COMMITTER_NAME=a [email protected] GIT_COMMITTER_DATE=2006-01-02T15:04:05Z git commit --allow-empty -m bar --author='a <[email protected]>' --date 2006-01-02T17:04:05Z\",\n\t)\n\n\tn := 10\n\tstart := time.Now()\n\tduration := 5 * time.Second\n\n\tfor i := 0; i < n; i++ {\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tif time.Since(start) > duration {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\trepo, err := git_libgit2.OpenGitRepositoryLibGit2(origRepo.Dir)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Error(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tcommitID, err := repo.ResolveRevision(\"master\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Error(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t_, err = repo.GetCommit(commitID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Error(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t_, _, err = repo.Commits(vcs.CommitsOptions{Head: commitID})\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Error(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tfs, err := repo.FileSystem(commitID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Error(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tentries, err := fs.ReadDir(\".\")\n\t\t\t\tif err != nil 
{\n\t\t\t\t\tt.Error(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif len(entries) != 1 {\n\t\t\t\t\tt.Errorf(\"got entries %v, want 1 entry\", entries)\n\t\t\t\t}\n\n\t\t\t\tfor _, entry := range entries {\n\t\t\t\t\tf, err := fs.Open(entry.Name())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Error(err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tf.Close()\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\ttime.Sleep(duration + 500*time.Millisecond)\n}", "func (g *GitLocal) Info(dir string) (*GitRepository, error) {\n\treturn g.GitCLI.Info(dir)\n}", "func gitInit(t testing.TB, dir string) {\n\tt.Helper()\n\tmustHaveGit(t)\n\n\tif _, err := run(t, dir, \"git\", \"init\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif _, err := run(t, dir, \"git\", \"config\", \"user.name\", \"Go Gopher\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif _, err := run(t, dir, \"git\", \"config\", \"user.email\", \"[email protected]\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (*RepoInfo) Descriptor() ([]byte, []int) {\n\treturn file_helm_api_proto_rawDescGZIP(), []int{3}\n}", "func (*GitCommitInformation) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_reference_proto_rawDescGZIP(), []int{7}\n}", "func AllowEmpty(c *commitConfig) { c.allowEmpty = true }", "func (g *GitLocal) DiscoverRemoteGitURL(gitConf string) (string, error) {\n\treturn g.GitCLI.DiscoverRemoteGitURL(gitConf)\n}", "func (st *Status) checkState(gitdir string) {\n\tst.State = Default\n\t// from: git/contrib/completion/git-prompt.sh\n\tswitch {\n\tcase exists(gitdir, \"rebase-merge\"):\n\t\tst.State = Rebasing\n\tcase exists(gitdir, \"rebase-apply\"):\n\t\tswitch {\n\t\tcase exists(gitdir, \"rebase-apply\", \"rebasing\"):\n\t\t\tst.State = Rebasing\n\t\tcase exists(gitdir, \"rebase-apply\", \"applying\"):\n\t\t\tst.State = AM\n\t\tdefault:\n\t\t\tst.State = AMRebase\n\t\t}\n\tcase exists(gitdir, \"MERGE_HEAD\"):\n\t\tst.State = Merging\n\tcase exists(gitdir, \"CHERRY_PICK_HEAD\"):\n\t\tst.State = CherryPicking\n\tcase exists(gitdir, \"REVERT_HEAD\"):\n\t\tst.State = Reverting\n\tcase exists(gitdir, \"BISECT_LOG\"):\n\t\tst.State = Bisecting\n\t}\n}", "func handleHeadCommit(repopath, checkCommit string, gcl *ginclient.Client) string {\n\tif checkCommit != \"HEAD\" {\n\t\treturn checkCommit\n\t}\n\tuseCommitName, err := getRepoCommit(gcl, repopath)\n\tif err != nil {\n\t\tlog.ShowWrite(\"[Error] could not fetch latest commit for repo %s: %s\", repopath, err.Error())\n\t\treturn checkCommit\n\t}\n\tlog.ShowWrite(\"[Info] repo %s uses commit %s as HEAD commit\", repopath, useCommitName)\n\treturn useCommitName\n}", "func withGit(url, commit, folder string, verbose bool) error {\n\tvar out bytes.Buffer\n\tdata := map[string]string{\n\t\t\"workDir\": folder,\n\t\t\"repoDir\": RepoDir(url),\n\t\t\"remote\": url,\n\t\t\"ref\": commit,\n\t}\n\tif err := gitTemplate.Execute(&out, data); err != nil {\n\t\treturn errors.Wrap(err, \"unable to create git script\")\n\t}\n\tscript := out.String()\n\tif verbose {\n\t\tfmt.Println(script)\n\t}\n\n\tcmd := exec.Command(\"bash\", \"-c\", script)\n\tif verbose {\n\t\tcmdStdout, err := cmd.StdoutPipe()\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"unable to create StdOut pipe for bash\")\n\t\t}\n\t\tstdoutScanner := bufio.NewScanner(cmdStdout)\n\t\tgo func() {\n\t\t\tfor stdoutScanner.Scan() {\n\t\t\t\tfmt.Println(\"bash: \" + stdoutScanner.Text())\n\t\t\t}\n\t\t}()\n\n\t\tcmdStderr, err := cmd.StderrPipe()\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"unable to create StdErr pipe for 
bash\")\n\t\t}\n\t\tstderrScanner := bufio.NewScanner(cmdStderr)\n\t\tgo func() {\n\t\t\tfor stderrScanner.Scan() {\n\t\t\t\tfmt.Println(\"bash: \" + stderrScanner.Text())\n\t\t\t}\n\t\t}()\n\t}\n\terr := cmd.Start()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to start cloning with git\")\n\t}\n\terr = cmd.Wait()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to clone with git\")\n\t}\n\treturn nil\n}", "func nopDiagnose(ctx context.Context, outDir string) string {\n\treturn \"\"\n}" ]
[ "0.607692", "0.56856716", "0.5637586", "0.5620896", "0.56051344", "0.5575217", "0.5547749", "0.5355762", "0.5312566", "0.5303206", "0.53025454", "0.5283581", "0.52761084", "0.52436495", "0.52291006", "0.52157104", "0.51697", "0.5164014", "0.5152997", "0.51269424", "0.5113547", "0.5090089", "0.5073859", "0.49964523", "0.49936038", "0.4985404", "0.49737412", "0.49717477", "0.49664277", "0.4964303", "0.49595162", "0.49547264", "0.49284914", "0.49187303", "0.49108738", "0.4894216", "0.4874317", "0.48715946", "0.48666993", "0.48653385", "0.4863737", "0.48611438", "0.4860039", "0.48569116", "0.4854037", "0.4852945", "0.4849671", "0.48482978", "0.4831688", "0.48289427", "0.48264116", "0.48067883", "0.48024073", "0.4793406", "0.4771361", "0.47689065", "0.47640717", "0.4763958", "0.47596645", "0.47596645", "0.47572818", "0.47538775", "0.47455633", "0.47454256", "0.47358638", "0.47289544", "0.47252166", "0.47188908", "0.47008258", "0.46936086", "0.46830153", "0.46826893", "0.46783358", "0.4674571", "0.4672088", "0.4669365", "0.46691018", "0.46616247", "0.46581087", "0.4655433", "0.46550447", "0.46522805", "0.46503946", "0.4649321", "0.46464038", "0.46212685", "0.46139953", "0.46130577", "0.46112356", "0.46071658", "0.46041816", "0.46013328", "0.4599449", "0.45896015", "0.45823967", "0.45736656", "0.45692426", "0.45671675", "0.4566474", "0.4558471", "0.45574445" ]
0.0
-1
SetStatistics sets the container statistics
func (cs *Stats) SetStatistics(s StatsEntry) {
	cs.mutex.Lock()
	defer cs.mutex.Unlock()
	s.Container = cs.Container
	cs.StatsEntry = s
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *Cluster) SetStatistics(v []*KeyValuePair) *Cluster {\n\ts.Statistics = v\n\treturn s\n}", "func (s *BotRecommendationResults) SetStatistics(v *BotRecommendationResultStatistics) *BotRecommendationResults {\n\ts.Statistics = v\n\treturn s\n}", "func (s *DriftCheckModelQuality) SetStatistics(v *MetricsSource) *DriftCheckModelQuality {\n\ts.Statistics = v\n\treturn s\n}", "func (container *container) Statistics() (Statistics, error) {\r\n\tproperties, err := container.system.Properties(context.Background(), schema1.PropertyTypeStatistics)\r\n\tif err != nil {\r\n\t\treturn Statistics{}, convertSystemError(err, container)\r\n\t}\r\n\r\n\treturn properties.Statistics, nil\r\n}", "func (s *ModelQuality) SetStatistics(v *MetricsSource) *ModelQuality {\n\ts.Statistics = v\n\treturn s\n}", "func (s *DriftCheckModelDataQuality) SetStatistics(v *MetricsSource) *DriftCheckModelDataQuality {\n\ts.Statistics = v\n\treturn s\n}", "func (r *Redis) SetStats(node string, stats types.Stats) error {\n\terr := r.c.ZAdd(string(keys.StatsList), redis.Z{\n\t\tMember: node,\n\t\tScore: stats.CPU.SystemLoad / float64(stats.CPU.Cores),\n\t}).Err()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb, err := json.Marshal(stats)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn r.c.Set(keys.PrefixNodeStats.Fmt(node), b, 0).Err()\n}", "func (s *ModelDataQuality) SetStatistics(v *MetricsSource) *ModelDataQuality {\n\ts.Statistics = v\n\treturn s\n}", "func (r *RollingStoreStats) Set(stats *pdpb.StoreStats) {\n\tstatInterval := stats.GetInterval()\n\tinterval := float64(statInterval.GetEndTimestamp() - statInterval.GetStartTimestamp())\n\tif interval == 0 {\n\t\treturn\n\t}\n\tr.Lock()\n\tdefer r.Unlock()\n\treadQueryNum, writeQueryNum := core.GetReadQueryNum(stats.QueryStats), core.GetWriteQueryNum(stats.QueryStats)\n\tr.timeMedians[utils.StoreWriteBytes].Set(float64(stats.BytesWritten) / interval)\n\tr.timeMedians[utils.StoreReadBytes].Set(float64(stats.BytesRead) / interval)\n\tr.timeMedians[utils.StoreWriteKeys].Set(float64(stats.KeysWritten) / interval)\n\tr.timeMedians[utils.StoreReadKeys].Set(float64(stats.KeysRead) / interval)\n\tr.timeMedians[utils.StoreReadQuery].Set(float64(readQueryNum) / interval)\n\tr.timeMedians[utils.StoreWriteQuery].Set(float64(writeQueryNum) / interval)\n\tr.movingAvgs[utils.StoreCPUUsage].Set(collect(stats.GetCpuUsages()))\n\tr.movingAvgs[utils.StoreDiskReadRate].Set(collect(stats.GetReadIoRates()))\n\tr.movingAvgs[utils.StoreDiskWriteRate].Set(collect(stats.GetWriteIoRates()))\n}", "func (c *Conn) SetStats(stats *Stats) {\n\tc.Stats = stats\n}", "func (c *ColumnChunkMetaData) StatsSet() (bool, error) {\n\tif !c.columnMeta.IsSetStatistics() || c.descr.SortOrder() == schema.SortUNKNOWN {\n\t\treturn false, nil\n\t}\n\n\tif c.possibleStats == nil {\n\t\tc.possibleStats = makeColumnStats(c.columnMeta, c.descr, c.mem)\n\t}\n\n\tencoded, err := c.possibleStats.Encode()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn c.writerVersion.HasCorrectStatistics(c.Type(), c.descr.LogicalType(), encoded, c.descr.SortOrder()), nil\n}", "func (r *registry) SetNetworkStats(chain vaa.ChainID, data *gossipv1.Heartbeat_Network) {\n\tr.mu.Lock()\n\tdata.Id = uint32(chain)\n\tr.networkStats[chain] = data\n\tr.mu.Unlock()\n}", "func (ulw *Wrapper) ResetStatistics() {\n\tulw.ul.ResetStatistics()\n}", "func (s *Service) SetStat(c context.Context, st *api.Stat) (err error) {\n\ts.arc.SetStat3(c, &api.Stat{\n\t\tAid: st.Aid,\n\t\tView: int32(st.View),\n\t\tDanmaku: int32(st.Danmaku),\n\t\tReply: 
int32(st.Reply),\n\t\tFav: int32(st.Fav),\n\t\tCoin: int32(st.Coin),\n\t\tShare: int32(st.Share),\n\t\tNowRank: int32(st.NowRank),\n\t\tHisRank: int32(st.HisRank),\n\t\tLike: int32(st.Like),\n\t\tDisLike: 0,\n\t})\n\treturn\n}", "func (m *EdiscoverySearch) SetLastEstimateStatisticsOperation(value EdiscoveryEstimateOperationable)() {\n m.lastEstimateStatisticsOperation = value\n}", "func (d TestSink) Set(c *telemetry.Context, stat string, value float64) {\n\td[stat] = TestMetric{\"Set\", value, c.Tags()}\n}", "func (c *HBComp) updateCrudeStatistics() {\n\tvt := models.ValueType{\n\t\tKind: \"STRING\",\n\t\tValue: c.app.CrudeOps.GetStats().String(),\n\t}\n\tc.app.Service.SetServiceAttribute(com.ServiceAttrCrudeStatistics, vt)\n}", "func (r *RollingStoreStats) SetRegionsStats(writeBytesRate, writeKeysRate float64) {\n\tr.Lock()\n\tdefer r.Unlock()\n\tr.movingAvgs[utils.StoreRegionsWriteBytes].Set(writeBytesRate)\n\tr.movingAvgs[utils.StoreRegionsWriteKeys].Set(writeKeysRate)\n}", "func Statistics(commands <-chan parser.Cmd) (*Stats, error) {\n\tstats := Stats{\n\t\tPropertiesPerType: make(map[string]int),\n\t\tPropertiesPerDepth: make(map[int]int),\n\t\tNodesPerDepth: make(map[int]int),\n\t\tValuesPerSize: make(map[int]int),\n\t}\n\n\tif err := stats.parse(commands); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &stats, nil\n}", "func (mlw *Wrapper) ResetStatistics() {\n\tmlw.ml.ResetStatistics()\n}", "func (sl *StagesLatency) ResetStatistics() {\n\tsl.first = duplicateSlice(sl.last)\n\tsl.FirstCollected = sl.LastCollected\n\n\tsl.calculate()\n}", "func (r *RPC) SetStat(c context.Context, arg *model.ArgStats, res *struct{}) (err error) {\n\terr = r.s.SetStat(c, arg.Aid, arg.Stats)\n\treturn\n}", "func (c *Client) setMetrics(status *Status, stats *Stats, logstats *LogStats) {\n\t//Status\n\tvar isRunning int = 0\n\tif status.Running == true {\n\t\tisRunning = 1\n\t}\n\tmetrics.Running.WithLabelValues(c.hostname).Set(float64(isRunning))\n\n\tvar isProtected int = 0\n\tif status.ProtectionEnabled == true {\n\t\tisProtected = 1\n\t}\n\tmetrics.ProtectionEnabled.WithLabelValues(c.hostname).Set(float64(isProtected))\n\n\t//Stats\n\tmetrics.AvgProcessingTime.WithLabelValues(c.hostname).Set(float64(stats.AvgProcessingTime))\n\tmetrics.DnsQueries.WithLabelValues(c.hostname).Set(float64(stats.DnsQueries))\n\tmetrics.BlockedFiltering.WithLabelValues(c.hostname).Set(float64(stats.BlockedFiltering))\n\tmetrics.ParentalFiltering.WithLabelValues(c.hostname).Set(float64(stats.ParentalFiltering))\n\tmetrics.SafeBrowsingFiltering.WithLabelValues(c.hostname).Set(float64(stats.SafeBrowsingFiltering))\n\tmetrics.SafeSearchFiltering.WithLabelValues(c.hostname).Set(float64(stats.SafeSearchFiltering))\n\n\tfor l := range stats.TopQueries {\n\t\tfor domain, value := range stats.TopQueries[l] {\n\t\t\tmetrics.TopQueries.WithLabelValues(c.hostname, domain).Set(float64(value))\n\t\t}\n\t}\n\n\tfor l := range stats.TopBlocked {\n\t\tfor domain, value := range stats.TopBlocked[l] {\n\t\t\tmetrics.TopBlocked.WithLabelValues(c.hostname, domain).Set(float64(value))\n\t\t}\n\t}\n\n\tfor l := range stats.TopClients {\n\t\tfor source, value := range stats.TopClients[l] {\n\t\t\tmetrics.TopClients.WithLabelValues(c.hostname, source).Set(float64(value))\n\t\t}\n\t}\n\n\t//LogQuery\n\tm = make(map[string]int)\n\tlogdata := logstats.Data\n\tfor i := range logdata {\n\t\tdnsanswer := logdata[i].Answer\n\t\tif dnsanswer != nil && len(dnsanswer) > 0 {\n\t\t\tfor j := range dnsanswer {\n\t\t\t\tvar dnsType string\n\t\t\t\t//Check 
the type of dnsanswer[j].Value, if string leave it be, otherwise get back the object to get the correct DNS type\n\t\t\t\tswitch v := dnsanswer[j].Value.(type) {\n\t\t\t\tcase string:\n\t\t\t\t\tdnsType = dnsanswer[j].Type\n\t\t\t\t\tm[dnsType] += 1\n\t\t\t\tcase map[string]interface{}:\n\t\t\t\t\tvar dns65 Type65\n\t\t\t\t\tmapstructure.Decode(v, &dns65)\n\t\t\t\t\tdnsType = \"TYPE\" + strconv.Itoa(dns65.Hdr.Rrtype)\n\t\t\t\t\tm[dnsType] += 1\n\t\t\t\tdefault:\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tfor key, value := range m {\n\t\tmetrics.QueryTypes.WithLabelValues(c.hostname, key).Set(float64(value))\n\t}\n\n\t//clear the map\n\tfor k := range m {\n\t\tdelete(m, k)\n\t}\n}", "func (o *AggregatedDomain) Statistics(info *bambou.FetchingInfo) (StatisticsList, *bambou.Error) {\n\n\tvar list StatisticsList\n\terr := bambou.CurrentSession().FetchChildren(o, StatisticsIdentity, &list, info)\n\treturn list, err\n}", "func (o *VRS) Statistics(info *bambou.FetchingInfo) (StatisticsList, *bambou.Error) {\n\n\tvar list StatisticsList\n\terr := bambou.CurrentSession().FetchChildren(o, StatisticsIdentity, &list, info)\n\treturn list, err\n}", "func (c *Collector) Set(inner prometheus.Collector) {\n\tc.mut.Lock()\n\tdefer c.mut.Unlock()\n\n\tc.inner = inner\n}", "func (p *Pools) PrintStatistics() {\n\tvar numTCPFlows int64\n\tvar numTCPPackets int64\n\tvar numUDPFlows int64\n\tvar numUDPPackets int64\n\tvar counterLock sync.Mutex\n\tfor _, pool := range p.pools {\n\t\tpool.printStatistics(&numTCPFlows, &numTCPPackets, &numUDPFlows, &numUDPPackets, &counterLock)\n\t}\n\n\tfmt.Println(\"Number of TCP Flows in Pool:\\t\", humanize.Comma(numTCPFlows))\n\tfmt.Println(\"Number of TCP Packets in Pool:\\t\", humanize.Comma(numTCPPackets))\n\n\tfmt.Println(\"Number of UDP Flows in Pool:\\t\", humanize.Comma(numUDPFlows))\n\tfmt.Println(\"Number of UDP Packets in Pool:\\t\", humanize.Comma(numUDPPackets))\n}", "func (c *ColumnChunkMetaData) Statistics() (TypedStatistics, error) {\n\tok, err := c.StatsSet()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif ok {\n\t\treturn c.possibleStats, nil\n\t}\n\treturn nil, nil\n}", "func (client Client) SetMetaData(ctx context.Context, accountName, containerName string, metaData map[string]string) (autorest.Response, error) {\n\treturn client.SetMetaDataWithLeaseID(ctx, accountName, containerName, \"\", metaData)\n}", "func ObjsetStats(name string) (props DatasetPropsWithSource, err error) {\n\tprops = make(DatasetPropsWithSource)\n\tcmd := &Cmd{}\n\tif err = NvlistIoctl(zfsHandle.Fd(), ZFS_IOC_OBJSET_STATS, name, cmd, nil, props, nil); err != nil {\n\t\treturn\n\t}\n\treturn\n}", "func (dms *MemoryMetricsCollector) Set(name string, value float64, labels map[string]string, options ...Option) {\n\topts := dms.defaultMetricsOptions()\n\n\tfor _, opt := range options {\n\t\topt(opts)\n\t}\n\n\tdms.metrics = append(dms.metrics, operation.MetricOperation{\n\t\tName: name,\n\t\tGroup: opts.group,\n\t\tAction: \"set\",\n\t\tValue: pointer.Float64Ptr(value),\n\t\tLabels: labels,\n\t})\n}", "func (ooc *MockOpenoltClient) CollectStatistics(ctx context.Context, in *openolt.Empty, opts ...grpc.CallOption) (*openolt.Empty, error) {\n\treturn &openolt.Empty{}, nil\n}", "func (s *Segment) SetMetadata(spyName string, sampleRate uint32, units, aggregationType string) {\n\ts.spyName = spyName\n\ts.sampleRate = sampleRate\n\ts.units = units\n\ts.aggregationType = aggregationType\n}", "func (s *DevStat) SetFltUpdateStats(start time.Time, duration time.Duration) 
{\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\ts.Counters[FilterStartTime] = start.Unix()\n\ts.Counters[FilterDuration] = duration.Seconds()\n}", "func (n *mockAgent) statsContainer(sandbox *Sandbox, c Container) (*ContainerStats, error) {\n\treturn &ContainerStats{}, nil\n}", "func (s *sizes) setSizes(width int, height int) {\n\ts.width = width\n\ts.height = height\n\ts.curStreamsPerStreamDisplay = 1 + height/10\n}", "func (c *Context) Set(stat string, value float64) {\n\tfor _, sink := range c.sinks {\n\t\tsink.Set(c, stat, value)\n\t}\n}", "func (c *Creature) TotalStatistics() {\n\n\tfor _, s := range c.Statistics {\n\n\t\ts.UpdateStatistic()\n\t}\n}", "func (client *Client) DescribeContainerStatisticsWithOptions(request *DescribeContainerStatisticsRequest, runtime *util.RuntimeOptions) (_result *DescribeContainerStatisticsResponse, _err error) {\n\t_err = util.ValidateModel(request)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\tquery := map[string]interface{}{}\n\tif !tea.BoolValue(util.IsUnset(request.ClusterId)) {\n\t\tquery[\"ClusterId\"] = request.ClusterId\n\t}\n\n\treq := &openapi.OpenApiRequest{\n\t\tQuery: openapiutil.Query(query),\n\t}\n\tparams := &openapi.Params{\n\t\tAction: tea.String(\"DescribeContainerStatistics\"),\n\t\tVersion: tea.String(\"2018-12-03\"),\n\t\tProtocol: tea.String(\"HTTPS\"),\n\t\tPathname: tea.String(\"/\"),\n\t\tMethod: tea.String(\"POST\"),\n\t\tAuthType: tea.String(\"AK\"),\n\t\tStyle: tea.String(\"RPC\"),\n\t\tReqBodyType: tea.String(\"formData\"),\n\t\tBodyType: tea.String(\"json\"),\n\t}\n\t_result = &DescribeContainerStatisticsResponse{}\n\t_body, _err := client.CallApi(params, req, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_err = tea.Convert(_body, &_result)\n\treturn _result, _err\n}", "func NewStatistics(ns string, la int64, lbr int64) *Statistics {\n\treturn &Statistics{\n\t\tNamespace: ns,\n\t\tLastAccessed: la,\n\t\tLastBufferedRequest: lbr,\n\t}\n}", "func (s *StoresStats) SetRegionsStats(storeIDs []uint64, writeBytesRates, writeKeysRates []float64) {\n\tfor i, storeID := range storeIDs {\n\t\trollingStoreStat := s.GetOrCreateRollingStoreStats(storeID)\n\t\trollingStoreStat.SetRegionsStats(writeBytesRates[i], writeKeysRates[i])\n\t}\n}", "func testStats(t *testing.T,\n\ts *runtime.ContainerStats,\n\tconfig *runtime.ContainerConfig,\n) {\n\trequire.NotEmpty(t, s.GetAttributes().GetId())\n\trequire.NotEmpty(t, s.GetAttributes().GetMetadata())\n\trequire.NotEmpty(t, s.GetAttributes().GetAnnotations())\n\trequire.Equal(t, s.GetAttributes().GetLabels(), config.Labels)\n\trequire.Equal(t, s.GetAttributes().GetAnnotations(), config.Annotations)\n\trequire.Equal(t, s.GetAttributes().GetMetadata().Name, config.Metadata.Name)\n\trequire.NotEmpty(t, s.GetAttributes().GetLabels())\n\trequire.NotEmpty(t, s.GetCpu().GetTimestamp())\n\trequire.NotEmpty(t, s.GetCpu().GetUsageCoreNanoSeconds().GetValue())\n\trequire.NotEmpty(t, s.GetMemory().GetTimestamp())\n\trequire.NotEmpty(t, s.GetMemory().GetWorkingSetBytes().GetValue())\n\trequire.NotEmpty(t, s.GetWritableLayer().GetTimestamp())\n\trequire.NotEmpty(t, s.GetWritableLayer().GetFsId().GetMountpoint())\n\n\t// UsedBytes of a fresh container can be zero on Linux, depending on the backing filesystem.\n\t// https://github.com/containerd/containerd/issues/7909\n\tif goruntime.GOOS == \"windows\" {\n\t\trequire.NotEmpty(t, s.GetWritableLayer().GetUsedBytes().GetValue())\n\t}\n\n\t// Windows does not collect inodes stats.\n\tif goruntime.GOOS != \"windows\" 
{\n\t\trequire.NotEmpty(t, s.GetWritableLayer().GetInodesUsed().GetValue())\n\t}\n}", "func injectStatistics(qualifiedTableName string, table *workload.Table, sqlDB *gosql.DB) error {\n\tvar encoded []byte\n\tencoded, err := json.Marshal(table.Stats)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, err := sqlDB.Exec(\n\t\tfmt.Sprintf(`ALTER TABLE %s INJECT STATISTICS '%s'`, qualifiedTableName, encoded),\n\t); err != nil {\n\t\tif strings.Contains(err.Error(), \"syntax error\") {\n\t\t\t// This syntax was added in v2.1, so ignore the syntax error\n\t\t\t// if run against versions earlier than this.\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\treturn nil\n}", "func (c *vertexCollection) Statistics(ctx context.Context) (CollectionStatistics, error) {\n\tresult, err := c.rawCollection().Statistics(ctx)\n\tif err != nil {\n\t\treturn CollectionStatistics{}, WithStack(err)\n\t}\n\treturn result, nil\n}", "func NewStatistics(loggingPeriod time.Duration) *Statistics {\n\tsw := Statistics{\n\t\tstatistics: make(chan uint8, statisticsChannelSize),\n\t\tcounter: 0,\n\t\tstart: time.Now(),\n\t\tloggingPeriod: loggingPeriod,\n\t}\n\tgo sw.run()\n\treturn &sw\n}", "func (m *Monitor) fetchStats(container dockerContainer, labelMap map[string]string, envMap map[string]string, enhancedMetricsConfig EnhancedMetricsConfig) {\n\tctx, cancel := context.WithTimeout(m.ctx, m.timeout)\n\tstats, err := m.client.ContainerStats(ctx, container.ID, false)\n\tif err != nil {\n\t\tcancel()\n\t\tif isContainerNotFound(err) {\n\t\t\tm.logger.Debugf(\"container %s is not found in cache\", container.ID)\n\t\t\treturn\n\t\t}\n\t\tm.logger.WithError(err).Errorf(\"Could not fetch docker stats for container id %s\", container.ID)\n\t\treturn\n\t}\n\n\tvar parsed dtypes.StatsJSON\n\terr = json.NewDecoder(stats.Body).Decode(&parsed)\n\tstats.Body.Close()\n\tif err != nil {\n\t\tcancel()\n\t\t// EOF means that there aren't any stats, perhaps because the container\n\t\t// is gone. 
Just return nothing and no error.\n\t\tif err == io.EOF {\n\t\t\treturn\n\t\t}\n\t\tm.logger.WithError(err).Errorf(\"Could not parse docker stats for container id %s\", container.ID)\n\t\treturn\n\t}\n\n\tdps, err := ConvertStatsToMetrics(container.ContainerJSON, &parsed, enhancedMetricsConfig)\n\tcancel()\n\tif err != nil {\n\t\tm.logger.WithError(err).Errorf(\"Could not convert docker stats for container id %s\", container.ID)\n\t\treturn\n\t}\n\n\tfor i := range dps {\n\t\tfor k, dimName := range envMap {\n\t\t\tif v := container.EnvMap[k]; v != \"\" {\n\t\t\t\tdps[i].Dimensions[dimName] = v\n\t\t\t}\n\t\t}\n\t\tfor k, dimName := range labelMap {\n\t\t\tif v := container.Config.Labels[k]; v != \"\" {\n\t\t\t\tdps[i].Dimensions[dimName] = v\n\t\t\t}\n\t\t}\n\t}\n\tm.Output.SendDatapoints(dps...)\n}", "func (m *User) SetInsights(value OfficeGraphInsightsable)() {\n m.insights = value\n}", "func TestCollectionStatistics(t *testing.T) {\n\tc := createClient(t, nil)\n\tdb := ensureDatabase(nil, c, \"collection_test\", nil, t)\n\tname := \"test_collection_statistics\"\n\tcol, err := db.CreateCollection(nil, name, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create collection '%s': %s\", name, describe(err))\n\t}\n\n\t// create some documents\n\tfor i := 0; i < 10; i++ {\n\t\tbefore, err := col.Statistics(nil)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to fetch before statistics: %s\", describe(err))\n\t\t}\n\t\tdoc := Book{Title: fmt.Sprintf(\"Book %d\", i)}\n\t\tif _, err := col.CreateDocument(nil, doc); err != nil {\n\t\t\tt.Fatalf(\"Failed to create document: %s\", describe(err))\n\t\t}\n\t\tafter, err := col.Statistics(nil)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to fetch after statistics: %s\", describe(err))\n\t\t}\n\t\tif before.Count+1 != after.Count {\n\t\t\tt.Errorf(\"Expected Count before, after to be 1 different. Got %d, %d\", before.Count, after.Count)\n\t\t}\n\t\tif before.Figures.DataFiles.FileSize > after.Figures.DataFiles.FileSize {\n\t\t\tt.Errorf(\"Expected DataFiles.FileSize before <= after. 
Got %d, %d\", before.Figures.DataFiles.FileSize, after.Figures.DataFiles.FileSize)\n\t\t}\n\t}\n}", "func (c *StatsClient) Set(name string, value string) {\n\tif err := c.client.Set(name, value, c.tags, Rate); err != nil {\n\t\tc.logger().Printf(\"datadog.StatsClient.Set error: %s\", err)\n\t}\n}", "func (p *Proxy) recordStatistics(ns string, la int64, lbf int64) (err error) {\n\tlog.WithField(\"ns\", ns).Debug(\"Recording stats\")\n\ts, notFound, err := p.storageService.GetStatisticsUser(ns)\n\tif err != nil {\n\t\tlog.WithFields(\n\t\t\tlog.Fields{\n\t\t\t\t\"ns\": ns,\n\t\t\t}).Warningf(\"Could not load statistics: %s\", err)\n\t\tif !notFound {\n\t\t\treturn\n\t\t}\n\t}\n\n\tif notFound {\n\t\tlog.WithField(\"ns\", ns).Infof(\"New user %s\", ns)\n\t\ts = storage.NewStatistics(ns, la, lbf)\n\t\terr = p.storageService.CreateStatistics(s)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Could not create statistics for %s: %s\", ns, err)\n\t\t}\n\t\treturn\n\t}\n\n\tif la != 0 {\n\t\ts.LastAccessed = la\n\t}\n\n\tif lbf != 0 {\n\t\ts.LastBufferedRequest = lbf\n\t}\n\n\tp.visitLock.Lock()\n\terr = p.storageService.UpdateStatistics(s)\n\tp.visitLock.Unlock()\n\tif err != nil {\n\t\tlog.WithField(\"ns\", ns).Errorf(\"Could not update statistics for %s: %s\", ns, err)\n\t}\n\n\treturn\n}", "func (s *Basememcached_protocolListener) EnterStatistics_option(ctx *Statistics_optionContext) {}", "func (cm *Docker) SetMetrics(metrics models.Metrics) {\n\tif config.GetSwitchVal(\"swarmMode\") {\n\t\ttask := cm.MustGetTask(metrics.Id)\n\t\ttask.SetMetrics(metrics)\n\t\treturn\n\t}\n\tcont := cm.MustGetContainer(metrics.Id)\n\tcont.SetMetrics(metrics)\n}", "func (s *Basememcached_protocolListener) EnterStatistics_command(ctx *Statistics_commandContext) {}", "func (r *AWSEC2SecurityGroup_Egress) SetMetadata(metadata map[string]interface{}) {\n\tr._metadata = metadata\n}", "func init() {\n\tmb.Registry.MustAddMetricSet(\"connection\", \"load_stats\", New)\n}", "func (self *TileSprite) SetSetHealthA(member interface{}) {\n self.Object.Set(\"setHealth\", member)\n}", "func NewStatistics() Statistics {\n\treturn Statistics{\n\t\tMean: NewDayTypeTimeseries(),\n\t\tStdev: NewDayTypeTimeseries(),\n\t}\n}", "func (ds *Dataset) stats() {\n\tif ds.Stats == nil {\n\t\tds.Stats = &Stats{}\n\t}\n\tif ds.Mtx != nil {\n\t\tds.Stats.Rows, ds.Stats.Columns = ds.Mtx.Dims()\n\t}\n}", "func (r *remoteRuntimeService) ListContainerStats(ctx context.Context, filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) {\n\tklog.V(10).InfoS(\"[RemoteRuntimeService] ListContainerStats\", \"filter\", filter)\n\t// Do not set timeout, because writable layer stats collection takes time.\n\t// TODO(random-liu): Should we assume runtime should cache the result, and set timeout here?\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\treturn r.listContainerStatsV1(ctx, filter)\n}", "func TestStatistics(t *testing.T) {\n\tet, err := createExplorerTester(\"TestStatistics\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tstats := et.explorer.Statistics()\n\tif stats.Height != et.explorer.blockchainHeight || et.explorer.blockchainHeight == 0 {\n\t\tt.Error(\"wrong height reported in stats object\")\n\t}\n\tif stats.TransactionCount != et.explorer.transactionCount || et.explorer.transactionCount == 0 {\n\t\tt.Error(\"wrong transaction count reported in stats object\")\n\t}\n}", "func (sb *Sandbox) Statistics() (map[string]*types.InterfaceStatistics, error) {\n\tm := 
make(map[string]*types.InterfaceStatistics)\n\n\tsb.mu.Lock()\n\tosb := sb.osSbox\n\tsb.mu.Unlock()\n\tif osb == nil {\n\t\treturn m, nil\n\t}\n\n\tvar err error\n\tfor _, i := range osb.Interfaces() {\n\t\tif m[i.DstName()], err = i.Statistics(); err != nil {\n\t\t\treturn m, err\n\t\t}\n\t}\n\n\treturn m, nil\n}", "func (o *QtreeCollectionGetParams) SetStatisticsTimestamp(statisticsTimestamp *string) {\n\to.StatisticsTimestamp = statisticsTimestamp\n}", "func (is *ImageServer) AddStats(width int, height int) {\n\n\tis.Lock()\n\tdefer is.Unlock()\n\n\tis.NumImages++\n\tis.WidthSum += width\n\tis.HeightSum += height\n}", "func (c *Client) containerStats(ctx context.Context, id string) (*containerdtypes.Metric, error) {\n\tif !c.lock.TrylockWithRetry(ctx, id) {\n\t\treturn nil, errtypes.ErrLockfailed\n\t}\n\tdefer c.lock.Unlock(id)\n\n\tpack, err := c.watch.get(id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmetrics, err := pack.task.Metrics(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn metrics, nil\n}", "func (t *task) Stats(_ context.Context) (*libcontainerdtypes.Stats, error) {\n\thc, err := t.getHCSContainer()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treadAt := time.Now()\n\ts, err := hc.Statistics()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &libcontainerdtypes.Stats{\n\t\tRead: readAt,\n\t\tHCSStats: &s,\n\t}, nil\n}", "func (s *DevStat) SetGatherDuration(start time.Time, duration time.Duration) {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\ts.Counters[CycleGatherStartTime] = start.Unix()\n\ts.Counters[CycleGatherDuration] = duration.Seconds()\n}", "func (c *cpusetHandler) Stat(ctr *CgroupControl, m *Metrics) error {\n\treturn nil\n}", "func BackupStatistics(statisticsFile *utils.FileWithByteCount, tables []Relation) {\n\tattStats := GetAttributeStatistics(connectionPool, tables)\n\ttupleStats := GetTupleStatistics(connectionPool, tables)\n\n\tBackupSessionGUCs(statisticsFile)\n\tPrintStatisticsStatements(statisticsFile, globalTOC, tables, attStats, tupleStats)\n}", "func (s *VicStreamProxy) StreamContainerStats(ctx context.Context, config *convert.ContainerStatsConfig) error {\n\top := trace.FromContext(ctx, \"\")\n\tdefer trace.End(trace.Begin(config.ContainerID, op))\n\topID := op.ID()\n\n\tif s.client == nil {\n\t\treturn errors.NillPortlayerClientError(\"StreamProxy\")\n\t}\n\n\t// create a child context that we control\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\tparams := containers.NewGetContainerStatsParamsWithContext(op).WithOpID(&opID)\n\tparams.ID = config.ContainerID\n\tparams.Stream = config.Stream\n\n\tconfig.Ctx = ctx\n\tconfig.Cancel = cancel\n\n\t// create our converter\n\tcontainerConverter := convert.NewContainerStats(config)\n\t// provide the writer for the portLayer and start listening for metrics\n\twriter := containerConverter.Listen()\n\tif writer == nil {\n\t\t// problem with the listener\n\t\treturn errors.InternalServerError(fmt.Sprintf(\"unable to gather container(%s) statistics\", config.ContainerID))\n\t}\n\n\t_, err := s.client.Containers.GetContainerStats(params, writer)\n\tif err != nil {\n\t\tswitch err := err.(type) {\n\t\tcase *containers.GetContainerStatsNotFound:\n\t\t\treturn errors.NotFoundError(config.ContainerID)\n\t\tcase *containers.GetContainerStatsInternalServerError:\n\t\t\treturn errors.InternalServerError(\"Server error from the interaction port layer\")\n\t\tdefault:\n\t\t\tif ctx.Err() == context.Canceled {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t//Check for EOF. 
Since the connection, transport, and data handling are\n\t\t\t//encapsulated inside of Swagger, we can only detect EOF by checking the\n\t\t\t//error string\n\t\t\tif strings.Contains(err.Error(), SwaggerSubstringEOF) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn errors.InternalServerError(fmt.Sprintf(\"Unknown error from the interaction port layer: %s\", err))\n\t\t}\n\t}\n\treturn nil\n}", "func (c *ContainerClient) SetMetadata(ctx context.Context, o *ContainerSetMetadataOptions) (ContainerSetMetadataResponse, error) {\n\tmetadataOptions, lac, mac := o.format()\n\tresp, err := c.client.SetMetadata(ctx, metadataOptions, lac, mac)\n\n\treturn toContainerSetMetadataResponse(resp), handleError(err)\n}", "func (this *ContainerCtl) Stats(ctx context.Context) {\n\tvar id string\n\tid = ctx.Params().Get(\"id\")\n\n\tcli := GetDockerClient()\n\tresp, err := cli.ContainerStats(stdContext.Background(), id, true)\n\tif err != nil {\n\t\tlog.Println(\"ContainerExport err:\", err.Error())\n\t\tthis.ReturnJSon(ctx, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\tthis.ReturnJSon(ctx, http.StatusOK, \"ok\", resp)\n\treturn\n}", "func (ctn *Container) SetMetrics(ctnMetrics *metrics.ContainerMetrics) {\n\tif ctnMetrics == nil {\n\t\treturn\n\t}\n\n\tctn.CPU = ctnMetrics.CPU\n\tctn.IO = ctnMetrics.IO\n\tctn.Memory = ctnMetrics.Memory\n}", "func (srv *Server) setServicesHealth() {\n\n\tfor service := range srv.gRPC.GetServiceInfo() {\n\t\tsrv.health.SetServingStatus(service, healthpb.HealthCheckResponse_SERVING)\n\t\t// TODO: use debug log\n\t\t//log.Printf(\"Service health info %s is serving\\n\", service)\n\t}\n\n\tsrv.startHealthMonitor()\n\tlog.Printf(\"%s server health monitor started\", srv.name)\n}", "func (r *AWSAppStreamStack) SetMetadata(metadata map[string]interface{}) {\n\tr._metadata = metadata\n}", "func (s *Set) Stats() *Stats {\n\tstats := &Stats{\n\t\tAdds: s.addCalls,\n\t\tErrors: s.addErrors,\n\t\tNodes: len(s.nodes),\n\t\tSymbols: len(s.symid),\n\t}\n\tfor _, facts := range s.facts {\n\t\tstats.Facts += len(facts)\n\t}\n\tfor _, edges := range s.edges {\n\t\tstats.Edges += len(edges)\n\t}\n\treturn stats\n}", "func (s *Service) Statistics(tags map[string]string) []models.Statistic {\n\treturn []models.Statistic{{\n\t\tName: \"opentsdb\",\n\t\tTags: s.defaultTags.Merge(tags),\n\t\tValues: map[string]interface{}{\n\t\t\tstatHTTPConnectionsHandled: atomic.LoadInt64(&s.stats.HTTPConnectionsHandled),\n\t\t\tstatTelnetConnectionsActive: atomic.LoadInt64(&s.stats.ActiveTelnetConnections),\n\t\t\tstatTelnetConnectionsHandled: atomic.LoadInt64(&s.stats.HandledTelnetConnections),\n\t\t\tstatTelnetPointsReceived: atomic.LoadInt64(&s.stats.TelnetPointsReceived),\n\t\t\tstatTelnetBytesReceived: atomic.LoadInt64(&s.stats.TelnetBytesReceived),\n\t\t\tstatTelnetReadError: atomic.LoadInt64(&s.stats.TelnetReadError),\n\t\t\tstatTelnetBadLine: atomic.LoadInt64(&s.stats.TelnetBadLine),\n\t\t\tstatTelnetBadTime: atomic.LoadInt64(&s.stats.TelnetBadTime),\n\t\t\tstatTelnetBadTag: atomic.LoadInt64(&s.stats.TelnetBadTag),\n\t\t\tstatTelnetBadFloat: atomic.LoadInt64(&s.stats.TelnetBadFloat),\n\t\t\tstatBatchesTransmitted: atomic.LoadInt64(&s.stats.BatchesTransmitted),\n\t\t\tstatPointsTransmitted: atomic.LoadInt64(&s.stats.PointsTransmitted),\n\t\t\tstatBatchesTransmitFail: atomic.LoadInt64(&s.stats.BatchesTransmitFail),\n\t\t\tstatConnectionsActive: atomic.LoadInt64(&s.stats.ActiveConnections),\n\t\t\tstatConnectionsHandled: atomic.LoadInt64(&s.stats.HandledConnections),\n\t\t\tstatDroppedPointsInvalid: 
atomic.LoadInt64(&s.stats.InvalidDroppedPoints),\n\t\t},\n\t}}\n}", "func (mcd *MetricUserClusterDistribution) PrintStatistic(verbose bool) {\n\tfmt.Println(\"Metric User Cluster distribution:\")\n\tfmt.Print(mcd.clusterDistribution.GetStatistics(verbose))\n}", "func (o *QtreeCollectionGetParams) SetStatisticsStatus(statisticsStatus *string) {\n\to.StatisticsStatus = statisticsStatus\n}", "func (s *StoresStats) Set(storeID uint64, stats *pdpb.StoreStats) {\n\trollingStoreStat := s.GetOrCreateRollingStoreStats(storeID)\n\trollingStoreStat.Set(stats)\n}", "func (m *UrlMap) AddStatistics(requestMethod, requestUrl string, requesttime time.Duration) {\n\tm.lock.Lock()\n\tdefer m.lock.Unlock()\n\n\tif method, ok := m.urlmap[requestUrl]; ok {\n\t\tif s, ok := method[requestMethod]; ok {\n\t\t\ts.RequestNum += 1\n\t\t\tif s.MaxTime < requesttime {\n\t\t\t\ts.MaxTime = requesttime\n\t\t\t}\n\t\t\tif s.MinTime > requesttime {\n\t\t\t\ts.MinTime = requesttime\n\t\t\t}\n\t\t\ts.TotalTime += requesttime\n\t\t} else {\n\t\t\tnb := &Statistics{\n\t\t\t\tRequestUrl: requestUrl,\n\t\t\t\tRequestNum: 1,\n\t\t\t\tMinTime: requesttime,\n\t\t\t\tMaxTime: requesttime,\n\t\t\t\tTotalTime: requesttime,\n\t\t\t}\n\t\t\tm.urlmap[requestUrl][requestMethod] = nb\n\t\t}\n\n\t} else {\n\t\tif m.LengthLimit > 0 && m.LengthLimit <= len(m.urlmap) {\n\t\t\treturn\n\t\t}\n\t\tmethodmap := make(map[string]*Statistics)\n\t\tnb := &Statistics{\n\t\t\tRequestUrl: requestUrl,\n\t\t\tRequestNum: 1,\n\t\t\tMinTime: requesttime,\n\t\t\tMaxTime: requesttime,\n\t\t\tTotalTime: requesttime,\n\t\t}\n\t\tmethodmap[requestMethod] = nb\n\t\tm.urlmap[requestUrl] = methodmap\n\t}\n}", "func (s *Basememcached_protocolListener) EnterStatistics_response(ctx *Statistics_responseContext) {}", "func (r *remoteRuntimeService) ContainerStats(ctx context.Context, containerID string) (*runtimeapi.ContainerStats, error) {\n\tklog.V(10).InfoS(\"[RemoteRuntimeService] ContainerStats\", \"containerID\", containerID, \"timeout\", r.timeout)\n\tctx, cancel := context.WithTimeout(ctx, r.timeout)\n\tdefer cancel()\n\n\treturn r.containerStatsV1(ctx, containerID)\n}", "func ContainerStats(client *criapi.RuntimeServiceClient, opts statsOptions, name string) (*MetricsV2, error) {\n\tfilter := &criapi.ContainerStatsFilter{}\n\tif opts.id != \"\" {\n\t\tfilter.Id = opts.id\n\t}\n\tif opts.podID != \"\" {\n\t\tfilter.PodSandboxId = opts.podID\n\t}\n\tif opts.labels != nil {\n\t\tfilter.LabelSelector = opts.labels\n\t}\n\trequest := &criapi.ListContainerStatsRequest{\n\t\tFilter: filter,\n\t}\n\n\tmetrics := &MetricsV2{}\n\tvar err error\n\tif metrics, err = displayStats(client, request, name); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn metrics, nil\n}", "func (d *Dao) UpsertStatistics(ctx context.Context, name string, date int, hour int, s *model.StatisticsStat) (err error) {\n\tif _, err = d.db.Exec(ctx, _upsertLog,\n\t\tname, date, hour,\n\t\ts.View, s.HotClick, s.HotView, s.TotalView,\n\t\ts.View, s.HotClick, s.HotView, s.TotalView); err != nil {\n\t\treturn\n\t}\n\treturn\n}", "func (current WirelessStatistics) SetUtilization(previous WirelessStatistics) {\n\tfor _, c := range current {\n\t\tfor _, p := range previous {\n\t\t\tif c.Frequency == p.Frequency {\n\t\t\t\tc.SetUtilization(p)\n\t\t\t}\n\t\t}\n\t}\n}", "func (s *serverMetricsRecorder) SetMemoryUtilization(val float64) {\n\tif val < 0 || val > 1 {\n\t\tif logger.V(2) {\n\t\t\tlogger.Infof(\"Ignoring Memory Utilization value out of range: %v\", 
val)\n\t\t}\n\t\treturn\n\t}\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\ts.state.MemUtilization = val\n}", "func convertContainerStats(containerStats *container_info.ContainerStats, latestTime *time.Time) (DetailContainerStats, error) {\n\n\tif containerStats.Timestamp.After(*latestTime) {\n\t\t*latestTime = containerStats.Timestamp\n\t}\n\n\tvar stats DetailContainerStats\n\tstats.Timestamp = containerStats.Timestamp\n\tstats.Cpu_usage_seconds_total = containerStats.Cpu.Usage.Total\n\tstats.Cpu_user_seconds_total = containerStats.Cpu.Usage.User\n\tstats.Cpu_system_seconds_total = containerStats.Cpu.Usage.System\n\n\tstats.Memory_usage_bytes = containerStats.Memory.Usage\n\t//stats.Memory_limit_bytes = containerStats.Memory.Usage\n\tstats.Memory_cache = containerStats.Memory.Cache\n\tstats.Memory_rss = containerStats.Memory.RSS\n\tstats.Memory_swap = containerStats.Memory.Swap\n\n\tstats.Network_receive_bytes_total = containerStats.Network.RxBytes\n\tstats.Network_receive_packets_total = containerStats.Network.RxPackets\n\tstats.Network_receive_packets_dropped_total = containerStats.Network.RxDropped\n\tstats.Network_receive_errors_total = containerStats.Network.RxErrors\n\tstats.Network_transmit_bytes_total = containerStats.Network.TxBytes\n\tstats.Network_transmit_packets_total = containerStats.Network.TxPackets\n\tstats.Network_transmit_packets_dropped_total = containerStats.Network.TxDropped\n\tstats.Network_transmit_errors_total = containerStats.Network.TxErrors\n\n\tstats.Filesystem = converFsStats(containerStats.Filesystem)\n\n\tstats.Diskio_service_bytes_async = sumDiskStats(containerStats.DiskIo.IoServiceBytes, DiskStatsAsync)\n\tstats.Diskio_service_bytes_read = sumDiskStats(containerStats.DiskIo.IoServiceBytes, DiskStatsRead)\n\tstats.Diskio_service_bytes_sync = sumDiskStats(containerStats.DiskIo.IoServiceBytes, DiskStatsSync)\n\tstats.Diskio_service_bytes_total = sumDiskStats(containerStats.DiskIo.IoServiceBytes, DiskStatsTotal)\n\tstats.Diskio_service_bytes_write = sumDiskStats(containerStats.DiskIo.IoServiceBytes, DiskStatsWrite)\n\n\tstats.Tasks_state_nr_sleeping = containerStats.TaskStats.NrSleeping\n\tstats.Tasks_state_nr_running = containerStats.TaskStats.NrRunning\n\tstats.Tasks_state_nr_stopped = containerStats.TaskStats.NrStopped\n\tstats.Tasks_state_nr_uninterruptible = containerStats.TaskStats.NrUninterruptible\n\tstats.Tasks_state_nr_io_wait = containerStats.TaskStats.NrIoWait\n\treturn stats, nil\n}", "func init() {\n\tmb.Registry.MustAddMetricSet(\"psoft\", \"stat\", New)\n}", "func (d *Dao) AddStatistics(c context.Context, s *model.Statistics) (id int64, err error) {\n\tvar res xsql.Result\n\tif res, err = d.db.Exec(c, _insertStatSQL, s.TargetMid, s.TargetID, s.EventID, s.State, s.Type, s.Quantity, s.Ctime, s.Quantity); err != nil {\n\t\treturn\n\t}\n\tif id, err = res.LastInsertId(); err != nil {\n\t\terr = errors.WithStack(err)\n\t\treturn\n\t}\n\treturn\n}", "func toContainerStats0(s *cgroups.Stats, ret *info.ContainerStats) {\n\tret.Cpu.Usage.User = s.CpuStats.CpuUsage.UsageInUsermode\n\tret.Cpu.Usage.System = s.CpuStats.CpuUsage.UsageInKernelmode\n\tn := len(s.CpuStats.CpuUsage.PercpuUsage)\n\tret.Cpu.Usage.PerCpu = make([]uint64, n)\n\n\tret.Cpu.Usage.Total = 0\n\tfor i := 0; i < n; i++ {\n\t\tret.Cpu.Usage.PerCpu[i] = s.CpuStats.CpuUsage.PercpuUsage[i]\n\t\tret.Cpu.Usage.Total += s.CpuStats.CpuUsage.PercpuUsage[i]\n\t}\n}", "func init() {\n\tmb.Registry.MustAddMetricSet(\"mssql\", \"performance\", 
New,\n\t\tmb.DefaultMetricSet(),\n\t\tmb.WithHostParser(mssql.HostParser))\n}", "func (d *DatasetContainer) setInfo(featureTags int, trainingSize int, testingSize int) {\n\td.Training.setInfo(featureTags, trainingSize)\n\n\tif testingSize != 0 {\n\t\td.Testing.setInfo(featureTags, trainingSize)\n\t}\n}", "func (m *URLMap) AddStatistics(requestMethod, requestURL, requestController string, requesttime time.Duration) {\n\tm.lock.Lock()\n\tdefer m.lock.Unlock()\n\tif method, ok := m.urlmap[requestURL]; ok {\n\t\tif s, ok := method[requestMethod]; ok {\n\t\t\ts.RequestNum++\n\t\t\tif s.MaxTime < requesttime {\n\t\t\t\ts.MaxTime = requesttime\n\t\t\t}\n\t\t\tif s.MinTime > requesttime {\n\t\t\t\ts.MinTime = requesttime\n\t\t\t}\n\t\t\ts.TotalTime += requesttime\n\t\t} else {\n\t\t\tnb := &Statistics{\n\t\t\t\tRequestURL: requestURL,\n\t\t\t\tRequestController: requestController,\n\t\t\t\tRequestNum: 1,\n\t\t\t\tMinTime: requesttime,\n\t\t\t\tMaxTime: requesttime,\n\t\t\t\tTotalTime: requesttime,\n\t\t\t}\n\t\t\tm.urlmap[requestURL][requestMethod] = nb\n\t\t}\n\t} else {\n\t\tif m.LengthLimit > 0 && m.LengthLimit <= len(m.urlmap) {\n\t\t\treturn\n\t\t}\n\t\tmethodmap := make(map[string]*Statistics)\n\t\tnb := &Statistics{\n\t\t\tRequestURL: requestURL,\n\t\t\tRequestController: requestController,\n\t\t\tRequestNum: 1,\n\t\t\tMinTime: requesttime,\n\t\t\tMaxTime: requesttime,\n\t\t\tTotalTime: requesttime,\n\t\t}\n\t\tmethodmap[requestMethod] = nb\n\t\tm.urlmap[requestURL] = methodmap\n\t}\n}", "func (r *AWSApiGatewayUsagePlan_ApiStage) SetMetadata(metadata map[string]interface{}) {\n\tr._metadata = metadata\n}", "func UpdateStatistics(settings *playfab.Settings, postData *UpdateStatisticsRequestModel, entityToken string) (*UpdateStatisticsResponseModel, error) {\n if entityToken == \"\" {\n return nil, playfab.NewCustomError(\"entityToken should not be an empty string\", playfab.ErrorGeneric)\n }\n b, errMarshal := json.Marshal(postData)\n if errMarshal != nil {\n return nil, playfab.NewCustomError(errMarshal.Error(), playfab.ErrorMarshal)\n }\n\n sourceMap, err := playfab.Request(settings, b, \"/Statistic/UpdateStatistics\", \"X-EntityToken\", entityToken)\n if err != nil {\n return nil, err\n }\n \n result := &UpdateStatisticsResponseModel{}\n\n config := mapstructure.DecoderConfig{\n DecodeHook: playfab.StringToDateTimeHook,\n Result: result,\n }\n \n decoder, errDecoding := mapstructure.NewDecoder(&config)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n \n errDecoding = decoder.Decode(sourceMap)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n\n return result, nil\n}", "func setDutHealths(ctx context.Context, tr fleet.TrackerServer, pb *dutpool.Balancer) error {\n\tif err := setDutHealthsForPool(ctx, tr, pb.Target); err != nil {\n\t\treturn err\n\t}\n\tif err := setDutHealthsForPool(ctx, tr, pb.Spare); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (v *IADsContainer) SetFilter(variant *ole.VARIANT) (err error) {\n\thr, _, _ := syscall.Syscall(\n\t\tuintptr(v.VTable().SetFilter),\n\t\t2,\n\t\tuintptr(unsafe.Pointer(v)),\n\t\tuintptr(unsafe.Pointer(variant)),\n\t\t0)\n\tif hr != 0 {\n\t\treturn convertHresultToError(hr)\n\t}\n\treturn nil\n}", "func (c *Container) Stats(ctx context.Context) (*types.StatsJSON, error) {\n\tresponseBody, err := c.client.ContainerStats(ctx, c.id, false /*stream*/)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ContainerStats 
failed: %v\", err)\n\t}\n\tdefer responseBody.Body.Close()\n\tvar v types.StatsJSON\n\tif err := json.NewDecoder(responseBody.Body).Decode(&v); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to decode container stats: %v\", err)\n\t}\n\treturn &v, nil\n}", "func (o *GetHistogramStatByParams) SetStat(stat string) {\n\to.Stat = stat\n}", "func StatsAggregatorSetSummaryWriter(scope *Scope, stats_aggregator tf.Output, summary tf.Output) (o *tf.Operation) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"StatsAggregatorSetSummaryWriter\",\n\t\tInput: []tf.Input{\n\t\t\tstats_aggregator, summary,\n\t\t},\n\t}\n\treturn scope.AddOperation(opspec)\n}" ]
[ "0.6701678", "0.57483083", "0.55879396", "0.55502206", "0.55451113", "0.551975", "0.5456196", "0.54074156", "0.5349192", "0.52138686", "0.52063566", "0.5174852", "0.5165561", "0.51111734", "0.50242907", "0.5015697", "0.49920654", "0.49733508", "0.49718073", "0.49158075", "0.4903652", "0.48993087", "0.4885719", "0.48594946", "0.48375875", "0.48170906", "0.47906297", "0.47905505", "0.4785378", "0.47811103", "0.47477463", "0.47397983", "0.47270373", "0.4705759", "0.47044396", "0.46752307", "0.46688288", "0.4660362", "0.46541926", "0.46504924", "0.46288154", "0.45917052", "0.45885608", "0.45866212", "0.45865068", "0.45638853", "0.45610887", "0.45587704", "0.4558597", "0.4540526", "0.45357832", "0.4518752", "0.45028552", "0.4501828", "0.449909", "0.4493329", "0.44909444", "0.4485197", "0.44640917", "0.4462052", "0.44586852", "0.44406018", "0.4436738", "0.44360211", "0.44356957", "0.44257507", "0.44254503", "0.4424055", "0.44177595", "0.44133404", "0.44103754", "0.4408372", "0.4406613", "0.44046104", "0.44032186", "0.43921182", "0.43754947", "0.43698075", "0.43672085", "0.43585497", "0.4358392", "0.43576083", "0.43518868", "0.4350181", "0.43480852", "0.43476492", "0.43450436", "0.43425378", "0.43371338", "0.43313986", "0.4330766", "0.4309471", "0.42999175", "0.42989418", "0.42915937", "0.42893118", "0.42817897", "0.42747372", "0.42737442", "0.42667508" ]
0.7775303
0
GetStatistics returns container statistics with other meta data such as the container name
func (cs *Stats) GetStatistics() StatsEntry { cs.mutex.Lock() defer cs.mutex.Unlock() return cs.StatsEntry }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (container *container) Statistics() (Statistics, error) {\r\n\tproperties, err := container.system.Properties(context.Background(), schema1.PropertyTypeStatistics)\r\n\tif err != nil {\r\n\t\treturn Statistics{}, convertSystemError(err, container)\r\n\t}\r\n\r\n\treturn properties.Statistics, nil\r\n}", "func (c *Container) GetContainerStats(previousStats *ContainerStats) (*ContainerStats, error) {\n\treturn nil, define.ErrOSNotSupported\n}", "func (n *mockAgent) statsContainer(sandbox *Sandbox, c Container) (*ContainerStats, error) {\n\treturn &ContainerStats{}, nil\n}", "func (c *Container) GetContainerStats(previousStats *ContainerStats) (*ContainerStats, error) {\n\treturn nil, ErrOSNotSupported\n}", "func ContainerStats(client *criapi.RuntimeServiceClient, opts statsOptions, name string) (*MetricsV2, error) {\n\tfilter := &criapi.ContainerStatsFilter{}\n\tif opts.id != \"\" {\n\t\tfilter.Id = opts.id\n\t}\n\tif opts.podID != \"\" {\n\t\tfilter.PodSandboxId = opts.podID\n\t}\n\tif opts.labels != nil {\n\t\tfilter.LabelSelector = opts.labels\n\t}\n\trequest := &criapi.ListContainerStatsRequest{\n\t\tFilter: filter,\n\t}\n\n\tmetrics := &MetricsV2{}\n\tvar err error\n\tif metrics, err = displayStats(client, request, name); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn metrics, nil\n}", "func (c *Client) containerStats(ctx context.Context, id string) (*containerdtypes.Metric, error) {\n\tif !c.lock.TrylockWithRetry(ctx, id) {\n\t\treturn nil, errtypes.ErrLockfailed\n\t}\n\tdefer c.lock.Unlock(id)\n\n\tpack, err := c.watch.get(id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmetrics, err := pack.task.Metrics(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn metrics, nil\n}", "func (c *Container) Stats(ctx context.Context) (*types.StatsJSON, error) {\n\tresponseBody, err := c.client.ContainerStats(ctx, c.id, false /*stream*/)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ContainerStats failed: %v\", err)\n\t}\n\tdefer responseBody.Body.Close()\n\tvar v types.StatsJSON\n\tif err := json.NewDecoder(responseBody.Body).Decode(&v); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to decode container stats: %v\", err)\n\t}\n\treturn &v, nil\n}", "func ContainerStats (ctx context.Context, cli *client.Client, containerId string) StatsEntry{\n\tvar MaxStats = StatsEntry{}\n\n\terrChan := make(chan error, 1)\n\tdoneChan\t\t\t:= make (chan bool)\n\tgo collect (ctx, containerId, cli, true, &MaxStats, doneChan, errChan)\n\tfor {\n\t\tselect {\n\t\tcase <-doneChan:\n\t\t\treturn MaxStats\n\t\tcase <-errChan:\n\t\t\treturn MaxStats\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t}\n\n\treturn MaxStats\n}", "func (c *Client) ContainerStats(ctx context.Context, id string) (*containerdtypes.Metric, error) {\n\tmetric, err := c.containerStats(ctx, id)\n\tif err != nil {\n\t\treturn metric, convertCtrdErr(err)\n\t}\n\treturn metric, nil\n}", "func (kvdata *KVData) GetStatistics() map[string]interface{} {\n\trespch := make(chan []interface{}, 1)\n\tcmd := []interface{}{kvCmdGetStats, respch}\n\tresp, _ := c.FailsafeOp(kvdata.sbch, respch, cmd, kvdata.genServerStopCh)\n\treturn resp[0].(map[string]interface{})\n}", "func (r *remoteRuntimeService) ContainerStats(ctx context.Context, containerID string) (*runtimeapi.ContainerStats, error) {\n\tklog.V(10).InfoS(\"[RemoteRuntimeService] ContainerStats\", \"containerID\", containerID, \"timeout\", r.timeout)\n\tctx, cancel := context.WithTimeout(ctx, r.timeout)\n\tdefer cancel()\n\n\treturn r.containerStatsV1(ctx, containerID)\n}", "func 
TestContainerStats(t *testing.T) {\n\tt.Logf(\"Create a pod config and run sandbox container\")\n\tsb, sbConfig := PodSandboxConfigWithCleanup(t, \"sandbox1\", \"stats\")\n\n\tpauseImage := images.Get(images.Pause)\n\tEnsureImageExists(t, pauseImage)\n\n\tt.Logf(\"Create a container config and run container in a pod\")\n\tcontainerConfig := ContainerConfig(\n\t\t\"container1\",\n\t\tpauseImage,\n\t\tWithTestLabels(),\n\t\tWithTestAnnotations(),\n\t)\n\tcn, err := runtimeService.CreateContainer(sb, containerConfig, sbConfig)\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\tassert.NoError(t, runtimeService.RemoveContainer(cn))\n\t}()\n\trequire.NoError(t, runtimeService.StartContainer(cn))\n\tdefer func() {\n\t\tassert.NoError(t, runtimeService.StopContainer(cn, 10))\n\t}()\n\n\tt.Logf(\"Fetch stats for container\")\n\tvar s *runtime.ContainerStats\n\trequire.NoError(t, Eventually(func() (bool, error) {\n\t\ts, err = runtimeService.ContainerStats(cn)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif s.GetWritableLayer().GetTimestamp() != 0 {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t}, time.Second, 30*time.Second))\n\n\tt.Logf(\"Verify stats received for container %q\", cn)\n\ttestStats(t, s, containerConfig)\n}", "func (this *ContainerCtl) Stats(ctx context.Context) {\n\tvar id string\n\tid = ctx.Params().Get(\"id\")\n\n\tcli := GetDockerClient()\n\tresp, err := cli.ContainerStats(stdContext.Background(), id, true)\n\tif err != nil {\n\t\tlog.Println(\"ContainerExport err:\", err.Error())\n\t\tthis.ReturnJSon(ctx, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\tthis.ReturnJSon(ctx, http.StatusOK, \"ok\", resp)\n\treturn\n}", "func convertContainerStats(containerStats *container_info.ContainerStats, latestTime *time.Time) (DetailContainerStats, error) {\n\n\tif containerStats.Timestamp.After(*latestTime) {\n\t\t*latestTime = containerStats.Timestamp\n\t}\n\n\tvar stats DetailContainerStats\n\tstats.Timestamp = containerStats.Timestamp\n\tstats.Cpu_usage_seconds_total = containerStats.Cpu.Usage.Total\n\tstats.Cpu_user_seconds_total = containerStats.Cpu.Usage.User\n\tstats.Cpu_system_seconds_total = containerStats.Cpu.Usage.System\n\n\tstats.Memory_usage_bytes = containerStats.Memory.Usage\n\t//stats.Memory_limit_bytes = containerStats.Memory.Usage\n\tstats.Memory_cache = containerStats.Memory.Cache\n\tstats.Memory_rss = containerStats.Memory.RSS\n\tstats.Memory_swap = containerStats.Memory.Swap\n\n\tstats.Network_receive_bytes_total = containerStats.Network.RxBytes\n\tstats.Network_receive_packets_total = containerStats.Network.RxPackets\n\tstats.Network_receive_packets_dropped_total = containerStats.Network.RxDropped\n\tstats.Network_receive_errors_total = containerStats.Network.RxErrors\n\tstats.Network_transmit_bytes_total = containerStats.Network.TxBytes\n\tstats.Network_transmit_packets_total = containerStats.Network.TxPackets\n\tstats.Network_transmit_packets_dropped_total = containerStats.Network.TxDropped\n\tstats.Network_transmit_errors_total = containerStats.Network.TxErrors\n\n\tstats.Filesystem = converFsStats(containerStats.Filesystem)\n\n\tstats.Diskio_service_bytes_async = sumDiskStats(containerStats.DiskIo.IoServiceBytes, DiskStatsAsync)\n\tstats.Diskio_service_bytes_read = sumDiskStats(containerStats.DiskIo.IoServiceBytes, DiskStatsRead)\n\tstats.Diskio_service_bytes_sync = sumDiskStats(containerStats.DiskIo.IoServiceBytes, DiskStatsSync)\n\tstats.Diskio_service_bytes_total = sumDiskStats(containerStats.DiskIo.IoServiceBytes, 
DiskStatsTotal)\n\tstats.Diskio_service_bytes_write = sumDiskStats(containerStats.DiskIo.IoServiceBytes, DiskStatsWrite)\n\n\tstats.Tasks_state_nr_sleeping = containerStats.TaskStats.NrSleeping\n\tstats.Tasks_state_nr_running = containerStats.TaskStats.NrRunning\n\tstats.Tasks_state_nr_stopped = containerStats.TaskStats.NrStopped\n\tstats.Tasks_state_nr_uninterruptible = containerStats.TaskStats.NrUninterruptible\n\tstats.Tasks_state_nr_io_wait = containerStats.TaskStats.NrIoWait\n\treturn stats, nil\n}", "func (r *remoteRuntimeService) ListContainerStats(ctx context.Context, filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) {\n\tklog.V(10).InfoS(\"[RemoteRuntimeService] ListContainerStats\", \"filter\", filter)\n\t// Do not set timeout, because writable layer stats collection takes time.\n\t// TODO(random-liu): Should we assume runtime should cache the result, and set timeout here?\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\treturn r.listContainerStatsV1(ctx, filter)\n}", "func (m *Monitor) fetchStats(container dockerContainer, labelMap map[string]string, envMap map[string]string, enhancedMetricsConfig EnhancedMetricsConfig) {\n\tctx, cancel := context.WithTimeout(m.ctx, m.timeout)\n\tstats, err := m.client.ContainerStats(ctx, container.ID, false)\n\tif err != nil {\n\t\tcancel()\n\t\tif isContainerNotFound(err) {\n\t\t\tm.logger.Debugf(\"container %s is not found in cache\", container.ID)\n\t\t\treturn\n\t\t}\n\t\tm.logger.WithError(err).Errorf(\"Could not fetch docker stats for container id %s\", container.ID)\n\t\treturn\n\t}\n\n\tvar parsed dtypes.StatsJSON\n\terr = json.NewDecoder(stats.Body).Decode(&parsed)\n\tstats.Body.Close()\n\tif err != nil {\n\t\tcancel()\n\t\t// EOF means that there aren't any stats, perhaps because the container\n\t\t// is gone. 
Just return nothing and no error.\n\t\tif err == io.EOF {\n\t\t\treturn\n\t\t}\n\t\tm.logger.WithError(err).Errorf(\"Could not parse docker stats for container id %s\", container.ID)\n\t\treturn\n\t}\n\n\tdps, err := ConvertStatsToMetrics(container.ContainerJSON, &parsed, enhancedMetricsConfig)\n\tcancel()\n\tif err != nil {\n\t\tm.logger.WithError(err).Errorf(\"Could not convert docker stats for container id %s\", container.ID)\n\t\treturn\n\t}\n\n\tfor i := range dps {\n\t\tfor k, dimName := range envMap {\n\t\t\tif v := container.EnvMap[k]; v != \"\" {\n\t\t\t\tdps[i].Dimensions[dimName] = v\n\t\t\t}\n\t\t}\n\t\tfor k, dimName := range labelMap {\n\t\t\tif v := container.Config.Labels[k]; v != \"\" {\n\t\t\t\tdps[i].Dimensions[dimName] = v\n\t\t\t}\n\t\t}\n\t}\n\tm.Output.SendDatapoints(dps...)\n}", "func (t *task) Stats(_ context.Context) (*libcontainerdtypes.Stats, error) {\n\thc, err := t.getHCSContainer()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treadAt := time.Now()\n\ts, err := hc.Statistics()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &libcontainerdtypes.Stats{\n\t\tRead: readAt,\n\t\tHCSStats: &s,\n\t}, nil\n}", "func (dc *DockerClient) GetStatsFromContainer(id string, collectFs bool) (*wrapper.Statistics, error) {\n\tvar (\n\t\terr error\n\t\tpid int\n\t\tworkingSet uint64\n\n\t\tcontainer = &docker.Container{}\n\t\tgroupWrap = wrapper.Cgroups2Stats // wrapper for cgroup name and interface for stats extraction\n\t\tstats = wrapper.NewStatistics()\n\t)\n\n\tif !isHost(id) {\n\t\tif !isFullLengthID(id) {\n\t\t\treturn nil, fmt.Errorf(\"Container id %+v is not fully-length - cannot inspect container\", id)\n\t\t}\n\t\t// inspect container based only on fully-length container id.\n\t\tcontainer, err = dc.InspectContainer(id)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// take docker container PID\n\t\tpid = container.State.Pid\n\t}\n\n\tfor cg, stat := range groupWrap {\n\t\tgroupPath, err := getSubsystemPath(cg, id)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, \"Cannot found subsystem path for cgroup=\", cg, \" for container id=\", container)\n\t\t\tcontinue\n\t\t}\n\t\t// get cgroup stats for given docker\n\t\terr = stat.GetStats(groupPath, stats.CgroupStats)\n\t\tif err != nil {\n\t\t\t// just log about it\n\t\t\tif isHost(id) {\n\t\t\t\tfmt.Fprintln(os.Stderr, \"Cannot obtain cgroups statistics for host, err=\", err)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintln(os.Stderr, \"Cannot obtain cgroups statistics for container: id=\", id, \", image=\", container.Image, \", name=\", container.Name, \", err=\", err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t}\n\n\t// calculate additional stats memory:working_set based on memory_stats\n\tif totalInactiveAnon, ok := stats.CgroupStats.MemoryStats.Stats[\"total_inactive_anon\"]; ok {\n\t\tworkingSet = stats.CgroupStats.MemoryStats.Usage.Usage\n\t\tif workingSet < totalInactiveAnon {\n\t\t\tworkingSet = 0\n\t\t} else {\n\t\t\tworkingSet -= totalInactiveAnon\n\t\t}\n\n\t\tif totalInactiveFile, ok := stats.CgroupStats.MemoryStats.Stats[\"total_inactive_file\"]; ok {\n\t\t\tif workingSet < totalInactiveFile {\n\t\t\t\tworkingSet = 0\n\t\t\t} else {\n\t\t\t\tworkingSet -= totalInactiveFile\n\t\t\t}\n\t\t}\n\t}\n\tstats.CgroupStats.MemoryStats.Stats[\"working_set\"] = workingSet\n\n\tif !isHost(id) {\n\t\trootFs := \"/\"\n\n\t\tstats.Network, err = network.NetworkStatsFromProc(rootFs, pid)\n\t\tif err != nil {\n\t\t\t// only log error message\n\t\t\tfmt.Fprintf(os.Stderr, \"Unable to get network stats, containerID=%+v, 
pid %d: %v\", container.ID, pid, err)\n\t\t}\n\n\t\tstats.Connection.Tcp, err = network.TcpStatsFromProc(rootFs, pid)\n\t\tif err != nil {\n\t\t\t// only log error message\n\t\t\tfmt.Fprintf(os.Stderr, \"Unable to get tcp stats from pid %d: %v\", pid, err)\n\t\t}\n\n\t\tstats.Connection.Tcp6, err = network.Tcp6StatsFromProc(rootFs, pid)\n\t\tif err != nil {\n\t\t\t// only log error message\n\t\t\tfmt.Fprintf(os.Stderr, \"Unable to get tcp6 stats from pid %d: %v\", pid, err)\n\t\t}\n\n\t} else {\n\t\tstats.Network, err = network.NetworkStatsFromRoot()\n\t\tif err != nil {\n\t\t\t// only log error message\n\t\t\tfmt.Fprintf(os.Stderr, \"Unable to get network stats, containerID=%v, %v\", id, err)\n\t\t}\n\n\t}\n\tif collectFs {\n\t\tstats.Filesystem, err = fs.GetFsStats(container)\n\t\tif err != nil {\n\t\t\t// only log error message\n\t\t\tfmt.Fprintf(os.Stderr, \"Unable to get filesystem stats for docker: %v, err=%v\", id, err)\n\t\t}\n\t}\n\n\treturn stats, nil\n}", "func Stats(runtime *cri.Runtime, name string) (*MetricsV2, error) {\n\n\topts := statsOptions{\n\t\tall: true,\n\t\tid: \"\",\n\t\tpodID: \"\",\n\t\tsample: time.Duration(2 * time.Second),\n\t\toutput: \"\",\n\t\twatch: false,\n\t}\n\tmetrics := &MetricsV2{}\n\tvar err error\n\tif metrics, err = ContainerStats(runtime.GetRuntimeClient(), opts, name); err != nil {\n\t\treturn nil, errors.Wrap(err, \"get container stats\")\n\t}\n\treturn metrics, nil\n}", "func (sb *Sandbox) Statistics() (map[string]*types.InterfaceStatistics, error) {\n\tm := make(map[string]*types.InterfaceStatistics)\n\n\tsb.mu.Lock()\n\tosb := sb.osSbox\n\tsb.mu.Unlock()\n\tif osb == nil {\n\t\treturn m, nil\n\t}\n\n\tvar err error\n\tfor _, i := range osb.Interfaces() {\n\t\tif m[i.DstName()], err = i.Statistics(); err != nil {\n\t\t\treturn m, err\n\t\t}\n\t}\n\n\treturn m, nil\n}", "func testStats(t *testing.T,\n\ts *runtime.ContainerStats,\n\tconfig *runtime.ContainerConfig,\n) {\n\trequire.NotEmpty(t, s.GetAttributes().GetId())\n\trequire.NotEmpty(t, s.GetAttributes().GetMetadata())\n\trequire.NotEmpty(t, s.GetAttributes().GetAnnotations())\n\trequire.Equal(t, s.GetAttributes().GetLabels(), config.Labels)\n\trequire.Equal(t, s.GetAttributes().GetAnnotations(), config.Annotations)\n\trequire.Equal(t, s.GetAttributes().GetMetadata().Name, config.Metadata.Name)\n\trequire.NotEmpty(t, s.GetAttributes().GetLabels())\n\trequire.NotEmpty(t, s.GetCpu().GetTimestamp())\n\trequire.NotEmpty(t, s.GetCpu().GetUsageCoreNanoSeconds().GetValue())\n\trequire.NotEmpty(t, s.GetMemory().GetTimestamp())\n\trequire.NotEmpty(t, s.GetMemory().GetWorkingSetBytes().GetValue())\n\trequire.NotEmpty(t, s.GetWritableLayer().GetTimestamp())\n\trequire.NotEmpty(t, s.GetWritableLayer().GetFsId().GetMountpoint())\n\n\t// UsedBytes of a fresh container can be zero on Linux, depending on the backing filesystem.\n\t// https://github.com/containerd/containerd/issues/7909\n\tif goruntime.GOOS == \"windows\" {\n\t\trequire.NotEmpty(t, s.GetWritableLayer().GetUsedBytes().GetValue())\n\t}\n\n\t// Windows does not collect inodes stats.\n\tif goruntime.GOOS != \"windows\" {\n\t\trequire.NotEmpty(t, s.GetWritableLayer().GetInodesUsed().GetValue())\n\t}\n}", "func (s *Server) ListContainerStats(ctx context.Context, req *types.ListContainerStatsRequest) (*types.ListContainerStatsResponse, error) {\n\tctrList, err := s.ContainerServer.ListContainers(\n\t\tfunc(container *oci.Container) bool {\n\t\t\treturn container.StateNoLock().Status != oci.ContainerStateStopped\n\t\t},\n\t)\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\tfilter := req.Filter\n\tif filter != nil {\n\t\tcFilter := &types.ContainerFilter{\n\t\t\tID: req.Filter.ID,\n\t\t\tPodSandboxID: req.Filter.PodSandboxID,\n\t\t\tLabelSelector: req.Filter.LabelSelector,\n\t\t}\n\t\tctrList = s.filterContainerList(ctx, cFilter, ctrList)\n\t}\n\n\tallStats := make([]*types.ContainerStats, 0, len(ctrList))\n\tfor _, container := range ctrList {\n\t\tsb := s.GetSandbox(container.Sandbox())\n\t\tif sb == nil {\n\t\t\t// Because we don't lock, we will get situations where the container was listed, and then\n\t\t\t// its sandbox was deleted before we got to checking its stats.\n\t\t\t// We should not log in this expected situation.\n\t\t\tcontinue\n\t\t}\n\t\tcgroup := sb.CgroupParent()\n\t\tstats, err := s.Runtime().ContainerStats(ctx, container, cgroup)\n\t\tif err != nil {\n\t\t\t// ErrCgroupDeleted is another situation that will happen if the container\n\t\t\t// is deleted from underneath the call to this function.\n\t\t\tif !errors.Is(err, cgroups.ErrCgroupDeleted) {\n\t\t\t\t// The other errors are much less likely, and possibly useful to hear about.\n\t\t\t\tlog.Warnf(ctx, \"Unable to get stats for container %s: %v\", container.ID(), err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tresponse := s.buildContainerStats(ctx, stats, container)\n\t\tallStats = append(allStats, response)\n\t}\n\n\treturn &types.ListContainerStatsResponse{\n\t\tStats: allStats,\n\t}, nil\n}", "func (o *AggregatedDomain) Statistics(info *bambou.FetchingInfo) (StatisticsList, *bambou.Error) {\n\n\tvar list StatisticsList\n\terr := bambou.CurrentSession().FetchChildren(o, StatisticsIdentity, &list, info)\n\treturn list, err\n}", "func (cc *collectorCache) GetContainerStats(containerNS, containerID string, cacheValidity time.Duration) (*ContainerStats, error) {\n\tcurrentTime := time.Now()\n\tcacheKey := contCoreStatsCachePrefix + containerNS + containerID\n\n\tentry, found, err := cc.cache.Get(currentTime, cacheKey, cacheValidity)\n\tif found {\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn entry.(*ContainerStats), nil\n\t}\n\n\t// No cache, cacheValidity is 0 or too old value\n\tcstats, err := cc.collector.GetContainerStats(containerNS, containerID, cacheValidity)\n\tif err != nil {\n\t\tcc.cache.Store(currentTime, cacheKey, nil, err)\n\t\treturn nil, err\n\t}\n\n\tcc.cache.Store(currentTime, cacheKey, cstats, nil)\n\treturn cstats, nil\n}", "func (c CliCommunicator) Stats() ([]Stats, error) {\n\tout, err := exec.Command(c.DockerPath, c.Command...).Output()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcontainers := strings.Split(string(out), \"\\n\")\n\tstats := make([]Stats, 0)\n\tfor _, con := range containers {\n\t\tif len(con) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar s Stats\n\t\tif err := json.Unmarshal([]byte(con), &s); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tstats = append(stats, s)\n\t}\n\n\treturn stats, nil\n}", "func (d DockerStat) Gather() (Data, error) {\n\tlog.Debug(\"gathering docker stats\")\n\n\tif err := d.initClient(); err != nil {\n\t\treturn Data{}, err\n\t}\n\n\tcontainers, err := d.client.ContainerList(context.Background(), types.ContainerListOptions{All: false})\n\tif err != nil {\n\t\treturn Data{}, errors.Wrap(err, \"failed to list containers\")\n\t}\n\n\tdata := Data{}\n\tfor _, container := range containers {\n\t\tdimensions := GetDimensionsFromContainer(container, d.Label)\n\n\t\tstats, err := d.getStats(container.ID)\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"failed to fetch statistics for container 
ID [%s]: %s\", container.ID, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tcpuUtilization := NewDataPoint(\"CPUUtilization\", computeCpu(stats), UnitPercent, dimensions...)\n\t\tdata = append(data, &cpuUtilization)\n\n\t\tmemoryUtilization := NewDataPoint(\"MemoryUtilization\", float64(stats.MemoryStats.Usage), UnitBytes, dimensions...)\n\t\tdata = append(data, &memoryUtilization)\n\t}\n\n\treturn data, nil\n}", "func (client *Client) DescribeContainerStatisticsWithOptions(request *DescribeContainerStatisticsRequest, runtime *util.RuntimeOptions) (_result *DescribeContainerStatisticsResponse, _err error) {\n\t_err = util.ValidateModel(request)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\tquery := map[string]interface{}{}\n\tif !tea.BoolValue(util.IsUnset(request.ClusterId)) {\n\t\tquery[\"ClusterId\"] = request.ClusterId\n\t}\n\n\treq := &openapi.OpenApiRequest{\n\t\tQuery: openapiutil.Query(query),\n\t}\n\tparams := &openapi.Params{\n\t\tAction: tea.String(\"DescribeContainerStatistics\"),\n\t\tVersion: tea.String(\"2018-12-03\"),\n\t\tProtocol: tea.String(\"HTTPS\"),\n\t\tPathname: tea.String(\"/\"),\n\t\tMethod: tea.String(\"POST\"),\n\t\tAuthType: tea.String(\"AK\"),\n\t\tStyle: tea.String(\"RPC\"),\n\t\tReqBodyType: tea.String(\"formData\"),\n\t\tBodyType: tea.String(\"json\"),\n\t}\n\t_result = &DescribeContainerStatisticsResponse{}\n\t_body, _err := client.CallApi(params, req, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_err = tea.Convert(_body, &_result)\n\treturn _result, _err\n}", "func (c *Client) getStatistics() *AllStats {\n\n\tvar status Status\n\tstatusURL := fmt.Sprintf(statusURLPattern, c.protocol, c.hostname, c.port)\n\tbody := c.MakeRequest(statusURL)\n\terr := json.Unmarshal(body, &status)\n\tif err != nil {\n\t\tlog.Println(\"Unable to unmarshal Adguard log statistics to log statistics struct model\", err)\n\t}\n\n\tvar stats Stats\n\tstatsURL := fmt.Sprintf(statsURLPattern, c.protocol, c.hostname, c.port)\n\tbody = c.MakeRequest(statsURL)\n\terr = json.Unmarshal(body, &stats)\n\tif err != nil {\n\t\tlog.Println(\"Unable to unmarshal Adguard statistics to statistics struct model\", err)\n\t}\n\n\tvar logstats LogStats\n\tlogstatsURL := fmt.Sprintf(logstatsURLPattern, c.protocol, c.hostname, c.port, c.logLimit)\n\tbody = c.MakeRequest(logstatsURL)\n\terr = json.Unmarshal(body, &logstats)\n\tif err != nil {\n\t\tlog.Println(\"Unable to unmarshal Adguard log statistics to log statistics struct model\", err)\n\t}\n\n\tvar allstats AllStats\n\tallstats.status = &status\n\tallstats.stats = &stats\n\tallstats.logStats = &logstats\n\n\treturn &allstats\n}", "func TestContainerListStats(t *testing.T) {\n\tvar (\n\t\tstats []*runtime.ContainerStats\n\t\terr error\n\t)\n\tt.Logf(\"Create a pod config and run sandbox container\")\n\tsb, sbConfig := PodSandboxConfigWithCleanup(t, \"running-pod\", \"statsls\")\n\n\tpauseImage := images.Get(images.Pause)\n\tEnsureImageExists(t, pauseImage)\n\n\tt.Logf(\"Create a container config and run containers in a pod\")\n\tcontainerConfigMap := make(map[string]*runtime.ContainerConfig)\n\tfor i := 0; i < 3; i++ {\n\t\tcName := fmt.Sprintf(\"container%d\", i)\n\t\tcontainerConfig := ContainerConfig(\n\t\t\tcName,\n\t\t\tpauseImage,\n\t\t\tWithTestLabels(),\n\t\t\tWithTestAnnotations(),\n\t\t)\n\t\tcn, err := runtimeService.CreateContainer(sb, containerConfig, sbConfig)\n\t\trequire.NoError(t, err)\n\t\tcontainerConfigMap[cn] = containerConfig\n\t\tdefer func() {\n\t\t\tassert.NoError(t, 
runtimeService.RemoveContainer(cn))\n\t\t}()\n\t\trequire.NoError(t, runtimeService.StartContainer(cn))\n\t\tdefer func() {\n\t\t\tassert.NoError(t, runtimeService.StopContainer(cn, 10))\n\t\t}()\n\t}\n\n\tt.Logf(\"Fetch all container stats\")\n\trequire.NoError(t, Eventually(func() (bool, error) {\n\t\tstats, err = runtimeService.ListContainerStats(&runtime.ContainerStatsFilter{})\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tfor _, s := range stats {\n\t\t\tif s.GetWritableLayer().GetTimestamp() == 0 {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\t}, time.Second, 30*time.Second))\n\n\tt.Logf(\"Verify all container stats\")\n\tfor _, s := range stats {\n\t\ttestStats(t, s, containerConfigMap[s.GetAttributes().GetId()])\n\t}\n}", "func GetStatistics() (Statistics, error) {\n\tstats.RLock()\n\tdefer stats.RUnlock()\n\n\ts := Statistics{\n\t\tUpSince: stats.startTime,\n\t\tTotalCount: stats.successfulCounter + stats.failedCounter,\n\t\tSuccessfulCounter: stats.successfulCounter,\n\t\tFailedCounter: stats.failedCounter,\n\t\tAverageResponseTime: 0,\n\t}\n\n\tif s.TotalCount == 0 {\n\t\treturn s, nil\n\t}\n\n\tavgNs := int64(stats.totalResponseTime) / int64(s.TotalCount)\n\tavgSec := float64(avgNs) / float64(time.Second)\n\ts.AverageResponseTime = math.Floor(avgSec*1000) / 1000\n\n\treturn s, nil\n}", "func (ooc *MockOpenoltClient) CollectStatistics(ctx context.Context, in *openolt.Empty, opts ...grpc.CallOption) (*openolt.Empty, error) {\n\treturn &openolt.Empty{}, nil\n}", "func (f *RemoteRuntime) ContainerStats(ctx context.Context, req *kubeapi.ContainerStatsRequest) (*kubeapi.ContainerStatsResponse, error) {\n\tstats, err := f.RuntimeService.ContainerStats(ctx, req.ContainerId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &kubeapi.ContainerStatsResponse{Stats: stats}, nil\n}", "func (c *Container) getPlatformContainerStats(stats *define.ContainerStats, previousStats *define.ContainerStats) error {\n\tnow := uint64(time.Now().UnixNano())\n\n\tjailName, err := c.jailName()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting jail name: %w\", err)\n\t}\n\n\tentries, err := rctl.GetRacct(\"jail:\" + jailName)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to read accounting for %s: %w\", jailName, err)\n\t}\n\n\t// If the current total usage is less than what was previously\n\t// recorded then it means the container was restarted and runs\n\t// in a new jail\n\tif dur, ok := entries[\"wallclock\"]; ok {\n\t\tif previousStats.Duration > dur*1000000000 {\n\t\t\tpreviousStats = &define.ContainerStats{}\n\t\t}\n\t}\n\n\tfor key, val := range entries {\n\t\tswitch key {\n\t\tcase \"cputime\": // CPU time, in seconds\n\t\t\tstats.CPUNano = val * 1000000000\n\t\t\tstats.AvgCPU = calculateCPUPercent(stats.CPUNano, 0, now, uint64(c.state.StartedTime.UnixNano()))\n\t\tcase \"datasize\": // data size, in bytes\n\t\tcase \"stacksize\": // stack size, in bytes\n\t\tcase \"coredumpsize\": // core dump size, in bytes\n\t\tcase \"memoryuse\": // resident set size, in bytes\n\t\t\tstats.MemUsage = val\n\t\tcase \"memorylocked\": // locked memory, in bytes\n\t\tcase \"maxproc\": // number of processes\n\t\t\tstats.PIDs = val\n\t\tcase \"openfiles\": // file descriptor table size\n\t\tcase \"vmemoryuse\": // address space limit, in bytes\n\t\tcase \"pseudoterminals\": // number of PTYs\n\t\tcase \"swapuse\": // swap space that may be reserved or used, in bytes\n\t\tcase \"nthr\": // number of threads\n\t\tcase \"msgqqueued\": // number of queued SysV 
messages\n\t\tcase \"msgqsize\": // SysV message queue size, in bytes\n\t\tcase \"nmsgq\": // number of SysV message queues\n\t\tcase \"nsem\": // number of SysV semaphores\n\t\tcase \"nsemop\": // number of SysV semaphores modified in a single semop(2) call\n\t\tcase \"nshm\": // number of SysV shared memory segments\n\t\tcase \"shmsize\": // SysV shared memory size, in bytes\n\t\tcase \"wallclock\": // wallclock time, in seconds\n\t\t\tstats.Duration = val * 1000000000\n\t\t\tstats.UpTime = time.Duration(stats.Duration)\n\t\tcase \"pcpu\": // %CPU, in percents of a single CPU core\n\t\t\tstats.CPU = float64(val)\n\t\tcase \"readbps\": // filesystem reads, in bytes per second\n\t\t\tstats.BlockInput = val\n\t\tcase \"writebps\": // filesystem writes, in bytes per second\n\t\t\tstats.BlockOutput = val\n\t\tcase \"readiops\": // filesystem reads, in operations per second\n\t\tcase \"writeiops\": // filesystem writes, in operations per second\n\t\t}\n\t}\n\tstats.MemLimit = c.getMemLimit()\n\tstats.SystemNano = now\n\n\tnetStats, err := getContainerNetIO(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Handle case where the container is not in a network namespace\n\tif netStats != nil {\n\t\tstats.NetInput = netStats.RxBytes\n\t\tstats.NetOutput = netStats.TxBytes\n\t} else {\n\t\tstats.NetInput = 0\n\t\tstats.NetOutput = 0\n\t}\n\n\treturn nil\n}", "func (o *VRS) Statistics(info *bambou.FetchingInfo) (StatisticsList, *bambou.Error) {\n\n\tvar list StatisticsList\n\terr := bambou.CurrentSession().FetchChildren(o, StatisticsIdentity, &list, info)\n\treturn list, err\n}", "func statsWrapper(\n\tc *client.Client,\n\tctx context.Context,\n\tcontainerID string,\n\tstream bool,\n) (types.ContainerStats, error) {\n\tif c != nil {\n\t\treturn c.ContainerStats(ctx, containerID, stream)\n\t}\n\tfc := FakeDockerClient{}\n\treturn fc.ContainerStats(ctx, containerID, stream)\n}", "func GetStatistics(w http.ResponseWriter, req *http.Request) {\n\t//log\n\tnow, userIP := globalPkg.GetIP(req)\n\tlogobj := logpkg.LogStruct{\"_\", now, userIP, \"macAdress\", \"GetStatistics\", \"dashboard\", \"_\", \"_\", \"_\", 0}\n\tif !admin.AdminAPIDecoderAndValidation(w,req.Body,logobj){\n\t\treturn\n\t}\n\t\tstatObj := statStruct{}\n\t\tstatObj.NumberOfBlocks = globalPkg.ConvertFixedLengthStringtoInt(block.GetLastBlock().BlockIndex) + 1\n\t\tstatObj.NumberOfTransactions = len(transaction.GetPendingTransactions())\n\t\tstatObj.NumberOfValidator = len(validator.ValidatorsLstObj)\n\t\tfor _, obj := range validator.ValidatorsLstObj {\n\t\t\tstatObj.NumberOfStakeCoin = append(statObj.NumberOfStakeCoin, obj.ValidatorIP+\"_\"+strconv.FormatFloat(obj.ValidatorStakeCoins, 'f', 6, 64))\n\t\t}\n\t\tsendJSON, _ := json.Marshal(statObj)\n\t\tglobalPkg.SendResponse(w, sendJSON)\n\t\tglobalPkg.WriteLog(logobj, \"get number of blocks and transaction success\", \"success\")\n\treturn\n}", "func (p *Projector) GetStatistics() c.Statistics {\n\trespch := make(chan []interface{}, 1)\n\tcmd := []interface{}{pCmdGetStatistics, respch}\n\tresp, _ := c.FailsafeOp(p.reqch, respch, cmd, p.finch)\n\treturn resp[1].(c.Statistics)\n}", "func (c *Container) Stat() (s *Stat) {\n\n\ts = new(Stat)\n\n\ts.CXDS.RPS = c.Cache.stat.dbRPS()\n\ts.CXDS.WPS = c.Cache.stat.dbWPS()\n\n\ts.Cache.RPS = c.Cache.stat.cRPS()\n\ts.Cache.WPS = c.Cache.stat.cWPS()\n\n\ts.CacheCleaning = c.Cache.stat.cacheCleaning()\n\n\tvar amount, volume = c.amountVolume() // of cache\n\n\ts.CacheObjects.Amount = statutil.Amount(amount)\n\ts.CacheObjects.Volume = 
statutil.Volume(volume)\n\n\tvar all, used = c.db.CXDS().Amount()\n\n\ts.AllObjects.Amount = statutil.Amount(all)\n\ts.UsedObjects.Amount = statutil.Amount(used)\n\n\tall, used = c.db.CXDS().Volume()\n\n\ts.AllObjects.Volume = statutil.Volume(all)\n\ts.UsedObjects.Volume = statutil.Volume(used)\n\n\ts.RootsPerSecond = c.Index.stat.rootsPerSecond()\n\n\ts.Feeds = c.Index.feedsStat()\n\n\treturn\n}", "func GetStats(cgroupManager cgroups.Manager, networkInterfaces []string, pid int) (*info.ContainerStats, error) {\n\tcgroupStats, err := cgroupManager.GetStats()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlibcontainerStats := &libcontainer.Stats{\n\t\tCgroupStats: cgroupStats,\n\t}\n\tstats := toContainerStats(libcontainerStats)\n\n\t// TODO(rjnagal): Use networking stats directly from libcontainer.\n\tstats.Network.Interfaces = make([]info.InterfaceStats, len(networkInterfaces))\n\tfor i := range networkInterfaces {\n\t\tinterfaceStats, err := sysinfo.GetNetworkStats(networkInterfaces[i])\n\t\tif err != nil {\n\t\t\treturn stats, err\n\t\t}\n\t\tstats.Network.Interfaces[i] = interfaceStats\n\t}\n\n\t// If we know the pid & we haven't discovered any network interfaces yet\n\t// try the network namespace.\n\tif pid > 0 && len(stats.Network.Interfaces) == 0 {\n\t\tnsStats, err := networkStatsFromNs(pid)\n\t\tif err != nil {\n\t\t\tglog.V(2).Infof(\"Unable to get network stats from pid %d: %v\", pid, err)\n\t\t} else {\n\t\t\tstats.Network.Interfaces = append(stats.Network.Interfaces, nsStats...)\n\t\t}\n\t}\n\n\t// For backwards compatibility.\n\tif len(stats.Network.Interfaces) > 0 {\n\t\tstats.Network.InterfaceStats = stats.Network.Interfaces[0]\n\t}\n\n\treturn stats, nil\n}", "func PoolStats(name string) (map[string]interface{}, error) {\n\tcmd := &Cmd{}\n\tres := make(map[string]interface{})\n\terr := NvlistIoctl(zfsHandle.Fd(), ZFS_IOC_POOL_STATS, name, cmd, nil, res, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif cmd.Cookie != 0 {\n\t\treturn nil, syscall.Errno(cmd.Cookie)\n\t}\n\treturn res, nil\n}", "func GetStatistics() (FullStatistics, error) {\n\n\tresp, err := http.Get(eosStatsURL)\n\tif err != nil {\n\t\treturn FullStatistics{}, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tret, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn FullStatistics{}, err\n\t}\n\n\tvar array []*DailyStatistics\n\n\tif errr := json.Unmarshal(ret, &array); errr != nil {\n\t\tlog.Fatal(errr)\n\t\treturn FullStatistics{}, errr\n\t}\n\n\tvar res = make(map[string]*DailyStatistics, len(array))\n\tfor index, d := range array {\n\t\tres[strconv.Itoa(index)] = d\n\t}\n\n\tfull := FullStatistics{Daily: res}\n\treturn full, err\n}", "func (sr *ServicedStatsReporter) gatherStats(t time.Time) []Sample {\n\tstats := []Sample{}\n\t// Handle the host metrics.\n\treg, _ := sr.hostRegistry.(*metrics.StandardRegistry)\n\treg.Each(func(name string, i interface{}) {\n\t\ttagmap := map[string]string{\n\t\t\t\"controlplane_host_id\": sr.hostID,\n\t\t}\n\t\tswitch metric := i.(type) {\n\t\tcase metrics.Gauge:\n\t\t\tstats = append(stats, Sample{name, strconv.FormatInt(metric.Value(), 10), t.Unix(), tagmap})\n\t\tcase metrics.GaugeFloat64:\n\t\t\tstats = append(stats, Sample{name, strconv.FormatFloat(metric.Value(), 'f', -1, 32), t.Unix(), tagmap})\n\t\t}\n\t})\n\t// Handle each container's metrics.\n\tfor key, registry := range sr.containerRegistries {\n\t\treg, _ := registry.(*metrics.StandardRegistry)\n\t\treg.Each(func(name string, i interface{}) {\n\t\t\ttagmap := 
map[string]string{\n\t\t\t\t\"controlplane_host_id\": sr.hostID,\n\t\t\t\t\"controlplane_service_id\": key.serviceID,\n\t\t\t\t\"controlplane_instance_id\": strconv.FormatInt(int64(key.instanceID), 10),\n\t\t\t}\n\t\t\tswitch metric := i.(type) {\n\t\t\tcase metrics.Gauge:\n\t\t\t\tstats = append(stats, Sample{name, strconv.FormatInt(metric.Value(), 10), t.Unix(), tagmap})\n\t\t\tcase metrics.GaugeFloat64:\n\t\t\t\tstats = append(stats, Sample{name, strconv.FormatFloat(metric.Value(), 'f', -1, 32), t.Unix(), tagmap})\n\t\t\t}\n\t\t})\n\t}\n\treturn stats\n}", "func (f *RemoteRuntime) ListContainerStats(ctx context.Context, req *kubeapi.ListContainerStatsRequest) (*kubeapi.ListContainerStatsResponse, error) {\n\tstats, err := f.RuntimeService.ListContainerStats(ctx, req.Filter)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &kubeapi.ListContainerStatsResponse{Stats: stats}, nil\n}", "func Statistics(commands <-chan parser.Cmd) (*Stats, error) {\n\tstats := Stats{\n\t\tPropertiesPerType: make(map[string]int),\n\t\tPropertiesPerDepth: make(map[int]int),\n\t\tNodesPerDepth: make(map[int]int),\n\t\tValuesPerSize: make(map[int]int),\n\t}\n\n\tif err := stats.parse(commands); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &stats, nil\n}", "func (dc *DockerEnvContainer) Stat() BackendCommonInformation {\n\treturn BackendCommonInformation{\n\t\tURL: dc.url,\n\t\tPort: dc.Port,\n\t\tName: dc.ContainerName,\n\t\tID: dc.ContainerID,\n\t}\n}", "func GetStatistics() (*Statistics, error) {\n\treturn GetStatisticsFromPath(defaultLogPath)\n}", "func (b *Bar) GetStatistics() *Statistics {\n\ts := b.getState()\n\treturn newStatistics(&s)\n}", "func GetVolumeStats(address string, obj interface{}) (error, int) {\n\tcontroller, err := NewControllerClient(address)\n\tif err != nil {\n\t\treturn err, -1\n\t}\n\turl := controller.address + \"/stats\"\n\tresp, err := controller.httpClient.Get(url)\n\tif resp != nil {\n\t\tif resp.StatusCode == 500 {\n\t\t\treturn err, 500\n\t\t} else if resp.StatusCode == 503 {\n\t\t\treturn err, 503\n\t\t}\n\t} else {\n\t\treturn err, -1\n\t}\n\tif err != nil {\n\t\treturn err, -1\n\t}\n\tdefer resp.Body.Close()\n\trc := json.NewDecoder(resp.Body).Decode(obj)\n\treturn rc, 0\n}", "func (c *ColumnChunkMetaData) Statistics() (TypedStatistics, error) {\n\tok, err := c.StatsSet()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif ok {\n\t\treturn c.possibleStats, nil\n\t}\n\treturn nil, nil\n}", "func (client *Client) DescribeContainerStatistics(request *DescribeContainerStatisticsRequest) (_result *DescribeContainerStatisticsResponse, _err error) {\n\truntime := &util.RuntimeOptions{}\n\t_result = &DescribeContainerStatisticsResponse{}\n\t_body, _err := client.DescribeContainerStatisticsWithOptions(request, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_result = _body\n\treturn _result, _err\n}", "func (s *VicStreamProxy) StreamContainerStats(ctx context.Context, config *convert.ContainerStatsConfig) error {\n\top := trace.FromContext(ctx, \"\")\n\tdefer trace.End(trace.Begin(config.ContainerID, op))\n\topID := op.ID()\n\n\tif s.client == nil {\n\t\treturn errors.NillPortlayerClientError(\"StreamProxy\")\n\t}\n\n\t// create a child context that we control\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\tparams := containers.NewGetContainerStatsParamsWithContext(op).WithOpID(&opID)\n\tparams.ID = config.ContainerID\n\tparams.Stream = config.Stream\n\n\tconfig.Ctx = ctx\n\tconfig.Cancel = cancel\n\n\t// create our 
converter\n\tcontainerConverter := convert.NewContainerStats(config)\n\t// provide the writer for the portLayer and start listening for metrics\n\twriter := containerConverter.Listen()\n\tif writer == nil {\n\t\t// problem with the listener\n\t\treturn errors.InternalServerError(fmt.Sprintf(\"unable to gather container(%s) statistics\", config.ContainerID))\n\t}\n\n\t_, err := s.client.Containers.GetContainerStats(params, writer)\n\tif err != nil {\n\t\tswitch err := err.(type) {\n\t\tcase *containers.GetContainerStatsNotFound:\n\t\t\treturn errors.NotFoundError(config.ContainerID)\n\t\tcase *containers.GetContainerStatsInternalServerError:\n\t\t\treturn errors.InternalServerError(\"Server error from the interaction port layer\")\n\t\tdefault:\n\t\t\tif ctx.Err() == context.Canceled {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t//Check for EOF. Since the connection, transport, and data handling are\n\t\t\t//encapsulated inside of Swagger, we can only detect EOF by checking the\n\t\t\t//error string\n\t\t\tif strings.Contains(err.Error(), SwaggerSubstringEOF) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn errors.InternalServerError(fmt.Sprintf(\"Unknown error from the interaction port layer: %s\", err))\n\t\t}\n\t}\n\treturn nil\n}", "func (s *Service) Statistics(tags map[string]string) []models.Statistic {\n\treturn []models.Statistic{{\n\t\tName: \"opentsdb\",\n\t\tTags: s.defaultTags.Merge(tags),\n\t\tValues: map[string]interface{}{\n\t\t\tstatHTTPConnectionsHandled: atomic.LoadInt64(&s.stats.HTTPConnectionsHandled),\n\t\t\tstatTelnetConnectionsActive: atomic.LoadInt64(&s.stats.ActiveTelnetConnections),\n\t\t\tstatTelnetConnectionsHandled: atomic.LoadInt64(&s.stats.HandledTelnetConnections),\n\t\t\tstatTelnetPointsReceived: atomic.LoadInt64(&s.stats.TelnetPointsReceived),\n\t\t\tstatTelnetBytesReceived: atomic.LoadInt64(&s.stats.TelnetBytesReceived),\n\t\t\tstatTelnetReadError: atomic.LoadInt64(&s.stats.TelnetReadError),\n\t\t\tstatTelnetBadLine: atomic.LoadInt64(&s.stats.TelnetBadLine),\n\t\t\tstatTelnetBadTime: atomic.LoadInt64(&s.stats.TelnetBadTime),\n\t\t\tstatTelnetBadTag: atomic.LoadInt64(&s.stats.TelnetBadTag),\n\t\t\tstatTelnetBadFloat: atomic.LoadInt64(&s.stats.TelnetBadFloat),\n\t\t\tstatBatchesTransmitted: atomic.LoadInt64(&s.stats.BatchesTransmitted),\n\t\t\tstatPointsTransmitted: atomic.LoadInt64(&s.stats.PointsTransmitted),\n\t\t\tstatBatchesTransmitFail: atomic.LoadInt64(&s.stats.BatchesTransmitFail),\n\t\t\tstatConnectionsActive: atomic.LoadInt64(&s.stats.ActiveConnections),\n\t\t\tstatConnectionsHandled: atomic.LoadInt64(&s.stats.HandledConnections),\n\t\t\tstatDroppedPointsInvalid: atomic.LoadInt64(&s.stats.InvalidDroppedPoints),\n\t\t},\n\t}}\n}", "func toContainerStats0(s *cgroups.Stats, ret *info.ContainerStats) {\n\tret.Cpu.Usage.User = s.CpuStats.CpuUsage.UsageInUsermode\n\tret.Cpu.Usage.System = s.CpuStats.CpuUsage.UsageInKernelmode\n\tn := len(s.CpuStats.CpuUsage.PercpuUsage)\n\tret.Cpu.Usage.PerCpu = make([]uint64, n)\n\n\tret.Cpu.Usage.Total = 0\n\tfor i := 0; i < n; i++ {\n\t\tret.Cpu.Usage.PerCpu[i] = s.CpuStats.CpuUsage.PercpuUsage[i]\n\t\tret.Cpu.Usage.Total += s.CpuStats.CpuUsage.PercpuUsage[i]\n\t}\n}", "func getContainerCount(mb model.MessageBody) int {\n\tswitch v := mb.(type) {\n\tcase *model.CollectorProc:\n\t\treturn len(v.GetContainers())\n\tcase *model.CollectorRealTime:\n\t\treturn len(v.GetContainerStats())\n\tcase *model.CollectorContainer:\n\t\treturn len(v.GetContainers())\n\tcase *model.CollectorContainerRealTime:\n\t\treturn len(v.GetStats())\n\tcase 
*model.CollectorConnections:\n\t\treturn 0\n\t}\n\treturn 0\n}", "func NewStats(container string) *Stats {\n\treturn &Stats{StatsEntry: StatsEntry{Container: container}}\n}", "func CreateStats(cluster, namespace, volumeName, deploymentName, mountPath, pathRestic, podName string) string {\n\tvar stats map[string]interface{}\n\tvar nameStats string\n\tif cluster == \"ClusterFrom\" {\n\t\tstats = utils.ReadJson(\"templates/stats\", \"stats_template_from\")\n\t\tnameStats = \"statsFrom\"\n\t} else {\n\t\tstats = utils.ReadJson(\"templates/stats\", \"stats_template_to\")\n\t\tnameStats = \"statsTo\"\n\t}\n\n\tauxName := \"stats-\" + deploymentName\n\tsizeVolume := utils.GetSizeVolume(podName, volumeName, mountPath)\n\tstats[\"name\"] = auxName\n\tstats[\"size\"] = sizeVolume\n\terr := utils.WriteJson(pathRestic, nameStats, stats)\n\tif err != nil {\n\t\tfmt.Println(\"Error creating \" + auxName)\n\t}\n\treturn sizeVolume\n}", "func (s *VarlinkInterface) GetContainerStatsWithHistory(ctx context.Context, c VarlinkCall, previousStats_ ContainerStats) error {\n\treturn c.ReplyMethodNotImplemented(ctx, \"io.podman.GetContainerStatsWithHistory\")\n}", "func (client *NginxClient) GetStats() (*Stats, error) {\n\tinfo, err := client.GetNginxInfo()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get stats: %w\", err)\n\t}\n\n\tcaches, err := client.GetCaches()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get stats: %w\", err)\n\t}\n\n\tprocesses, err := client.GetProcesses()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get stats: %w\", err)\n\t}\n\n\tslabs, err := client.GetSlabs()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get stats: %w\", err)\n\t}\n\n\tcons, err := client.GetConnections()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get stats: %w\", err)\n\t}\n\n\trequests, err := client.GetHTTPRequests()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get stats: %w\", err)\n\t}\n\n\tssl, err := client.GetSSL()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get stats: %w\", err)\n\t}\n\n\tzones, err := client.GetServerZones()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get stats: %w\", err)\n\t}\n\n\tupstreams, err := client.GetUpstreams()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get stats: %w\", err)\n\t}\n\n\tstreamZones, err := client.GetStreamServerZones()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get stats: %w\", err)\n\t}\n\n\tstreamUpstreams, err := client.GetStreamUpstreams()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get stats: %w\", err)\n\t}\n\n\tstreamZoneSync, err := client.GetStreamZoneSync()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get stats: %w\", err)\n\t}\n\n\tlocationZones, err := client.GetLocationZones()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get stats: %w\", err)\n\t}\n\n\tresolvers, err := client.GetResolvers()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get stats: %w\", err)\n\t}\n\n\tlimitReqs, err := client.GetHTTPLimitReqs()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get stats: %w\", err)\n\t}\n\n\tlimitConnsHTTP, err := client.GetHTTPConnectionsLimit()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get stats: %w\", err)\n\t}\n\n\tlimitConnsStream, err := client.GetStreamConnectionsLimit()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get stats: %w\", err)\n\t}\n\n\tworkers, err := client.GetWorkers()\n\tif err != nil 
{\n\t\treturn nil, fmt.Errorf(\"failed to get stats: %w\", err)\n\t}\n\n\treturn &Stats{\n\t\tNginxInfo: *info,\n\t\tCaches: *caches,\n\t\tProcesses: *processes,\n\t\tSlabs: *slabs,\n\t\tConnections: *cons,\n\t\tHTTPRequests: *requests,\n\t\tSSL: *ssl,\n\t\tServerZones: *zones,\n\t\tStreamServerZones: *streamZones,\n\t\tUpstreams: *upstreams,\n\t\tStreamUpstreams: *streamUpstreams,\n\t\tStreamZoneSync: streamZoneSync,\n\t\tLocationZones: *locationZones,\n\t\tResolvers: *resolvers,\n\t\tHTTPLimitRequests: *limitReqs,\n\t\tHTTPLimitConnections: *limitConnsHTTP,\n\t\tStreamLimitConnections: *limitConnsStream,\n\t\tWorkers: workers,\n\t}, nil\n}", "func GetStats(args *Args, format string) string {\n\tcfg := config.GetConfig(args.ConfigFile)\n\t// init statistic for record\n\tstatistic.InitStatistic(cfg.Statistic)\n\n\tallQueueStatistic := []*statistic.QueueStatistic{}\n\n\tfor _, cc := range cfg.Redis {\n\t\tfor _, queueConfig := range cc.Queues {\n\t\t\ts := &statistic.QueueStatistic{\n\t\t\t\tQueueName: queueConfig.QueueName,\n\t\t\t\tSourceType: \"Redis\",\n\t\t\t\tIsEnabled: queueConfig.IsEnabled,\n\t\t\t}\n\n\t\t\tqi := &redis.QueueInstance{\n\t\t\t\tSource: cc.Config,\n\t\t\t\tQueue: queueConfig,\n\t\t\t}\n\n\t\t\tif queueConfig.IsDelayQueue {\n\t\t\t\ts.Normal, _ = qi.DelayLength(queueConfig.QueueName)\n\t\t\t} else {\n\t\t\t\ts.Normal, _ = qi.Length(queueConfig.QueueName)\n\t\t\t}\n\n\t\t\tif len(queueConfig.DelayOnFailure) > 0 {\n\t\t\t\tqueueName := fmt.Sprintf(\"%s:delayed\", queueConfig.QueueName)\n\t\t\t\ts.Delayed, _ = qi.DelayLength(queueName)\n\t\t\t}\n\n\t\t\ts.Success, _ = statistic.GetCounter(fmt.Sprintf(\"%s:success\", queueConfig.QueueName))\n\t\t\ts.Failure, _ = statistic.GetCounter(fmt.Sprintf(\"%s:failure\", queueConfig.QueueName))\n\n\t\t\ts.Total = s.Normal + s.Delayed + s.Success + s.Failure\n\n\t\t\tallQueueStatistic = append(allQueueStatistic, s)\n\t\t}\n\t}\n\n\tfor _, cc := range cfg.RabbitMQ {\n\t\tfor _, queueConfig := range cc.Queues {\n\t\t\ts := &statistic.QueueStatistic{\n\t\t\t\tQueueName: queueConfig.QueueName,\n\t\t\t\tSourceType: \"RabbitMQ\",\n\t\t\t\tIsEnabled: queueConfig.IsEnabled,\n\t\t\t}\n\n\t\t\t// qi := &rabbitmq.QueueInstance{\n\t\t\t// \tSource: cc.Config,\n\t\t\t// \tQueue: queueConfig,\n\t\t\t// }\n\t\t\t// todo get queue length\n\n\t\t\ts.Normal = 0\n\t\t\ts.Delayed = 0\n\n\t\t\ts.Success, _ = statistic.GetCounter(fmt.Sprintf(\"%s:success\", queueConfig.QueueName))\n\t\t\ts.Failure, _ = statistic.GetCounter(fmt.Sprintf(\"%s:failure\", queueConfig.QueueName))\n\n\t\t\ts.Total = s.Normal + s.Delayed + s.Success + s.Failure\n\n\t\t\tallQueueStatistic = append(allQueueStatistic, s)\n\t\t}\n\t}\n\n\tif \"json\" == format {\n\t\toutput, err := json.Marshal(allQueueStatistic)\n\n\t\tif nil != err {\n\t\t\treturn \"\"\n\t\t}\n\n\t\treturn string(output)\n\t}\n\n\toutput := fmt.Sprintf(\"%s %s statistics information\\n\\n\", constant.APPNAME, constant.APPVERSION)\n\tfor _, s := range allQueueStatistic {\n\t\tstatus := \"disable\"\n\t\tif s.IsEnabled {\n\t\t\tstatus = \"enable\"\n\t\t}\n\t\toutput += fmt.Sprintf(\" > Type: %-8s Status: %-8s Name: %s\\n%10d Total\\n%10d Normal\\n%10d Delayed\\n%10d Success\\n%10d Failure\\n\\n\", s.SourceType, status, s.QueueName, s.Total, s.Normal, s.Delayed, s.Success, s.Failure)\n\t}\n\n\tif \"html\" == format {\n\t\tstrings.Replace(output, \"\\n\", \"<br />\", -1)\n\t}\n\n\treturn output\n}", "func (d DockerHealth) Gather() (Data, error) {\n\tlog.Debug(\"gathering docker health\")\n\n\tif err := d.initClient(); 
err != nil {\n\t\treturn Data{}, err\n\t}\n\n\tcontainers, err := d.client.ContainerList(context.Background(), types.ContainerListOptions{All: false})\n\tif err != nil {\n\t\treturn Data{}, errors.Wrap(err, \"failed to list containers\")\n\t}\n\n\tdata := Data{}\n\tfor _, container := range containers {\n\t\tdimensions := GetDimensionsFromContainer(container, d.Label)\n\n\t\tc, err := d.client.ContainerInspect(context.Background(), container.ID)\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"failed to inspect container ID [%s]: %s\", container.ID, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar value = 0.0\n\t\tif c.State != nil && c.State.Health != nil && strings.ToLower(c.State.Health.Status) == \"healthy\" {\n\t\t\tvalue = 1.0\n\t\t}\n\t\thealthDataPoint := NewDataPoint(\"Health\", value, UnitCount, dimensions...)\n\t\tdata = append(data, &healthDataPoint)\n\t}\n\n\treturn data, nil\n}", "func (_e *MockDataCoord_Expecter) GetCollectionStatistics(ctx interface{}, req interface{}) *MockDataCoord_GetCollectionStatistics_Call {\n\treturn &MockDataCoord_GetCollectionStatistics_Call{Call: _e.mock.On(\"GetCollectionStatistics\", ctx, req)}\n}", "func (hg *HostGroup) GetInstanceStatistics(ctx context.Context, params InstanceStatisticsParams) ([]byte, error) {\n\treturn hg.client.PostInOut(ctx, \"/api/v1.0/HostGroup.GetInstanceStatistics\", params, nil)\n}", "func (t *Compose) Stats() {\n\tif !t.statsRunning {\n\t\tt.statsRunning = true\n\t\tt.statsQuit = make(chan struct{})\n\t\tgo func() {\n\t\t\trunning := false\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-t.statsQuit:\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t\tif !running {\n\t\t\t\t\t\trunning = true\n\t\t\t\t\t\tcmd := exec.Command(\"docker\", \"stats\", \"--no-stream\", \"--format\", \"\\\"{{.Name}}\\\\t{{.CPUPerc}}\\\\t{{.MemUsage}}\\\\t{{.MemPerc}}\\\"\")\n\t\t\t\t\t\tout, err := cmd.Output()\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tt.emitError(err.Error())\n\t\t\t\t\t\t}\n\t\t\t\t\t\treg := regexp.MustCompile(\"\\n\")\n\t\t\t\t\t\tlines := reg.Split(string(out), -1)\n\t\t\t\t\t\tlines = lines[:len(lines)-1]\n\t\t\t\t\t\tstatsa := []stats{}\n\t\t\t\t\t\tfor _, line := range lines {\n\t\t\t\t\t\t\treg = regexp.MustCompile(\"\\t\")\n\t\t\t\t\t\t\tcontArr := reg.Split(line, -1)\n\t\t\t\t\t\t\tname := strings.Replace(contArr[0], \"_1\", \"\", -1)\n\t\t\t\t\t\t\tname = strings.Replace(name, t.vuexState.Store.Settings.ContainerPrefix+\"_\", \"\", -1)\n\t\t\t\t\t\t\tname = strings.Replace(name, `\"`, \"\", -1)\n\t\t\t\t\t\t\tstat := stats{\n\t\t\t\t\t\t\t\tName: name,\n\t\t\t\t\t\t\t\tCPUPercString: contArr[1],\n\t\t\t\t\t\t\t\tCPUPerc: strings.Replace(contArr[1], `%`, \"\", -1),\n\t\t\t\t\t\t\t\tMemoryUseage: contArr[2],\n\t\t\t\t\t\t\t\tMemoryPercentString: strings.Replace(contArr[3], `\"`, \"\", -1),\n\t\t\t\t\t\t\t\tMemoryPercent: strings.Replace(contArr[3], `%\"`, \"\", -1),\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tstatsa = append(statsa, stat)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tres, mErr := json.Marshal(statsa)\n\t\t\t\t\t\tif mErr != nil {\n\t\t\t\t\t\t\tt.emitError(err.Error())\n\t\t\t\t\t\t}\n\t\t\t\t\t\tuEnc := b64.URLEncoding.EncodeToString(res)\n\t\t\t\t\t\tt.runtime.Events.Emit(\"stats\", uEnc)\n\t\t\t\t\t\trunning = false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ttime.Sleep(5 * time.Second)\n\t\t\t}\n\t\t}()\n\t}\n}", "func GetStats() Stats {\r\n\r\n\treturn stats\r\n}", "func (c *Consumer) ContainerMetrics(appGuid string, authToken string) ([]*events.ContainerMetric, error) {\n\tmessages := make([]*events.ContainerMetric, 0, 200)\n\tcallback := func(envelope 
*events.Envelope) error {\n\t\tif envelope.GetEventType() == events.Envelope_LogMessage {\n\t\t\treturn errors.New(fmt.Sprintf(\"Upstream error: %s\", envelope.GetLogMessage().GetMessage()))\n\t\t}\n\t\tmessages = append(messages, envelope.GetContainerMetric())\n\t\treturn nil\n\t}\n\terr := c.readTC(appGuid, authToken, \"containermetrics\", callback)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnoaa.SortContainerMetrics(messages)\n\treturn messages, err\n}", "func (o ContainerResourceMetricStatusOutput) Container() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ContainerResourceMetricStatus) string { return v.Container }).(pulumi.StringOutput)\n}", "func (o ContainerResourceMetricStatusOutput) Container() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ContainerResourceMetricStatus) string { return v.Container }).(pulumi.StringOutput)\n}", "func (c *vertexCollection) Statistics(ctx context.Context) (CollectionStatistics, error) {\n\tresult, err := c.rawCollection().Statistics(ctx)\n\tif err != nil {\n\t\treturn CollectionStatistics{}, WithStack(err)\n\t}\n\treturn result, nil\n}", "func Stats(c *libvirt.Connect, uuid string) error {\n\t//Check exists\n\td, err := c.LookupDomainByUUIDString(uuid)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to lookup: %s\", err)\n\t}\n\n\t//Check is running\n\ts, _, err := d.GetState()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed check state: %s\", err)\n\t}\n\tif s != libvirt.DOMAIN_RUNNING {\n\t\treturn fmt.Errorf(\"domain not running: %d\", s)\n\t}\n\n\tmemStats, err := memStats(d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Printf(\"STAT: %+v\\n\", memStats)\n\tfmt.Printf(\"STAT Used: %+v\\n\", memStats.Available-memStats.Unused)\n\tfmt.Printf(\"STAT Last: %s\\n\", time.Unix(int64(memStats.LastUpdate), 0))\n\n\tcpuStats, total, err := cpuStats(d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Printf(\"%+v\\n\", cpuStats)\n\tfmt.Printf(\"Total: %+#v\\n\", total)\n\n\tnetStats, err := netStats(d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Printf(\"NET: %+v\\n\", netStats)\n\n\t_, dTotal, err := diskStats(d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Printf(\"DISK: %+v\\n\", dTotal)\n\n\treturn nil\n}", "func (ooc *MockOpenoltClient) GetOnuStatistics(ctx context.Context, in *openolt.Onu, opts ...grpc.CallOption) (*openolt.OnuStatistics, error) {\n\treturn &openolt.OnuStatistics{}, nil\n}", "func GetStats(c *gin.Context) {\n\tstore := c.MustGet(\"store\").(*Store)\n\tstats := Stats{\n\t\tDocumentCount: store.Count(),\n\t\tDocumentBytes: 0,\n\t}\n\n\tc.JSON(http.StatusOK, stats)\n}", "func GetStats() (string, error) {\n\ttotalStats, err := getStats()\n\tstats := new(statistics)\n\tstats.NumAveraged = totalStats.totalCount\n\tstats.Average = totalStats.totalDuration / totalStats.totalCount\n\tb, err := json.Marshal(stats)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Received error when marshalling JSON: %s\", err)\n\t\tlog.Printf(msg)\n\t\treturn \"\", errors.New(msg)\n\t}\n\treturn string(b), nil\n}", "func (p *containerProvider) GetContainers(cacheValidity time.Duration, previousContainers map[string]*ContainerRateMetrics) ([]*model.Container, map[string]*ContainerRateMetrics, map[int]string, error) {\n\tcontainersMetadata := p.metadataStore.ListContainersWithFilter(workloadmeta.GetRunningContainers)\n\n\thostCPUCount := float64(system.HostCPUCount())\n\tprocessContainers := make([]*model.Container, 0)\n\trateStats := make(map[string]*ContainerRateMetrics)\n\tpidToCid := make(map[int]string)\n\tfor _, container := range 
containersMetadata {\n\t\tvar annotations map[string]string\n\t\tif pod, err := p.metadataStore.GetKubernetesPodForContainer(container.ID); err == nil {\n\t\t\tannotations = pod.Annotations\n\t\t}\n\n\t\tif p.filter != nil && p.filter.IsExcluded(annotations, container.Name, container.Image.Name, container.Labels[kubernetes.CriContainerNamespaceLabel]) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif container.Runtime == workloadmeta.ContainerRuntimeGarden && len(container.CollectorTags) == 0 {\n\t\t\tlog.Debugf(\"No tags found for garden container: %s, skipping\", container.ID)\n\t\t\tcontinue\n\t\t}\n\n\t\tentityID := containers.BuildTaggerEntityName(container.ID)\n\t\ttags, err := tagger.Tag(entityID, collectors.HighCardinality)\n\t\tif err != nil {\n\t\t\tlog.Debugf(\"Could not collect tags for container %q, err: %v\", container.ID[:12], err)\n\t\t}\n\t\ttags = append(tags, container.CollectorTags...)\n\n\t\toutPreviousStats := NullContainerRates\n\t\t// Name and Image fields exist but are never filled\n\t\tprocessContainer := &model.Container{\n\t\t\tType: convertContainerRuntime(container.Runtime),\n\t\t\tId: container.ID,\n\t\t\tStarted: container.State.StartedAt.Unix(),\n\t\t\tCreated: container.State.CreatedAt.Unix(),\n\t\t\tTags: tags,\n\t\t\tState: convertContainerStatus(container.State.Status),\n\t\t\tHealth: convertHealthStatus(container.State.Health),\n\t\t\tAddresses: computeContainerAddrs(container),\n\t\t}\n\t\t// Always adding container if we have metadata as we do want to report containers without stats\n\t\tprocessContainers = append(processContainers, processContainer)\n\n\t\t// Gathering container & network statistics\n\t\tpreviousContainerRates := previousContainers[container.ID]\n\t\tif previousContainerRates == nil {\n\t\t\tpreviousContainerRates = &NullContainerRates\n\t\t}\n\n\t\tcollector := p.metricsProvider.GetCollector(string(container.Runtime))\n\t\tif collector == nil {\n\t\t\tlog.Infof(\"No metrics collector available for runtime: %s, skipping container: %s\", container.Runtime, container.ID)\n\t\t\tcontinue\n\t\t}\n\n\t\tcontainerStats, err := collector.GetContainerStats(container.Namespace, container.ID, cacheValidity)\n\t\tif err != nil || containerStats == nil {\n\t\t\tlog.Debugf(\"Container stats for: %+v not available through collector %q, err: %v\", container, collector.ID(), err)\n\t\t\t// If main container stats are missing, we skip the container\n\t\t\tcontinue\n\t\t}\n\t\tcomputeContainerStats(hostCPUCount, containerStats, previousContainerRates, &outPreviousStats, processContainer)\n\n\t\t// Building PID to CID mapping for NPM\n\t\tif containerStats.PID != nil {\n\t\t\tfor _, pid := range containerStats.PID.PIDs {\n\t\t\t\tpidToCid[pid] = container.ID\n\t\t\t}\n\t\t}\n\n\t\tcontainerNetworkStats, err := collector.GetContainerNetworkStats(container.Namespace, container.ID, cacheValidity)\n\t\tif err != nil {\n\t\t\tlog.Debugf(\"Container network stats for: %+v not available through collector %q, err: %v\", container, collector.ID(), err)\n\t\t}\n\t\tcomputeContainerNetworkStats(containerNetworkStats, previousContainerRates, &outPreviousStats, processContainer)\n\n\t\t// Storing previous stats\n\t\trateStats[processContainer.Id] = &outPreviousStats\n\t}\n\n\treturn processContainers, rateStats, pidToCid, nil\n}", "func (a *Client) GetRuntimeStatistics(params *GetRuntimeStatisticsParams, authInfo runtime.ClientAuthInfoWriter) (*GetRuntimeStatisticsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = 
NewGetRuntimeStatisticsParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"GetRuntimeStatistics\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/v1/runtimes/statistics\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetRuntimeStatisticsReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetRuntimeStatisticsOK), nil\n\n}", "func (s *SingleRuntime) GetStats() *models.NativeStatsCollection {\n\trAPI := \"\"\n\tif s.worker != 0 {\n\t\trAPI = fmt.Sprintf(\"%s@%v\", s.socketPath, s.worker)\n\t} else {\n\t\trAPI = s.socketPath\n\t}\n\tresult := &models.NativeStatsCollection{RuntimeAPI: rAPI}\n\trawdata, err := s.ExecuteRaw(\"show stat\")\n\tif err != nil {\n\t\tresult.Error = err.Error()\n\t\treturn result\n\t}\n\tlines := strings.Split(rawdata[2:], \"\\n\")\n\tstats := []*models.NativeStat{}\n\tkeys := strings.Split(lines[0], \",\")\n\t//data := []map[string]string{}\n\tfor i := 1; i < len(lines); i++ {\n\t\tdata := map[string]string{}\n\t\tline := strings.Split(lines[i], \",\")\n\t\tif len(line) < len(keys) {\n\t\t\tcontinue\n\t\t}\n\t\tfor index, key := range keys {\n\t\t\tif len(line[index]) > 0 {\n\t\t\t\tdata[key] = line[index]\n\t\t\t}\n\t\t}\n\t\toneLineData := &models.NativeStat{}\n\t\ttString := strings.ToLower(line[1])\n\t\tif tString == \"backend\" || tString == \"frontend\" {\n\t\t\toneLineData.Name = line[0]\n\t\t\toneLineData.Type = tString\n\t\t} else {\n\t\t\toneLineData.Name = tString\n\t\t\toneLineData.Type = \"server\"\n\t\t\toneLineData.BackendName = line[0]\n\t\t}\n\n\t\tvar st models.NativeStatStats\n\t\tdecoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{\n\t\t\tResult: &st,\n\t\t\tWeaklyTypedInput: true,\n\t\t\tTagName: \"json\",\n\t\t})\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\terr = decoder.Decode(data)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\toneLineData.Stats = &st\n\n\t\tstats = append(stats, oneLineData)\n\t}\n\tresult.Stats = stats\n\treturn result\n}", "func GetConnPoolStats(client *mongo.Client) *ConnPoolStats {\n\tresult := &ConnPoolStats{}\n\terr := client.Database(\"admin\").RunCommand(context.TODO(), bson.D{{\"connPoolStats\", 1}, {\"recordStats\", 0}}).Decode(result)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to get connPoolStats: %s.\", err)\n\t\treturn nil\n\t}\n\treturn result\n}", "func containerInspect(ociBin string, name string) (*RunResult, error) {\n\treturn runCmd(exec.Command(ociBin, \"inspect\", name))\n}", "func (s StreamService) GetStreamsSummary() {\n\n}", "func (cc *collectorCache) GetContainerNetworkStats(containerNS, containerID string, cacheValidity time.Duration) (*ContainerNetworkStats, error) {\n\t// Generics could be useful. 
Meanwhile copy-paste.\n\tcurrentTime := time.Now()\n\tcacheKey := contNetStatsCachePrefix + containerNS + containerID\n\n\tentry, found, err := cc.cache.Get(currentTime, cacheKey, cacheValidity)\n\tif found {\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn entry.(*ContainerNetworkStats), nil\n\t}\n\n\t// No cache, cacheValidity is 0 or too old value\n\tval, err := cc.collector.GetContainerNetworkStats(containerNS, containerID, cacheValidity)\n\tif err != nil {\n\t\tcc.cache.Store(currentTime, cacheKey, nil, err)\n\t\treturn nil, err\n\t}\n\n\tcc.cache.Store(currentTime, cacheKey, val, nil)\n\treturn val, nil\n}", "func (s *Service) GetResourcesStats(start, end time.Time) (*statistic.BaseStats, error) {\n\tresult := statistic.NewBaseStats()\n\n\tsLogs, err := s.store.FetchLogs(start, end, \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor len(sLogs) > 0 {\n\t\terr = result.CalculateLogs(sLogs)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tstart = sLogs[len(sLogs)-1].CreatedAt\n\n\t\tsLogs, err = s.store.FetchLogs(start, end, \"\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn result, nil\n}", "func TestContainerConsumedStats(t *testing.T) {\n\tt.Logf(\"Create a pod config and run sandbox container\")\n\tsb, sbConfig := PodSandboxConfigWithCleanup(t, \"sandbox1\", \"stats\")\n\n\ttestImage := images.Get(images.ResourceConsumer)\n\tEnsureImageExists(t, testImage)\n\n\tt.Logf(\"Create a container config and run container in a pod\")\n\tcontainerConfig := ContainerConfig(\n\t\t\"container1\",\n\t\ttestImage,\n\t\tWithTestLabels(),\n\t\tWithTestAnnotations(),\n\t)\n\tcn, err := runtimeService.CreateContainer(sb, containerConfig, sbConfig)\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\tassert.NoError(t, runtimeService.RemoveContainer(cn))\n\t}()\n\trequire.NoError(t, runtimeService.StartContainer(cn))\n\tdefer func() {\n\t\tassert.NoError(t, runtimeService.StopContainer(cn, 10))\n\t}()\n\n\tt.Logf(\"Fetch initial stats for container\")\n\tvar s *runtime.ContainerStats\n\trequire.NoError(t, Eventually(func() (bool, error) {\n\t\ts, err = runtimeService.ContainerStats(cn)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif goruntime.GOOS == \"windows\" {\n\t\t\tif s.GetMemory().GetWorkingSetBytes().GetValue() > 0 {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t} else {\n\t\t\tif s.GetWritableLayer().GetTimestamp() > 0 {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t}\n\t\treturn false, nil\n\t}, time.Second, 30*time.Second))\n\n\tinitialMemory := s.GetMemory().GetWorkingSetBytes().GetValue()\n\tt.Logf(\"Initial container memory consumption is %f MB. 
Consume 100 MB and expect the reported stats to increase accordingly\", float64(initialMemory)/(1024*1024))\n\n\t// consume 100 MB memory for 30 seconds.\n\tvar command []string\n\tif goruntime.GOOS == \"windows\" {\n\t\t// -d: Leak and touch memory in specified MBs\n\t\t// -c: Count of number of objects to allocate\n\t\tcommand = []string{\"testlimit.exe\", \"-accepteula\", \"-d\", \"25\", \"-c\", \"4\"}\n\t} else {\n\t\tcommand = []string{\"stress\", \"-m\", \"1\", \"--vm-bytes\", \"100M\", \"--vm-hang\", \"0\", \"-t\", \"30\"}\n\t}\n\n\tgo func() {\n\t\t_, _, err = runtimeService.ExecSync(cn, command, 30*time.Second)\n\t}()\n\n\trequire.NoError(t, Eventually(func() (bool, error) {\n\t\ts, err = runtimeService.ContainerStats(cn)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif s.GetMemory().GetWorkingSetBytes().GetValue() > initialMemory+100*1024*1024 {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t}, time.Second, 30*time.Second))\n}", "func (t *Tracer) GetStats() (map[string]interface{}, error) {\n\treturn nil, ebpf.ErrNotImplemented\n}", "func (r *KubeRunner) XDSStatistics(name, ns string) string {\n\treturn fmt.Sprintf(\"XDS statistics of %s/%s: CDS count: %d EDS count: %d\",\n\t\tns, name,\n\t\tr.CountOfCDS(name, ns),\n\t\tr.CountOfEDS(name, ns))\n}", "func (m *Monitor) Stats(ctx *context.Context) {\n\tctx.JSON(m.Holder.GetStats())\n}", "func (is *ImageServer) GetStats() (count int, avgWidth int, avgHeight int) {\n\tis.RLock()\n\tdefer is.RUnlock()\n\n\tcount = is.NumImages\n\tif is.NumImages > 0 {\n\t\tavgWidth = is.WidthSum / is.NumImages\n\t\tavgHeight = is.HeightSum / is.NumImages\n\t}\n\n\treturn\n}", "func (e *Entity) ContainerMetric() metric_dao.ContainerMetric {\n\n\tvar (\n\t\tcontainerMetric metric_dao.ContainerMetric\n\t)\n\n\tcontainerMetric = metric_dao.ContainerMetric{\n\t\tNamespace: e.Namespace,\n\t\tPodName: e.PodName,\n\t\tContainerName: e.ContainerName,\n\t\tMetrics: map[metric.ContainerMetricType][]metric.Sample{\n\t\t\tmetric.TypeContainerCPUUsageSecondsPercentage: e.Samples,\n\t\t},\n\t}\n\n\treturn containerMetric\n}", "func (stat *NetworkStat) GetStats() StatMap {\n\tstats := make(map[string][]float64, len(stat.count))\n\n\tfor i, node := range stat.bn.GetNodes() {\n\t\tstats[node.Name()] = []float64{\n\t\t\tfloat64(stat.count[i]) / float64(stat.total),\n\t\t\tfloat64(stat.total-stat.count[i]) / float64(stat.total),\n\t\t}\n\t}\n\treturn stats\n}", "func (ds *DiscoveryService) GetCacheStats(_ *restful.Request, response *restful.Response) {\n\tstats := make(map[string]*discoveryCacheStatEntry)\n\tfor k, v := range ds.sdsCache.stats() {\n\t\tstats[k] = v\n\t}\n\tfor k, v := range ds.cdsCache.stats() {\n\t\tstats[k] = v\n\t}\n\tfor k, v := range ds.rdsCache.stats() {\n\t\tstats[k] = v\n\t}\n\tif err := response.WriteEntity(discoveryCacheStats{stats}); err != nil {\n\t\tglog.Warning(err)\n\t}\n}", "func (p *cadvisorStatsProvider) ListPodStats(_ context.Context) ([]statsapi.PodStats, error) {\n\t// Gets node root filesystem information and image filesystem stats, which\n\t// will be used to populate the available and capacity bytes/inodes in\n\t// container stats.\n\trootFsInfo, err := p.cadvisor.RootFsInfo()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get rootFs info: %v\", err)\n\t}\n\timageFsInfo, err := p.cadvisor.ImagesFsInfo()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get imageFs info: %v\", err)\n\t}\n\tinfos, err := getCadvisorContainerInfo(p.cadvisor)\n\tif err != nil {\n\t\treturn nil, 
fmt.Errorf(\"failed to get container info from cadvisor: %v\", err)\n\t}\n\n\tfilteredInfos, allInfos := filterTerminatedContainerInfoAndAssembleByPodCgroupKey(infos)\n\t// Map each container to a pod and update the PodStats with container data.\n\tpodToStats := map[statsapi.PodReference]*statsapi.PodStats{}\n\tfor key, cinfo := range filteredInfos {\n\t\t// On systemd using devicemapper each mount into the container has an\n\t\t// associated cgroup. We ignore them to ensure we do not get duplicate\n\t\t// entries in our summary. For details on .mount units:\n\t\t// http://man7.org/linux/man-pages/man5/systemd.mount.5.html\n\t\tif strings.HasSuffix(key, \".mount\") {\n\t\t\tcontinue\n\t\t}\n\t\t// Build the Pod key if this container is managed by a Pod\n\t\tif !isPodManagedContainer(&cinfo) {\n\t\t\tcontinue\n\t\t}\n\t\tref := buildPodRef(cinfo.Spec.Labels)\n\n\t\t// Lookup the PodStats for the pod using the PodRef. If none exists,\n\t\t// initialize a new entry.\n\t\tpodStats, found := podToStats[ref]\n\t\tif !found {\n\t\t\tpodStats = &statsapi.PodStats{PodRef: ref}\n\t\t\tpodToStats[ref] = podStats\n\t\t}\n\n\t\t// Update the PodStats entry with the stats from the container by\n\t\t// adding it to podStats.Containers.\n\t\tcontainerName := kubetypes.GetContainerName(cinfo.Spec.Labels)\n\t\tif containerName == leaky.PodInfraContainerName {\n\t\t\t// Special case for infrastructure container which is hidden from\n\t\t\t// the user and has network stats.\n\t\t\tpodStats.Network = cadvisorInfoToNetworkStats(&cinfo)\n\t\t} else {\n\t\t\tcontainerStat := cadvisorInfoToContainerStats(containerName, &cinfo, &rootFsInfo, &imageFsInfo)\n\t\t\t// NOTE: This doesn't support the old pod log path, `/var/log/pods/UID`. For containers\n\t\t\t// using old log path, they will be populated by cadvisorInfoToContainerStats.\n\t\t\tpodUID := types.UID(podStats.PodRef.UID)\n\t\t\tlogs, err := p.hostStatsProvider.getPodContainerLogStats(podStats.PodRef.Namespace, podStats.PodRef.Name, podUID, containerName, &rootFsInfo)\n\t\t\tif err != nil {\n\t\t\t\tklog.ErrorS(err, \"Unable to fetch container log stats\", \"containerName\", containerName)\n\t\t\t} else {\n\t\t\t\tcontainerStat.Logs = logs\n\t\t\t}\n\t\t\tpodStats.Containers = append(podStats.Containers, *containerStat)\n\t\t}\n\t}\n\n\t// Add each PodStats to the result.\n\tresult := make([]statsapi.PodStats, 0, len(podToStats))\n\tfor _, podStats := range podToStats {\n\t\tmakePodStorageStats(podStats, &rootFsInfo, p.resourceAnalyzer, p.hostStatsProvider, false)\n\n\t\tpodUID := types.UID(podStats.PodRef.UID)\n\t\t// Lookup the pod-level cgroup's CPU and memory stats\n\t\tpodInfo := getCadvisorPodInfoFromPodUID(podUID, allInfos)\n\t\tif podInfo != nil {\n\t\t\tcpu, memory := cadvisorInfoToCPUandMemoryStats(podInfo)\n\t\t\tpodStats.CPU = cpu\n\t\t\tpodStats.Memory = memory\n\t\t\tpodStats.Swap = cadvisorInfoToSwapStats(podInfo)\n\t\t\tpodStats.ProcessStats = cadvisorInfoToProcessStats(podInfo)\n\t\t}\n\n\t\tstatus, found := p.statusProvider.GetPodStatus(podUID)\n\t\tif found && status.StartTime != nil && !status.StartTime.IsZero() {\n\t\t\tpodStats.StartTime = *status.StartTime\n\t\t\t// only append stats if we were able to get the start time of the pod\n\t\t\tresult = append(result, *podStats)\n\t\t}\n\t}\n\n\treturn result, nil\n}", "func (db *MongoDatabase) GetStats(collection_name string, fields []string) (map[string]interface{}, error) {\n\tcurrent_session := db.GetSession()\n\tdefer current_session.Close()\n\n\tcollection := 
current_session.DB(db.name).C(collection_name)\n\n\titer := collection.Find(nil).Iter()\n\n\tstats := GetDefaultStats()\n\n\tcount := 0\n\n\tvar result map[string]interface{}\n\tfor iter.Next(&result) {\n\t\tcount += 1\n\n\t\terr := AddEntryToStats(stats, result, fields)\n\n\t\tif err != nil {\n\t\t\treturn nil, convertMgoError(err)\n\t\t}\n\t}\n\n\terr := iter.Err()\n\n\tif err != nil {\n\t\treturn nil, convertMgoError(err)\n\t}\n\n\terr = iter.Close()\n\n\tif err != nil {\n\t\treturn nil, convertMgoError(err)\n\t}\n\n\tstats[\"count\"] = count\n\n\treturn stats, nil\n}", "func StatsSummary() *client.StatsSummaryResponse {\n\treturn &client.StatsSummaryResponse{\n\t\tResponse: []client.StatsSummary{\n\t\t\tclient.StatsSummary{\n\t\t\t\tSummaryTime: \"2015-05-14 14:39:47\",\n\t\t\t\tDeliveryService: \"test-ds1\",\n\t\t\t\tStatName: \"test-stat\",\n\t\t\t\tStatValue: \"3.1415\",\n\t\t\t\tCDNName: \"test-cdn\",\n\t\t\t},\n\t\t},\n\t}\n}", "func (c *CloudWatch) GetMetricStatistics(req *GetMetricStatisticsInput) (resp *GetMetricStatisticsResult, err error) {\n\tresp = &GetMetricStatisticsResult{}\n\terr = c.client.Do(\"GetMetricStatistics\", \"POST\", \"/\", req, resp)\n\treturn\n}", "func TestStatistics(t *testing.T) {\n\tet, err := createExplorerTester(\"TestStatistics\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tstats := et.explorer.Statistics()\n\tif stats.Height != et.explorer.blockchainHeight || et.explorer.blockchainHeight == 0 {\n\t\tt.Error(\"wrong height reported in stats object\")\n\t}\n\tif stats.TransactionCount != et.explorer.transactionCount || et.explorer.transactionCount == 0 {\n\t\tt.Error(\"wrong transaction count reported in stats object\")\n\t}\n}", "func GuestsGetStats(endpoint string) (int, []byte) {\n\tbuffer := getEndpointwithGuests(endpoint)\n\tbuffer.WriteString(\"/stats\")\n\n\tstatus, data := hq.Get(buffer.String())\n\n\treturn status, data\n}", "func (c *Container) GetClusterMetric(ctx echo.Context) error {\n metricsParam := strings.Split(ctx.QueryParam(\"metrics\"), \",\")\n clusterType := ctx.QueryParam(\"cluster_type\")\n nodeParam := ctx.QueryParam(\"node_name\")\n nodeList := []string{nodeParam}\n var err error = nil\n if nodeParam == \"\" {\n if clusterType == \"\" {\n nodeList, err = getNodes()\n } else if clusterType == \"PRIMARY\" {\n nodeList, err = getNodes(\"PRIMARY\")\n } else if clusterType == \"READ_REPLICA\" {\n nodeList, err = getNodes(\"READ_REPLICA\")\n }\n if err != nil {\n return ctx.String(http.StatusInternalServerError, err.Error())\n }\n }\n hostToUuid, err := helpers.GetHostToUuidMap(helpers.HOST)\n if err != nil {\n return ctx.String(http.StatusInternalServerError, err.Error())\n }\n // in case of errors parsing start/end time, set default start = 1 hour ago, end = now\n startTime, err := strconv.ParseInt(ctx.QueryParam(\"start_time\"), 10, 64)\n if err != nil {\n now := time.Now()\n startTime = now.Unix()\n }\n endTime, err := strconv.ParseInt(ctx.QueryParam(\"end_time\"), 10, 64)\n if err != nil {\n now := time.Now()\n endTime = now.Unix() - 60*60\n }\n\n metricResponse := models.MetricResponse{\n Data: []models.MetricData{},\n StartTimestamp: startTime,\n EndTimestamp: endTime,\n }\n\n session, err := c.GetSession()\n if err != nil {\n return ctx.String(http.StatusInternalServerError, err.Error())\n }\n\n for _, metric := range metricsParam {\n // Read from the table.\n var ts int64\n var value int\n var details string\n // need node uuid\n switch metric {\n case \"READ_OPS_PER_SEC\":\n rawMetricValues, err := 
getRawMetricsForAllNodes(READ_COUNT_METRIC,\n nodeList, hostToUuid, startTime, endTime, session, false)\n if err != nil {\n return ctx.String(http.StatusInternalServerError, err.Error())\n }\n rateMetrics := convertRawMetricsToRates(rawMetricValues)\n nodeMetricValues := reduceGranularityForAllNodes(startTime, endTime,\n rateMetrics, GRANULARITY_NUM_INTERVALS, true)\n metricValues := calculateCombinedMetric(nodeMetricValues, false)\n metricResponse.Data = append(metricResponse.Data, models.MetricData{\n Name: metric,\n Values: metricValues,\n })\n case \"WRITE_OPS_PER_SEC\":\n rawMetricValues, err := getRawMetricsForAllNodes(WRITE_COUNT_METRIC,\n nodeList, hostToUuid, startTime, endTime, session, false)\n if err != nil {\n return ctx.String(http.StatusInternalServerError, err.Error())\n }\n rateMetrics := convertRawMetricsToRates(rawMetricValues)\n nodeMetricValues := reduceGranularityForAllNodes(startTime, endTime,\n rateMetrics, GRANULARITY_NUM_INTERVALS, true)\n metricValues := calculateCombinedMetric(nodeMetricValues, false)\n metricResponse.Data = append(metricResponse.Data, models.MetricData{\n Name: metric,\n Values: metricValues,\n })\n case \"CPU_USAGE_USER\":\n metricValues, err := getAveragePercentageMetricData(\"cpu_usage_user\",\n nodeList, hostToUuid, startTime, endTime, session, true)\n if err != nil {\n return ctx.String(http.StatusInternalServerError, err.Error())\n }\n metricResponse.Data = append(metricResponse.Data, models.MetricData{\n Name: metric,\n Values: metricValues,\n })\n case \"CPU_USAGE_SYSTEM\":\n metricValues, err := getAveragePercentageMetricData(\"cpu_usage_system\",\n nodeList, hostToUuid, startTime, endTime, session, true)\n if err != nil {\n return ctx.String(http.StatusInternalServerError, err.Error())\n }\n metricResponse.Data = append(metricResponse.Data, models.MetricData{\n Name: metric,\n Values: metricValues,\n })\n case \"DISK_USAGE_GB\":\n // For disk usage, we assume every node reports the same metrics\n query := fmt.Sprintf(QUERY_FORMAT, \"system.metrics\", \"total_disk\",\n startTime*1000, endTime*1000)\n iter := session.Query(query).Iter()\n values := [][]float64{}\n for iter.Scan(&ts, &value, &details) {\n values = append(values,\n []float64{float64(ts) / 1000,\n float64(value) / helpers.BYTES_IN_GB})\n }\n if err := iter.Close(); err != nil {\n return ctx.String(http.StatusInternalServerError, err.Error())\n }\n sort.Slice(values, func(i, j int) bool {\n return values[i][0] < values[j][0]\n })\n query = fmt.Sprintf(QUERY_FORMAT, \"system.metrics\", \"free_disk\",\n startTime*1000, endTime*1000)\n iter = session.Query(query).Iter()\n freeValues := [][]float64{}\n for iter.Scan(&ts, &value, &details) {\n freeValues = append(freeValues,\n []float64{float64(ts) / 1000,\n float64(value) / helpers.BYTES_IN_GB})\n }\n if err := iter.Close(); err != nil {\n return ctx.String(http.StatusInternalServerError, err.Error())\n }\n sort.Slice(freeValues, func(i, j int) bool {\n return freeValues[i][0] < freeValues[j][0]\n })\n\n // assume query results for free and total disk have the same timestamps\n for index, pair := range freeValues {\n if index >= len(values) {\n break\n }\n values[index][1] -= float64(pair[1])\n }\n metricResponse.Data = append(metricResponse.Data, models.MetricData{\n Name: metric,\n Values: reduceGranularity(startTime, endTime, values,\n GRANULARITY_NUM_INTERVALS, true),\n })\n case \"PROVISIONED_DISK_SPACE_GB\":\n query := fmt.Sprintf(QUERY_FORMAT, \"system.metrics\", \"total_disk\",\n startTime*1000, endTime*1000)\n iter := 
session.Query(query).Iter()\n values := [][]float64{}\n for iter.Scan(&ts, &value, &details) {\n values = append(values,\n []float64{float64(ts) / 1000,\n float64(value) / helpers.BYTES_IN_GB})\n }\n if err := iter.Close(); err != nil {\n return ctx.String(http.StatusInternalServerError, err.Error())\n }\n sort.Slice(values, func(i, j int) bool {\n return values[i][0] < values[j][0]\n })\n metricResponse.Data = append(metricResponse.Data, models.MetricData{\n Name: metric,\n Values: reduceGranularity(startTime, endTime, values,\n GRANULARITY_NUM_INTERVALS, true),\n })\n case \"AVERAGE_READ_LATENCY_MS\":\n rawMetricValuesCount, err := getRawMetricsForAllNodes(READ_COUNT_METRIC,\n nodeList, hostToUuid, startTime, endTime, session, false)\n if err != nil {\n return ctx.String(http.StatusInternalServerError, err.Error())\n }\n\n rawMetricValuesSum, err := getRawMetricsForAllNodes(READ_SUM_METRIC,\n nodeList, hostToUuid, startTime, endTime, session, false)\n if err != nil {\n return ctx.String(http.StatusInternalServerError, err.Error())\n }\n\n rateMetricsCount := convertRawMetricsToRates(rawMetricValuesCount)\n rateMetricsSum := convertRawMetricsToRates(rawMetricValuesSum)\n\n rateMetricsCountReduced := reduceGranularityForAllNodes(startTime, endTime,\n rateMetricsCount, GRANULARITY_NUM_INTERVALS, false)\n\n rateMetricsSumReduced := reduceGranularityForAllNodes(startTime, endTime,\n rateMetricsSum, GRANULARITY_NUM_INTERVALS, false)\n\n rateMetricsCountCombined :=\n calculateCombinedMetric(rateMetricsCountReduced, false)\n rateMetricsSumCombined :=\n calculateCombinedMetric(rateMetricsSumReduced, false)\n\n latencyMetric :=\n divideMetricForAllNodes([][][]float64{rateMetricsSumCombined},\n [][][]float64{rateMetricsCountCombined})\n\n metricValues := latencyMetric[0]\n // Divide everything by 1000 to convert from microseconds to milliseconds\n divideMetricByConstant(metricValues, 1000)\n metricResponse.Data = append(metricResponse.Data, models.MetricData{\n Name: metric,\n Values: metricValues,\n })\n case \"AVERAGE_WRITE_LATENCY_MS\":\n rawMetricValuesCount, err := getRawMetricsForAllNodes(WRITE_COUNT_METRIC,\n nodeList, hostToUuid, startTime, endTime, session, false)\n if err != nil {\n return ctx.String(http.StatusInternalServerError, err.Error())\n }\n\n rawMetricValuesSum, err := getRawMetricsForAllNodes(WRITE_SUM_METRIC,\n nodeList, hostToUuid, startTime, endTime, session, false)\n if err != nil {\n return ctx.String(http.StatusInternalServerError, err.Error())\n }\n\n rateMetricsCount := convertRawMetricsToRates(rawMetricValuesCount)\n rateMetricsSum := convertRawMetricsToRates(rawMetricValuesSum)\n\n rateMetricsCountReduced := reduceGranularityForAllNodes(startTime, endTime,\n rateMetricsCount, GRANULARITY_NUM_INTERVALS, false)\n\n rateMetricsSumReduced := reduceGranularityForAllNodes(startTime, endTime,\n rateMetricsSum, GRANULARITY_NUM_INTERVALS, false)\n\n rateMetricsCountCombined :=\n calculateCombinedMetric(rateMetricsCountReduced, false)\n rateMetricsSumCombined :=\n calculateCombinedMetric(rateMetricsSumReduced, false)\n\n latencyMetric :=\n divideMetricForAllNodes([][][]float64{rateMetricsSumCombined},\n [][][]float64{rateMetricsCountCombined})\n\n metricValues := latencyMetric[0]\n // Divide everything by 1000 to convert from microseconds to milliseconds\n divideMetricByConstant(metricValues, 1000)\n metricResponse.Data = append(metricResponse.Data, models.MetricData{\n Name: metric,\n Values: metricValues,\n })\n case \"TOTAL_LIVE_NODES\":\n rawMetricValues, err := 
getRawMetricsForAllNodes(\"node_up\", nodeList,\n hostToUuid, startTime, endTime, session, false)\n if err != nil {\n return ctx.String(http.StatusInternalServerError, err.Error())\n }\n reducedMetric := reduceGranularityForAllNodes(startTime, endTime,\n rawMetricValues, GRANULARITY_NUM_INTERVALS, true)\n metricValues := calculateCombinedMetric(reducedMetric, false)\n // In cases where there is no data, set to 0\n for i, metric := range metricValues {\n if len(metric) < 2 {\n metricValues[i] = append(metricValues[i], 0)\n }\n }\n metricResponse.Data = append(metricResponse.Data, models.MetricData{\n Name: metric,\n Values: metricValues,\n })\n }\n }\n return ctx.JSON(http.StatusOK, metricResponse)\n}", "func (f FakeContainerImpl) GetContainerMetrics(containerID string) (*metrics.ContainerMetrics, error) {\n\treturn nil, nil\n}", "func (s *service) Stats() Stats {\n\ts.m.Lock()\n\tdefer s.m.Unlock()\n\n\tstats := Stats{\n\t\tServiceIdentity: s.serviceIdentity(),\n\t\tEndpoints: make([]*EndpointStats, 0),\n\t\tType: StatsResponseType,\n\t\tStarted: s.started,\n\t}\n\tfor _, endpoint := range s.endpoints {\n\t\tendpointStats := &EndpointStats{\n\t\t\tName: endpoint.stats.Name,\n\t\t\tSubject: endpoint.stats.Subject,\n\t\t\tNumRequests: endpoint.stats.NumRequests,\n\t\t\tNumErrors: endpoint.stats.NumErrors,\n\t\t\tLastError: endpoint.stats.LastError,\n\t\t\tProcessingTime: endpoint.stats.ProcessingTime,\n\t\t\tAverageProcessingTime: endpoint.stats.AverageProcessingTime,\n\t\t}\n\t\tif s.StatsHandler != nil {\n\t\t\tdata, _ := json.Marshal(s.StatsHandler(endpoint))\n\t\t\tendpointStats.Data = data\n\t\t}\n\t\tstats.Endpoints = append(stats.Endpoints, endpointStats)\n\t}\n\treturn stats\n}", "func statsHandler(a *sir.ApplicationContext, c web.C, w http.ResponseWriter, r *http.Request) (int, error) {\n\t// Number of available names in the pool\n\tremaining, _ := a.Redis.SCard(a.PoolKey).Result()\n\t// Number of taken names in the pool\n\ttaken, _ := a.Redis.SCard(a.AllocatedKey).Result()\n\t// Remaining\n\ttotal := remaining + taken\n\n\tresp, _ := json.Marshal(&statsResponse{\n\t\tTotal: total,\n\t\tTaken: taken,\n\t\tRemaining: remaining,\n\t})\n\n\tstatus := 200\n\n\tw.WriteHeader(status)\n\tw.Write(resp)\n\n\treturn status, nil\n}", "func (s *Service) Info(ctx context.Context) (project.InfoSet, error) {\n\tresult := project.InfoSet{}\n\tcontainers, err := s.collectContainers(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, c := range containers {\n\t\tinfo, err := c.Info(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresult = append(result, info)\n\t}\n\n\treturn result, nil\n}", "func (ts *EventService) Stats(dim repository.Stats) []map[string]interface{} {\n\tallStats := make([]map[string]interface{}, 0, 1)\n\tfor k, v := range ts.eventRepo.Events(dim) {\n\t\tstat := map[string]interface{}{\n\t\t\tstring(dim): k,\n\t\t\t\"pageViews\": v,\n\t\t}\n\t\tallStats = append(allStats, stat)\n\t}\n\treturn allStats\n}" ]
[ "0.7228869", "0.6948396", "0.68941295", "0.6769662", "0.67652166", "0.6666272", "0.6609697", "0.6449724", "0.6334005", "0.6292268", "0.6249015", "0.61238223", "0.60853606", "0.60837454", "0.6057728", "0.60456675", "0.6023461", "0.60232055", "0.6010118", "0.5982012", "0.59548634", "0.59525806", "0.5921905", "0.5918658", "0.58805025", "0.5862986", "0.5837976", "0.5829137", "0.5825263", "0.58095217", "0.5803957", "0.57687104", "0.5767643", "0.5724182", "0.5638555", "0.5618873", "0.56157136", "0.5601097", "0.55787194", "0.5573501", "0.5557103", "0.55508053", "0.55300623", "0.5511144", "0.55004495", "0.5487162", "0.54841375", "0.5482598", "0.5480895", "0.54784614", "0.5476876", "0.5476327", "0.54650974", "0.54600656", "0.54479486", "0.5389519", "0.5374569", "0.5370856", "0.5360684", "0.5322695", "0.5320145", "0.53180283", "0.5312273", "0.52990395", "0.52974254", "0.5292607", "0.5292607", "0.5279763", "0.52702045", "0.5258908", "0.52315676", "0.5210552", "0.5206465", "0.5206447", "0.52050287", "0.52003264", "0.5193459", "0.51604277", "0.515125", "0.5148775", "0.5144756", "0.5136642", "0.51325285", "0.51215225", "0.51176614", "0.51088136", "0.50997925", "0.50909925", "0.5089543", "0.5083456", "0.50816476", "0.50806504", "0.50785875", "0.50725806", "0.5067598", "0.5065366", "0.5057043", "0.50428027", "0.50224495", "0.5020796" ]
0.5097511
87
NewStatsFormat returns a format for rendering a CStatsContext
func NewStatsFormat(source, osType string) formatter.Format {
	if source == formatter.TableFormatKey {
		if osType == winOSType {
			return formatter.Format(winDefaultStatsTableFormat)
		}
		return formatter.Format(defaultStatsTableFormat)
	} else if source == formatter.AutoRangeFormatKey {
		return formatter.Format(autoRangeStatsTableFormat)
	}
	return formatter.Format(source)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func statsFormatWrite(ctx formatter.Context, Stats []StatsEntry, osType string, trunc bool) error {\n\trender := func(format func(subContext formatter.SubContext) error) error {\n\t\tfor _, cstats := range Stats {\n\t\t\tstatsCtx := &statsContext{\n\t\t\t\ts: cstats,\n\t\t\t\tos: osType,\n\t\t\t\ttrunc: trunc,\n\t\t\t}\n\t\t\tif err := format(statsCtx); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\tmemUsage := memUseHeader\n\tif osType == winOSType {\n\t\tmemUsage = winMemUseHeader\n\t}\n\tstatsCtx := statsContext{}\n\tstatsCtx.Header = formatter.SubHeaderContext{\n\t\t\"Container\": containerHeader,\n\t\t\"Name\": formatter.NameHeader,\n\t\t\"ID\": formatter.ContainerIDHeader,\n\t\t\"CPUPerc\": cpuPercHeader,\n\t\t\"MemUsage\": memUsage,\n\t\t\"MemPerc\": memPercHeader,\n\t\t\"NetIO\": netIOHeader,\n\t\t\"BlockIO\": blockIOHeader,\n\t\t\"PIDs\": pidsHeader,\n\t\t\"CurrentMemoryMin\": currentMemoryMinHeader,\n\t\t\"CurrentMemoryMax\": currentMemoryMaxHeader,\n\t\t\"OptiMemoryMin\": optiMemoryMinHeader,\n\t\t\"OptiMemoryMax\": optiMemoryMaxHeader,\n\t\t\"OptiCPUNumber\": optiCPUNumberHeader,\n\t\t\"UsedCPUPerc\": usedCPUPercHeader,\n\t\t\"OptiCPUTime\": optiCPUTimeHeader,\n\t}\n\tstatsCtx.os = osType\n\treturn ctx.Write(&statsCtx, render)\n}", "func NewFormat(chans int, freq freq.T, sc sample.Codec) *Format {\n\treturn &Format{\n\t\tchannels: chans,\n\t\tfreq: freq,\n\t\tCodec: sc}\n}", "func newFormat(format string) eval.Format {\n\treturn parseFormat(format, NO_STRING, NO_STRING, nil)\n}", "func NewStats(container string) *Stats {\n\treturn &Stats{StatsEntry: StatsEntry{Container: container}}\n}", "func NewFormat(source string, quiet bool) formatter.Format {\n\tswitch source {\n\tcase formatter.TableFormatKey:\n\t\tif quiet {\n\t\t\treturn formatter.DefaultQuietFormat\n\t\t}\n\t\treturn defaultNetworkTableFormat\n\tcase formatter.RawFormatKey:\n\t\tif quiet {\n\t\t\treturn `network_id: {{.ID}}`\n\t\t}\n\t\treturn `network_id: {{.ID}}\\nname: {{.Name}}\\ndriver: {{.Driver}}\\nscope: {{.Scope}}\\n`\n\t}\n\treturn formatter.Format(source)\n}", "func (statsResult *Result) Format() string {\n\tformat := \"\"\n\tformat += fmt.Sprintln(\"Summary:\")\n\tformat += fmt.Sprintf(\"\\tClients:\\t%d\\n\", statsResult.Clients)\n\tformat += fmt.Sprintf(\"\\tParallel calls per client:\\t%d\\n\", statsResult.Parallels)\n\tformat += fmt.Sprintf(\"\\tTotal calls:\\t%d\\n\", statsResult.TotalCalls)\n\tformat += fmt.Sprintf(\"\\tTotal time:\\t%.3fs\\n\", statsResult.TotalTime)\n\tformat += fmt.Sprintf(\"\\tRequests per second:\\t%.3f\\n\", statsResult.RequestsPerSecond)\n\tformat += fmt.Sprintf(\"\\tFastest time for request:\\t%.6fms\\n\", statsResult.FastestTimeForRequest)\n\tformat += fmt.Sprintf(\"\\tAverage time per request:\\t%.6fms\\n\", statsResult.AverageTimePerRequest)\n\tformat += fmt.Sprintf(\"\\tSlowest time for request:\\t%.6fms\\n\\n\", statsResult.SlowestTimeForRequest)\n\tformat += fmt.Sprintln(\"Time:\")\n\tformat += fmt.Sprintf(\"\\t00.0001%%\\ttime for request:\\t%.6fms\\n\", statsResult.N000001thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t00.0010%%\\ttime for request:\\t%.6fms\\n\", statsResult.N000010thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t00.0100%%\\ttime for request:\\t%.6fms\\n\", statsResult.N000100thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t00.1000%%\\ttime for request:\\t%.6fms\\n\", statsResult.N001000thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t01.0000%%\\ttime for request:\\t%.6fms\\n\", statsResult.N010000thMillionthTime)\n\tformat += 
fmt.Sprintf(\"\\t05.0000%%\\ttime for request:\\t%.6fms\\n\", statsResult.N050000thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t10.0000%%\\ttime for request:\\t%.6fms\\n\", statsResult.N100000thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t25.0000%%\\ttime for request:\\t%.6fms\\n\", statsResult.N250000thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t50.0000%%\\ttime for request:\\t%.6fms\\n\", statsResult.N500000thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t75.0000%%\\ttime for request:\\t%.6fms\\n\", statsResult.N750000thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t90.0000%%\\ttime for request:\\t%.6fms\\n\", statsResult.N900000thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t95.0000%%\\ttime for request:\\t%.6fms\\n\", statsResult.N950000thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t99.0000%%\\ttime for request:\\t%.6fms\\n\", statsResult.N990000thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t99.9000%%\\ttime for request:\\t%.6fms\\n\", statsResult.N999000thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t99.9900%%\\ttime for request:\\t%.6fms\\n\", statsResult.N999900thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t99.9990%%\\ttime for request:\\t%.6fms\\n\", statsResult.N999990thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t99.9999%%\\ttime for request:\\t%.6fms\\n\\n\", statsResult.N999999thMillionthTime)\n\n\tif statsResult.TotalRequestBodySizes > 0 {\n\t\tformat += fmt.Sprintln(\"Request:\")\n\t\tformat += fmt.Sprintf(\"\\tTotal request body sizes:\\t%d\\n\", statsResult.TotalRequestBodySizes)\n\t\tformat += fmt.Sprintf(\"\\tAverage body size per request:\\t%.2f Byte\\n\", statsResult.AverageBodySizePerRequest)\n\t\tformat += fmt.Sprintf(\"\\tRequest rate per second:\\t%.2f Byte/s (%.2f MByte/s)\\n\\n\", statsResult.RequestRateBytePerSecond, statsResult.RequestRateMBytePerSecond)\n\t}\n\tif statsResult.TotalResponseBodySizes > 0 {\n\t\tformat += fmt.Sprintln(\"Response:\")\n\t\tformat += fmt.Sprintf(\"\\tTotal response body sizes:\\t%d\\n\", statsResult.TotalResponseBodySizes)\n\t\tformat += fmt.Sprintf(\"\\tAverage body size per response:\\t%.2f Byte\\n\", statsResult.AverageBodySizePerResponse)\n\t\tformat += fmt.Sprintf(\"\\tResponse rate per second:\\t%.2f Byte/s (%.2f MByte/s)\\n\\n\", statsResult.ResponseRateBytePerSecond, statsResult.ResponseRateMBytePerSecond)\n\t}\n\tformat += fmt.Sprintln(\"Result:\")\n\tformat += fmt.Sprintf(\"\\tResponse ok:\\t%d (%.3f%%)\\n\", statsResult.ResponseOk, statsResult.ResponseOkPercentile)\n\tformat += fmt.Sprintf(\"\\tErrors:\\t%d (%.3f%%)\\n\", statsResult.Errors, statsResult.ErrorsPercentile)\n\treturn format\n}", "func NewStats() *Stats {\n\tcs := new(Stats)\n\tcs.statMap = make(map[string]*FuncStat)\n\n\treturn cs\n}", "func NewStats() Stats {\n\treturn Stats{DeliveryService: map[enum.DeliveryServiceName]Stat{}}\n}", "func NewFormat(ctx context.Context, client *github.Client, debug bool) *Format {\n\treturn &Format{ctx: ctx, client: client, debug: debug}\n}", "func NewConnectivityFormat(source string, quiet bool) Format {\n\tswitch source {\n\tcase TableFormatKey:\n\t\tif quiet {\n\t\t\treturn connectivityTableQuietFormat\n\t\t}\n\t\treturn connectivityTableFormat\n\tcase RawFormatKey:\n\t\tif quiet {\n\t\t\treturn connectivityRawQuietFormat\n\t\t}\n\t\treturn connectivityRawFormat\n\tcase SummaryFormatKey:\n\t\treturn connectivitySummaryFormat\n\t}\n\treturn Format(source)\n}", "func New() *Stats {\n\treturn &Stats{\n\t\tStatusCode: map[string]uint64{},\n\t\tMethod: map[string]uint64{},\n\t\tPath: map[string]uint64{},\n\t\tInBytes: 
0,\n\t\tOutBytes: 0,\n\t}\n}", "func newStats(code, comment, blank int) *Stats {\n\ttotal := code + comment + blank\n\tif total == 0 {\n\t\ttotal = 1\n\t}\n\treturn &Stats{\n\t\tcode,\n\t\tcomment,\n\t\tblank,\n\t\tcode + comment + blank,\n\t\tfloat64(code) / float64(total) * 100,\n\t\tfloat64(comment) / float64(total) * 100,\n\t\tfloat64(blank) / float64(total) * 100,\n\t}\n}", "func NewDiffFormat(source string) formatter.Format {\n\tswitch source {\n\tcase formatter.TableFormatKey:\n\t\treturn defaultDiffTableFormat\n\t}\n\treturn formatter.Format(source)\n}", "func NewStats() *Stats {\n\ts := new(Stats)\n\ts.categories = make(map[string][]time.Duration)\n\ts.startedSamples = make(map[string]time.Duration)\n\treturn s\n}", "func NewStats() *Stats {\n\tr := &Stats{\n\t\tAdaptiveSelection: make(map[string]AdaptiveSelection, 0),\n\t\tAttributes: make(map[string]string, 0),\n\t\tBreakers: make(map[string]Breaker, 0),\n\t\tScriptCache: make(map[string][]ScriptCache, 0),\n\t\tThreadPool: make(map[string]ThreadCount, 0),\n\t}\n\n\treturn r\n}", "func (bs *blplStats) statsJSON() string {\n\tbuf := bytes.NewBuffer(make([]byte, 0, 128))\n\tfmt.Fprintf(buf, \"{\")\n\tfmt.Fprintf(buf, \"\\n \\\"TxnCount\\\": %v,\", bs.txnCount)\n\tfmt.Fprintf(buf, \"\\n \\\"QueryCount\\\": %v,\", bs.queryCount)\n\tfmt.Fprintf(buf, \"\\n \\\"QueriesPerSec\\\": %v,\", bs.queriesPerSec)\n\tfmt.Fprintf(buf, \"\\n \\\"TxnPerSec\\\": %v\", bs.txnsPerSec)\n\tfmt.Fprintf(buf, \"\\n \\\"TxnTime\\\": %v,\", bs.txnTime)\n\tfmt.Fprintf(buf, \"\\n \\\"QueryTime\\\": %v,\", bs.queryTime)\n\tfmt.Fprintf(buf, \"\\n}\")\n\treturn buf.String()\n}", "func newHTTPStats() *httpStats {\n\treturn &httpStats{}\n}", "func CreateStats(cluster, namespace, volumeName, deploymentName, mountPath, pathRestic, podName string) string {\n\tvar stats map[string]interface{}\n\tvar nameStats string\n\tif cluster == \"ClusterFrom\" {\n\t\tstats = utils.ReadJson(\"templates/stats\", \"stats_template_from\")\n\t\tnameStats = \"statsFrom\"\n\t} else {\n\t\tstats = utils.ReadJson(\"templates/stats\", \"stats_template_to\")\n\t\tnameStats = \"statsTo\"\n\t}\n\n\tauxName := \"stats-\" + deploymentName\n\tsizeVolume := utils.GetSizeVolume(podName, volumeName, mountPath)\n\tstats[\"name\"] = auxName\n\tstats[\"size\"] = sizeVolume\n\terr := utils.WriteJson(pathRestic, nameStats, stats)\n\tif err != nil {\n\t\tfmt.Println(\"Error creating \" + auxName)\n\t}\n\treturn sizeVolume\n}", "func Benchmark_Ctx_Format(b *testing.B) {\n\tapp := New()\n\tc := app.AcquireCtx(&fasthttp.RequestCtx{})\n\tdefer app.ReleaseCtx(c)\n\tc.Fasthttp.Request.Header.Set(\"Accept\", \"text/plain\")\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor n := 0; n < b.N; n++ {\n\t\tc.Format(\"Hello, World!\")\n\t}\n\tutils.AssertEqual(b, `Hello, World!`, string(c.Fasthttp.Response.Body()))\n}", "func newConnStats() *ConnStats {\n\treturn &ConnStats{}\n}", "func newConnStats() *ConnStats {\n\treturn &ConnStats{}\n}", "func createStat(votes *Vote) string {\n\n\tstats := NewStatistics(votes)\n\n\tstr := \"Total: \" + strconv.Itoa(stats.Total) + \"\\n\"\n\tfor value, users := range stats.Transformed {\n\t\tstr += value + \" (\" + strconv.Itoa(len(users)) + \"): \" + strings.Join(users, \", \") + \"\\n\"\n\t}\n\n\treturn str\n\n}", "func newStatsReporter() (*reporter, error) {\n\treturn &reporter{}, nil\n}", "func NewPercentage(format string, wcc ...WC) Decorator {\n\tif format == \"\" {\n\t\tformat = \"% d\"\n\t}\n\tf := func(s Statistics) string {\n\t\tp := internal.Percentage(s.Total, s.Current, 
100)\n\t\treturn fmt.Sprintf(format, percentageType(p))\n\t}\n\treturn Any(f, wcc...)\n}", "func newStatsProvider(\n\tcadvisor cadvisor.Interface,\n\tpodManager PodManager,\n\truntimeCache kubecontainer.RuntimeCache,\n\tcontainerStatsProvider containerStatsProvider,\n) *Provider {\n\treturn &Provider{\n\t\tcadvisor: cadvisor,\n\t\tpodManager: podManager,\n\t\truntimeCache: runtimeCache,\n\t\tcontainerStatsProvider: containerStatsProvider,\n\t}\n}", "func NewFormatter(format string) Formatter {\n\tif fun, ok := formatMap[format]; ok {\n\t\treturn fun()\n\t}\n\treturn nil\n}", "func New() *Formatter {\n\treturn &Formatter{\n\t\tformats: map[string]Format{},\n\t}\n}", "func Benchmark_Ctx_Format_JSON(b *testing.B) {\n\tapp := New()\n\tc := app.AcquireCtx(&fasthttp.RequestCtx{})\n\tdefer app.ReleaseCtx(c)\n\tc.Fasthttp.Request.Header.Set(\"Accept\", \"application/json\")\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor n := 0; n < b.N; n++ {\n\t\tc.Format(\"Hello, World!\")\n\t}\n\tutils.AssertEqual(b, `\"Hello, World!\"`, string(c.Fasthttp.Response.Body()))\n}", "func writeStats(to *os.File, final bool, s, t stats.Stats) {\n\tp := fmt.Fprintf\n\tpn := prettyNumber\n\tpb := prettyNumBytes\n\tpl := prettyLatency\n\tpt := prettyTimeStamp\n\tif final {\n\t\twriteStatsHeader(to)\n\t\tp(to, statsPrintHeader, pt(), \"Put\",\n\t\t\tpn(t.TotalPuts()),\n\t\t\tpb(t.TotalPutBytes()),\n\t\t\tpl(t.MinPutLatency(), t.AvgPutLatency(), t.MaxPutLatency()),\n\t\t\tpb(t.PutThroughput(time.Now())),\n\t\t\tpn(t.TotalErrPuts()))\n\t\tp(to, statsPrintHeader, pt(), \"Get\",\n\t\t\tpn(t.TotalGets()),\n\t\t\tpb(t.TotalGetBytes()),\n\t\t\tpl(t.MinGetLatency(), t.AvgGetLatency(), t.MaxGetLatency()),\n\t\t\tpb(t.GetThroughput(time.Now())),\n\t\t\tpn(t.TotalErrGets()))\n\t} else {\n\t\t// show interval stats; some fields are shown of both interval and total, for example, gets, puts, etc\n\t\tif s.TotalPuts() != 0 {\n\t\t\tp(to, statsPrintHeader, pt(), \"Put\",\n\t\t\t\tpn(s.TotalPuts())+\"(\"+pn(t.TotalPuts())+\")\",\n\t\t\t\tpb(s.TotalPutBytes())+\"(\"+pb(t.TotalPutBytes())+\")\",\n\t\t\t\tpl(s.MinPutLatency(), s.AvgPutLatency(), s.MaxPutLatency()),\n\t\t\t\tpb(s.PutThroughput(time.Now()))+\"(\"+pb(t.PutThroughput(time.Now()))+\")\",\n\t\t\t\tpn(s.TotalErrPuts())+\"(\"+pn(t.TotalErrPuts())+\")\")\n\t\t}\n\t\tif s.TotalGets() != 0 {\n\t\t\tp(to, statsPrintHeader, pt(), \"Get\",\n\t\t\t\tpn(s.TotalGets())+\"(\"+pn(t.TotalGets())+\")\",\n\t\t\t\tpb(s.TotalGetBytes())+\"(\"+pb(t.TotalGetBytes())+\")\",\n\t\t\t\tpl(s.MinGetLatency(), s.AvgGetLatency(), s.MaxGetLatency()),\n\t\t\t\tpb(s.GetThroughput(time.Now()))+\"(\"+pb(t.GetThroughput(time.Now()))+\")\",\n\t\t\t\tpn(s.TotalErrGets())+\"(\"+pn(t.TotalErrGets())+\")\")\n\t\t}\n\t}\n}", "func NewStat() *Stat {\n\treturn &Stat{}\n}", "func NewStatsExporter(ctx context.Context, opts *Options) (*StatsExporter, error) {\n\tclient, err := newMetricClient(ctx, opts.ClientOptions...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create a metric client: %v\", err)\n\t}\n\n\te := &StatsExporter{\n\t\tctx: ctx,\n\t\tclient: client,\n\t\topts: opts,\n\t\tprojDataMap: make(map[string]*projectData),\n\t}\n\n\t// We don't want to modify user-supplied options, so save default options directly in\n\t// exporter.\n\tif opts.GetProjectID != nil {\n\t\te.getProjectID = opts.GetProjectID\n\t} else {\n\t\te.getProjectID = defaultGetProjectID\n\t}\n\tif opts.OnError != nil {\n\t\te.onError = opts.OnError\n\t} else {\n\t\te.onError = defaultOnError\n\t}\n\tif opts.MakeResource != nil 
{\n\t\te.makeResource = opts.MakeResource\n\t} else {\n\t\te.makeResource = defaultMakeResource\n\t}\n\n\treturn e, nil\n}", "func fnFormat(ctx Context, doc *JDoc, params []string) interface{} {\n\tstats := ctx.Value(EelTotalStats).(*ServiceStats)\n\tif params == nil || len(params) == 0 || len(params) > 3 {\n\t\tctx.Log().Error(\"error_type\", \"func_format\", \"op\", \"format\", \"cause\", \"wrong_number_of_parameters\", \"params\", params)\n\t\tstats.IncErrors()\n\t\tAddError(ctx, SyntaxError{fmt.Sprintf(\"wrong number of parameters in call to format function\"), \"format\", params})\n\t\treturn \"\"\n\t}\n\tts := time.Now()\n\tif len(params) >= 1 {\n\t\tms, err := strconv.Atoi(extractStringParam(params[0]))\n\t\tif err != nil {\n\t\t\tctx.Log().Error(\"error_type\", \"func_format\", \"op\", \"format\", \"cause\", \"time_stamp_expected\", \"params\", params, \"error\", err.Error())\n\t\t\tstats.IncErrors()\n\t\t\tAddError(ctx, SyntaxError{fmt.Sprintf(\"time stamp parameter expected in call to format function\"), \"format\", params})\n\t\t\treturn \"\"\n\t\t}\n\t\tts = time.Unix(int64(ms/1000), 0)\n\t}\n\tlayout := \"3:04pm\"\n\tif len(params) >= 2 {\n\t\tlayout = extractStringParam(params[1])\n\t}\n\tif len(params) == 3 {\n\t\ttz, err := time.LoadLocation(extractStringParam(params[2]))\n\t\tif err == nil {\n\t\t\tts = ts.In(tz)\n\t\t} else {\n\t\t\tctx.Log().Error(\"error_type\", \"func_format\", \"op\", \"format\", \"cause\", \"failed_loading_location\", \"location\", extractStringParam(params[2]), \"error\", err.Error())\n\t\t\tAddError(ctx, RuntimeError{fmt.Sprintf(\"failed loading location %s in call to format function\", extractStringParam(params[2])), \"format\", params})\n\t\t}\n\t}\n\treturn ts.Format(layout)\n}", "func NewActionStats() ActionStats {\n stats := ActionStats{}\n stats.stats = make(map[string]*actionData)\n return stats\n}", "func NewCmdDBStats(o *StatsOption) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"stats\",\n\t\tShort: \"Get statistics\",\n\t\tLong: `This command get statistics`,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tif err := o.Validate(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn o.Run()\n\t\t},\n\t}\n\n\tcmd.Flags().StringVarP(&o.queryName, \"query\", \"q\", \"\", \"stats query name\")\n\tcmd.Flags().IntVarP(&o.year, \"year\", \"y\", 0, \"year\")\n\n\treturn cmd\n}", "func (o JsonSerializationOutput) Format() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v JsonSerialization) *string { return v.Format }).(pulumi.StringPtrOutput)\n}", "func newFSFormatV1() (format *formatConfigV1) {\n\treturn &formatConfigV1{\n\t\tVersion: \"1\",\n\t\tFormat: \"fs\",\n\t\tFS: &fsFormat{\n\t\t\tVersion: \"1\",\n\t\t},\n\t}\n}", "func (ctx *Context) Format(f fmt.State, c rune) {\n\tvar str string\n\n\tp, pok := f.Precision()\n\tif !pok {\n\t\tp = -1\n\t}\n\n\tswitch c {\n\tcase 'a':\n\t\tif f.Flag('#') {\n\t\t\tstr = ctx.ProxyClient()\n\t\t\tbreak\n\t\t}\n\t\tstr = ctx.Request.RemoteAddr\n\tcase 'b':\n\t\tif ctx.Bytes() == 0 {\n\t\t\tf.Write([]byte{45})\n\t\t\treturn\n\t\t}\n\t\tfallthrough\n\tcase 'B':\n\t\tstr = strconv.Itoa(ctx.Bytes())\n\tcase 'h':\n\t\tt := strings.Split(ctx.Request.RemoteAddr, \":\")\n\t\tstr = t[0]\n\tcase 'l':\n\t\tf.Write([]byte{45})\n\t\treturn\n\tcase 'm':\n\t\tstr = ctx.Request.Method\n\tcase 'q':\n\t\tstr = ctx.Request.URL.RawQuery\n\tcase 'r':\n\t\tstr = ctx.Request.Method + \" \" + ctx.Request.URL.RequestURI()\n\t\tif f.Flag('#') {\n\t\t\tbreak\n\t\t}\n\t\tstr += \" \" + 
ctx.Request.Proto\n\tcase 's':\n\t\tstr = strconv.Itoa(ctx.Status())\n\t\tif f.Flag('#') {\n\t\t\tstr += \" \" + http.StatusText(ctx.Status())\n\t\t}\n\tcase 't':\n\t\tt := ctx.Info.GetTime(\"context.start_time\")\n\t\tstr = t.Format(\"[02/Jan/2006:15:04:05 -0700]\")\n\tcase 'u':\n\t\t// XXX: i dont think net/http sets User\n\t\tif ctx.Request.URL.User == nil {\n\t\t\tf.Write([]byte{45})\n\t\t\treturn\n\t\t}\n\t\tstr = ctx.Request.URL.User.Username()\n\tcase 'v':\n\t\tstr = ctx.Request.Host\n\tcase 'A':\n\t\tstr = ctx.Request.UserAgent()\n\tcase 'C':\n\t\tstr = statusColor(ctx.Status())\n\tcase 'D':\n\t\twhen := ctx.Info.GetTime(\"context.start_time\")\n\t\tif when.IsZero() {\n\t\t\tf.Write([]byte(\"%!(BADTIME)\"))\n\t\t\treturn\n\t\t}\n\t\tpok = false\n\t\tstr = strconv.FormatFloat(time.Since(when).Seconds(), 'f', p, 32)\n\tcase 'H':\n\t\tstr = ctx.Request.Proto\n\tcase 'I':\n\t\tstr = fmt.Sprintf(\"%d\", ctx.Request.ContentLength)\n\tcase 'L':\n\t\tstr = ctx.Info.Get(\"context.request_id\")\n\tcase 'P':\n\t\ts := strings.Split(ctx.Request.Host, \":\")\n\t\tif len(s) > 1 {\n\t\t\tstr = s[1]\n\t\t\tbreak\n\t\t}\n\t\tstr = \"80\"\n\tcase 'R':\n\t\tstr = ctx.Request.Referer()\n\tcase 'U':\n\t\tstr = ctx.Request.URL.Path\n\t}\n\tif pok {\n\t\tstr = str[:p]\n\t}\n\tf.Write([]byte(str))\n}", "func NewStats() (*Stats, error) {\n\t// TODO: Make it singleton if possible.\n\tprocess, err := process.NewProcess(int32(os.Getpid()))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar cpuUsage float64\n\tif c, err := process.CPUPercent(); err == nil {\n\t\tcpuUsage = c\n\t}\n\tvar m runtime.MemStats\n\truntime.ReadMemStats(&m)\n\treturn &Stats{\n\t\tGoroutines: runtime.NumGoroutine(),\n\t\tCPUUsage: cpuUsage,\n\t\tMemStats: MemStats{\n\t\t\tHeapAlloc: m.HeapAlloc,\n\t\t\tHeapIdle: m.HeapIdle,\n\t\t\tHeapInuse: m.HeapInuse,\n\t\t},\n\t}, nil\n}", "func NewFormatter(format string, colored bool) (*Formatter, error) {\n\tfm := new(Formatter)\n\tfm.colored = colored\n\tif err := fm.SetFormat(format); err != nil {\n\t\treturn nil, err\n\t}\n\treturn fm, nil\n}", "func NewStatsWriter(cfg *config.AgentConfig, in <-chan *pb.StatsPayload, telemetryCollector telemetry.TelemetryCollector) *StatsWriter {\n\tsw := &StatsWriter{\n\t\tin: in,\n\t\tstats: &info.StatsWriterInfo{},\n\t\tstop: make(chan struct{}),\n\t\tflushChan: make(chan chan struct{}),\n\t\tsyncMode: cfg.SynchronousFlushing,\n\t\teasylog: log.NewThrottled(5, 10*time.Second), // no more than 5 messages every 10 seconds\n\t\tconf: cfg,\n\t}\n\tclimit := cfg.StatsWriter.ConnectionLimit\n\tif climit == 0 {\n\t\t// Allow 1% of the connection limit to outgoing sends. 
The original\n\t\t// connection limit was removed and used to be 2000 (1% = 20)\n\t\tclimit = 20\n\t}\n\tqsize := cfg.StatsWriter.QueueSize\n\tif qsize == 0 {\n\t\tpayloadSize := float64(maxEntriesPerPayload * bytesPerEntry)\n\t\t// default to 25% of maximum memory.\n\t\tmaxmem := cfg.MaxMemory / 4\n\t\tif maxmem == 0 {\n\t\t\t// or 250MB if unbound\n\t\t\tmaxmem = 250 * 1024 * 1024\n\t\t}\n\t\tqsize = int(math.Max(1, maxmem/payloadSize))\n\t}\n\tlog.Debugf(\"Stats writer initialized (climit=%d qsize=%d)\", climit, qsize)\n\tsw.senders = newSenders(cfg, sw, pathStats, climit, qsize, telemetryCollector)\n\treturn sw\n}", "func NewStatsCollector(cliContext *cli.Context) (*StatsCollector, error) {\n\n\t// fill the Collector struct\n\tcollector := &StatsCollector{\n\t\tcliContext: cliContext,\n\t\tsocketPath: cliContext.String(\"socketPath\"),\n\t\tkamailioHost: cliContext.String(\"host\"),\n\t\tkamailioPort: cliContext.Int(\"port\"),\n\t}\n\n\t// fine, return the created object struct\n\treturn collector, nil\n}", "func StatsJSON(dsStats Stats) StatsOld {\n\tnow := time.Now().Unix()\n\tjsonObj := &StatsOld{DeliveryService: map[enum.DeliveryServiceName]map[StatName][]StatOld{}}\n\n\tfor deliveryService, dsStat := range dsStats.DeliveryService {\n\t\tjsonObj.DeliveryService[deliveryService] = map[StatName][]StatOld{}\n\t\tjsonObj = addCommonData(jsonObj, dsStat.CommonData(), deliveryService, now)\n\t\tif stat, ok := dsStat.(*StatHTTP); ok {\n\t\t\tfor cacheGroup, cacheGroupStats := range stat.CacheGroups {\n\t\t\t\tjsonObj = addStatCacheStats(jsonObj, cacheGroupStats, deliveryService, string(\"location.\"+cacheGroup), now)\n\t\t\t}\n\t\t\tfor cacheType, typeStats := range stat.Type {\n\t\t\t\tjsonObj = addStatCacheStats(jsonObj, typeStats, deliveryService, \"type.\"+cacheType.String(), now)\n\t\t\t}\n\t\t\tjsonObj = addStatCacheStats(jsonObj, stat.Total, deliveryService, \"total\", now)\n\t\t}\n\t}\n\treturn *jsonObj\n}", "func (f *FakeFormatter) Format(b *bytes.Buffer, lvl golog.Level, ctx golog.Context, msg string, trace []byte) *bytes.Buffer {\n\tb.WriteString(msg)\n\treturn b\n}", "func (ft *Time) JSONStats() string {\n\treturn ft.Stats.JSON()\n}", "func (f *Framework) NewStatsCollector(name, version string) *StatsCollector {\n\tif f.StressTestLevel > 0 {\n\t\tname = fmt.Sprintf(\"stress_%v_%v\", f.StressTestLevel, name)\n\t}\n\treturn &StatsCollector{name: name, outputDir: f.PerfOutputDir, version: version}\n}", "func (f *LogstashFormatter) Format(\n\tb *bytes.Buffer,\n\tlvl Level,\n\tctx Context,\n\tmsg string,\n\ttrace []byte,\n) *bytes.Buffer {\n\tb.WriteString(`{\"@timestamp\":\"`)\n\tb.WriteString(time.Now().Format(time.RFC3339Nano))\n\tb.WriteString(`\",\"@version\":1,\"level\":\"`)\n\tb.WriteString(LevelToString(lvl))\n\tb.WriteString(`\",\"`)\n\n\tfor k, v := range ctx {\n\t\tb.WriteString(k)\n\t\tb.WriteString(`\":`)\n\t\tf.appendValue(b, v)\n\t\tb.WriteString(`,\"`)\n\t}\n\n\tb.WriteString(`message\":`)\n\tb.WriteString(strconv.Quote(string(msg)))\n\tif len(trace) > 0 {\n\t\tb.WriteString(`,\"trace\":`)\n\t\tb.WriteString(strconv.Quote(string(trace)))\n\t}\n\tb.WriteString(`}`)\n\tb.WriteByte('\\n')\n\treturn b\n}", "func New(name string) (Format, error) {\n\tif name == \"\" {\n\t\treturn nil, NewErrFormatNameIsEmpty()\n\t}\n\n\tif synonym, ok := synonyms[name]; ok {\n\t\tname = synonym\n\t}\n\n\tswitch name {\n\tcase JSON:\n\t\treturn NewJSON(), nil\n\tcase YAML:\n\t\treturn NewYAML(), nil\n\tcase TOML:\n\t\treturn NewTOML(), nil\n\tcase HEX:\n\t\treturn NewHEX(), nil\n\tcase 
BASE64:\n\t\treturn NewBASE64(), nil\n\tcase QUERY:\n\t\treturn NewQUERY(), nil\n\tcase FLATE:\n\t\treturn NewFLATE(), nil\n\tdefault:\n\t\treturn nil, NewErrNotSupported(name)\n\t}\n}", "func NewStereoFmt() *Format {\n\treturn &Format{\n\t\tchannels: 2,\n\t\tfreq: 44100 * freq.Hertz,\n\t\tCodec: sample.SInt16L}\n}", "func (miner *CGMiner) StatsContext(ctx context.Context) (Stats, error) {\n\tresult, err := miner.commandCtx(ctx, \"stats\", \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar resp statsResponse\n\t// fix incorrect json response from miner \"}{\"\n\tfixResponse := bytes.Replace(result, []byte(\"}{\"), []byte(\",\"), 1)\n\tfixResponse = bytes.Replace(fixResponse, []byte(\"},{\"), []byte(\",\"), 1)\n\terr = json.Unmarshal(fixResponse, &resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = miner.checkStatus(resp.Status)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(resp.Stats) < 1 {\n\t\treturn nil, errors.New(\"no stats in JSON response\")\n\t}\n\tif len(resp.Stats) > 1 {\n\t\treturn nil, errors.New(\"too many stats in JSON response\")\n\t}\n\n\treturn &resp.Stats[0], nil\n}", "func (metrics *Metrics) metricFormat(name string, value int) string {\n\treturn fmt.Sprintf(\n\t\t\"%s %d %d\\n\",\n\t\tmetrics.Tag+\".\"+name,\n\t\tvalue,\n\t\ttime.Now().Unix(),\n\t)\n}", "func NewFormat(prefix, indent, quote, lineEnd string) Format {\n\tif quote == \"\" {\n\t\tquote = `\"`\n\t}\n\tif lineEnd == \"\" {\n\t\tlineEnd = \"\\n\"\n\t}\n\treturn Format{\n\t\tPrefix: prefix,\n\t\tIndent: indent,\n\t\tIsBreaking: prefix != \"\" || indent != \"\",\n\t\tQuote: quote,\n\t\tLineEnd: lineEnd,\n\t}\n}", "func (s SamplesC64) Format() SampleFormat {\n\treturn SampleFormatC64\n}", "func MakeHandleStats(stats *Stats) func(w http.ResponseWriter, r *http.Request){\r\n return func (w http.ResponseWriter, r *http.Request){\r\n //Note totalTime does not picked up by JSON as it's lowercase\r\n jsonStats, err := json.Marshal(stats)\r\n if err != nil {\r\n sendError(w, \"404 Error getting JSON object\")\r\n return\r\n }\r\n w.Header().Set(\"Content-Type\", \"application/json\")\r\n f, err := w.Write(jsonStats)\r\n checkError(f, err)\r\n }\r\n}", "func NewStatsdNew(addr string, options ...statsd.Option) *statsd.Client {\n\tclient, err := statsd.New(addr, options...)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn client\n}", "func (a *dbeat) newNetStats(stats *types.StatsJSON) *NetStats {\n\tvar net = &NetStats{Time: stats.Read}\n\tfor _, netStats := range stats.Networks {\n\t\tnet.RxBytes += netStats.RxBytes\n\t\tnet.RxDropped += netStats.RxDropped\n\t\tnet.RxErrors += netStats.RxErrors\n\t\tnet.RxPackets += netStats.RxPackets\n\t\tnet.TxBytes += netStats.TxBytes\n\t\tnet.TxDropped += netStats.TxDropped\n\t\tnet.TxErrors += netStats.TxErrors\n\t\tnet.TxPackets += netStats.TxPackets\n\t}\n\treturn net\n}", "func (fs FS) NewFNATStats() (FNATStats, error) {\n\tfile, err := os.Open(fs.Path(\"net/ip_vs_stats\"))\n\tif err != nil {\n\t\treturn FNATStats{}, err\n\t}\n\tdefer file.Close()\n\n\treturn parseFNATStats(file)\n}", "func NewFormatter(providers map[string]Provider) Formatter {\n\treturn Formatter{\n\t\tproviders: providers,\n\t}\n}", "func (node *CurTimeFuncExpr) Format(buf *TrackedBuffer) {\n\tbuf.astPrintf(node, \"%s(%v)\", node.Name.String(), node.Fsp)\n}", "func (h *Handlers) GetStats(w http.ResponseWriter, r *http.Request) {\n\tdataJSON, err := h.pkgManager.GetStatsJSON(r.Context())\n\tif err != nil {\n\t\th.logger.Error().Err(err).Str(\"method\", 
\"GetStats\").Send()\n\t\thttp.Error(w, \"\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\thelpers.RenderJSON(w, dataJSON, helpers.DefaultAPICacheMaxAge)\n}", "func (pipe *pipe) SampleFormat() SampleFormat {\n\treturn pipe.format\n}", "func GetCurrentFormat(format string) string {\n\treturn UTCTimeString(GetCurrentTime(), format)\n}", "func (ColourFormatter) Format(span Span) string {\n\tduration, _ := time.ParseDuration(fmt.Sprintf(\"%dns\", span.Duration))\n\n\ttags := \"\"\n\n\tfor k, v := range span.Meta {\n\t\ttags = fmt.Sprintf(\"%s %s:%s\", tags, color.CyanString(k), strconv.Quote(v))\n\t}\n\n\treturn fmt.Sprintf(\n\t\t\"[Trace] %s %s %s %s %s %s%s\\n\",\n\t\tcolor.HiCyanString(span.Service),\n\t\tcolor.GreenString(span.Operation),\n\t\tcolor.MagentaString(span.Resource),\n\t\tcolor.WhiteString(span.Type),\n\t\tcolor.YellowString(\"%s\", duration),\n\t\tcolor.WhiteString(\"%d / %d\", span.ParentID, span.SpanID),\n\t\ttags,\n\t)\n}", "func GetStats() (string, error) {\n\ttotalStats, err := getStats()\n\tstats := new(statistics)\n\tstats.NumAveraged = totalStats.totalCount\n\tstats.Average = totalStats.totalDuration / totalStats.totalCount\n\tb, err := json.Marshal(stats)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Received error when marshalling JSON: %s\", err)\n\t\tlog.Printf(msg)\n\t\treturn \"\", errors.New(msg)\n\t}\n\treturn string(b), nil\n}", "func (s *Store) Stats() []byte {\n\tb, _ := json.Marshal(s.BasicStats)\n\treturn b\n}", "func (f *formatter) Format(pool *bufferPool, level logLevel, msg string) *bytes.Buffer {\n\tvar buf = pool.get()\n\tbuf.WriteString(fmt.Sprintf(\"%c %06v\", level.abbr(), atomic.LoadUint32(&currentTime)))\n\t_, file, line, ok := runtime.Caller(3)\n\tif ok {\n\t\tvar i int\n\t\tfor i = len(file) - 1; i >= 0; i-- {\n\t\t\tif file[i] == '/' {\n\t\t\t\tif i == len(file)-1 {\n\t\t\t\t\tbreak\n\t\t\t\t} else {\n\t\t\t\t\ti++\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tbuf.WriteString(fmt.Sprintf(\" [%v:%v] %v\\n\", file[i:], line, msg))\n\t}\n\treturn buf\n}", "func StatsOutput(c *VsmStatsCommand, annotations *Annotations, args []string, statusArray []string, stats1 VolumeStats, stats2 VolumeStats) error {\n\n\tvar (\n\t\terr error\n\n\t\tReadLatency int64\n\t\tWriteLatency int64\n\n\t\tAvgReadBlockCountPS int64\n\t\tAvgWriteBlockCountPS int64\n\t)\n\n\t// 10 and 64 represents decimal and bits respectively\n\ti_riops, _ := strconv.ParseInt(stats1.ReadIOPS, 10, 64) // Initial\n\tf_riops, _ := strconv.ParseInt(stats2.ReadIOPS, 10, 64) // Final\n\treadIOPS := f_riops - i_riops\n\n\ti_rtps, _ := strconv.ParseInt(stats1.TotalReadTime, 10, 64)\n\tf_rtps, _ := strconv.ParseInt(stats2.TotalReadTime, 10, 64)\n\treadTimePS := f_rtps - i_rtps\n\n\ti_rbps, _ := strconv.ParseInt(stats1.TotalReadBlockCount, 10, 64)\n\tf_rbps, _ := strconv.ParseInt(stats2.TotalReadBlockCount, 10, 64)\n\treadBlockCountPS := f_rbps - i_rbps\n\n\trThroughput := readBlockCountPS\n\tif readIOPS != 0 {\n\t\tReadLatency = readTimePS / readIOPS\n\t\tAvgReadBlockCountPS = readBlockCountPS / readIOPS\n\t} else {\n\t\tReadLatency = 0\n\t\tAvgReadBlockCountPS = 0\n\t}\n\n\ti_wiops, _ := strconv.ParseInt(stats1.WriteIOPS, 10, 64)\n\tf_wiops, _ := strconv.ParseInt(stats2.WriteIOPS, 10, 64)\n\twriteIOPS := f_wiops - i_wiops\n\n\ti_wtps, _ := strconv.ParseInt(stats1.TotalWriteTime, 10, 64)\n\tf_wtps, _ := strconv.ParseInt(stats2.TotalWriteTime, 10, 64)\n\twriteTimePS := f_wtps - i_wtps\n\n\ti_wbcps, _ := strconv.ParseInt(stats1.TotalWriteBlockCount, 10, 64)\n\tf_wbcps, _ := 
strconv.ParseInt(stats2.TotalWriteBlockCount, 10, 64)\n\twriteBlockCountPS := f_wbcps - i_wbcps\n\n\twThroughput := writeBlockCountPS\n\tif writeIOPS != 0 {\n\t\tWriteLatency = writeTimePS / writeIOPS\n\t\tAvgWriteBlockCountPS = writeBlockCountPS / writeIOPS\n\t} else {\n\t\tWriteLatency = 0\n\t\tAvgWriteBlockCountPS = 0\n\t}\n\n\tss, _ := strconv.ParseFloat(stats2.SectorSize, 64) // Sector Size\n\tss = ss / bytesToMB\n\n\tls, _ := strconv.ParseFloat(stats2.UsedBlocks, 64) // Logical Size\n\tls = ls * ss\n\n\tau, _ := strconv.ParseFloat(stats2.UsedLogicalBlocks, 64) // Actual Used\n\tau = au * ss\n\n\tannotation := Annotation{\n\t\tIQN: annotations.Iqn,\n\t\tVolume: args[0],\n\t\tPortal: annotations.TargetPortal,\n\t\tSize: annotations.VolSize,\n\t}\n\n\t// json formatting and showing default output\n\tif c.Json == \"json\" {\n\n\t\tstat1 := StatsArr{\n\n\t\t\tIQN: annotations.Iqn,\n\t\t\tVolume: args[0],\n\t\t\tPortal: annotations.TargetPortal,\n\t\t\tSize: annotations.VolSize,\n\n\t\t\tReadIOPS: readIOPS,\n\t\t\tWriteIOPS: writeIOPS,\n\n\t\t\tReadThroughput: float64(rThroughput) / bytesToMB, // bytes to MB\n\t\t\tWriteThroughput: float64(wThroughput) / bytesToMB,\n\n\t\t\tReadLatency: float64(ReadLatency) / mic_sec, // Microsecond\n\t\t\tWriteLatency: float64(WriteLatency) / mic_sec,\n\n\t\t\tAvgReadBlockSize: AvgReadBlockCountPS / bytesToKB, // Bytes to KB\n\t\t\tAvgWriteBlockSize: AvgWriteBlockCountPS / bytesToKB,\n\n\t\t\tSectorSize: ss,\n\t\t\tActualUsed: au,\n\t\t\tLogicalSize: ls,\n\t\t}\n\n\t\tdata, err := json.MarshalIndent(stat1, \"\", \"\\t\")\n\n\t\tif err != nil {\n\n\t\t\tpanic(err)\n\t\t}\n\n\t\tos.Stdout.Write(data)\n\n\t} else {\n\n\t\t// Printing in tabular form\n\t\t//\tfmt.Printf(\"%+v\\n\\n\", annotation)\n\t\tdata, err := json.MarshalIndent(annotation, \"\", \"\\t\")\n\n\t\tif err != nil {\n\n\t\t\tpanic(err)\n\t\t}\n\n\t\tos.Stdout.Write(data)\n\n\t\tq := tabwriter.NewWriter(os.Stdout, minwidth, maxwidth, padding, ' ', tabwriter.AlignRight|tabwriter.Debug)\n\n\t\tfmt.Fprintf(q, \"\\n\\nReplica\\tStatus\\tDataUpdateIndex\\t\\n\")\n\t\tfmt.Fprintf(q, \"\\t\\t\\t\\n\")\n\t\tfor i := 0; i < 4; i += 3 {\n\n\t\t\tfmt.Fprintf(q, \"%s\\t%s\\t%s\\t\\n\", statusArray[i], statusArray[i+1], statusArray[i+2])\n\t\t}\n\n\t\tq.Flush()\n\n\t\tw := tabwriter.NewWriter(os.Stdout, minwidth, maxwidth, padding, ' ', tabwriter.AlignRight|tabwriter.Debug)\n\t\tfmt.Println(\"\\n----------- Performance Stats -----------\\n\")\n\t\tfmt.Fprintf(w, \"r/s\\tw/s\\tr(MB/s)\\tw(MB/s)\\trLat(ms)\\twLat(ms)\\t\\n\")\n\t\tfmt.Fprintf(w, \"%d\\t%d\\t%.3f\\t%.3f\\t%.3f\\t%.3f\\t\\n\", readIOPS, writeIOPS, float64(rThroughput)/bytesToMB, float64(wThroughput)/bytesToMB, float64(ReadLatency)/mic_sec, float64(WriteLatency)/mic_sec)\n\t\tw.Flush()\n\n\t\tx := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', tabwriter.AlignRight|tabwriter.Debug)\n\t\tfmt.Println(\"\\n------------ Capacity Stats -------------\\n\")\n\t\tfmt.Fprintf(x, \"Logical(GB)\\tUsed(GB)\\t\\n\")\n\t\tfmt.Fprintf(x, \"%f\\t%f\\t\\n\", ls, au)\n\t\tx.Flush()\n\t}\n\n\treturn err\n}", "func (ati *actionTrackerImpl) GetStats() string {\n\toutput := make([]*StructuredStatsOutput, 0)\n\tati.RLock()\n\tsortedActions := ati.getSortedActions()\n\tfor _, action := range sortedActions {\n\t\toutput = append(output, &StructuredStatsOutput{\n\t\t\tAction: action,\n\t\t\tAvg: math.Round(ati.actions[action].value*1000) / 1000, //round to the nearest 3 decimal places\n\t\t})\n\t}\n\tati.RUnlock()\n\tstatsBytes, err := ati.OutputFormatter(output)\n\tif err 
!= nil {\n\t\tpanic(fmt.Sprintf(\"programming error detected: %+v\", err))\n\t}\n\treturn string(statsBytes)\n}", "func NewFormatter(w io.Writer, fmt string) (*Formatter, error) {\n\tif fmt == \"\" {\n\t\tfmt = DefaultTemplate\n\t}\n\ttmpl, err := template.New(\"out\").Parse(fmt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Formatter{\n\t\toutput: w,\n\t\ttemplate: tmpl,\n\t\tColorize: false,\n\t\tShowFields: true,\n\t\tMaxFieldLength: 30,\n\t\tShowPrefix: true,\n\t\tShowSuffix: true,\n\t\tExcludeFields: defaultExcludes,\n\t}, nil\n}", "func CreateEntityStats(params ...interface{}) engosdl.IComponent {\n\tif len(params) == 2 {\n\t\treturn NewEntityStats(params[0].(string), params[1].(int))\n\t}\n\treturn NewEntityStats(\"\", 0)\n}", "func (cd *Codec) Stats() *Stats {\n\tstats := Stats{\n\t\tHits: atomic.LoadUint64(&cd.hits),\n\t\tMisses: atomic.LoadUint64(&cd.misses),\n\t}\n\tif cd.localCache != nil {\n\t\tstats.LocalHits = atomic.LoadUint64(&cd.localHits)\n\t\tstats.LocalMisses = atomic.LoadUint64(&cd.localMisses)\n\t}\n\treturn &stats\n}", "func (m *Muxer) CtxFormat() *avformat.Context {\n\treturn m.ctxFormat\n}", "func statsHandler(w http.ResponseWriter, r *http.Request) {\n\tdata := core.GetStats()\n\tb, err := json.Marshal(data)\n\tif err != nil {\n\t\thttp.Error(w, \"Error marshalling JSON\", http.StatusInternalServerError)\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tfmt.Fprintf(w, \"%s\", b)\n}", "func Newf(c int, format string, a ...interface{}) *Status {\n\treturn New(c, fmt.Sprintf(format, a...))\n}", "func (w *Window) Format() int {\n\treturn int(C.ANativeWindow_getFormat(w.cptr()))\n}", "func New(done <-chan bool) (*Stats, error) {\n\ts := &Stats{\n\t\tregistry: cache.New(*cmd.DefaultOptions().Registry),\n\t\tclient: *cmd.DefaultOptions().Client,\n\t}\n\n\tif err := s.scan(); err != nil {\n\t\treturn nil, err\n\t}\n\n\ts.Start(done)\n\treturn s, nil\n}", "func Benchmark_Ctx_Format_HTML(b *testing.B) {\n\tapp := New()\n\tc := app.AcquireCtx(&fasthttp.RequestCtx{})\n\tdefer app.ReleaseCtx(c)\n\tc.Fasthttp.Request.Header.Set(\"Accept\", \"text/html\")\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor n := 0; n < b.N; n++ {\n\t\tc.Format(\"Hello, World!\")\n\t}\n\tutils.AssertEqual(b, \"<p>Hello, World!</p>\", string(c.Fasthttp.Response.Body()))\n}", "func Format(bytesPerSecond float64) (value, unit string) {\n\t// to make it bits per second\n\tnumPerSecond := bytesPerSecond * 8\n\ti := 0\n\tfor ; i < 3; i++ {\n\t\tif numPerSecond < 1024 {\n\t\t\tbreak\n\t\t}\n\t\tnumPerSecond /= 1024\n\t}\n\treturn fmt.Sprintf(\"%.2f\", numPerSecond), units[i]\n}", "func GetStats() string {\n\treturn fmt.Sprintf(\"Bot: %s System: %s Go: %s\", getBotUsageStats(), getSystemUsageStats(), getGoStats())\n}", "func (o JsonSerializationResponseOutput) Format() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v JsonSerializationResponse) *string { return v.Format }).(pulumi.StringPtrOutput)\n}", "func NewRuntimeStats(scope Scope) StatGenerator {\n\treturn runtimeStats{\n\t\talloc: scope.NewGauge(\"alloc\"),\n\t\ttotalAlloc: scope.NewCounter(\"totalAlloc\"),\n\t\tsys: scope.NewGauge(\"sys\"),\n\t\tlookups: scope.NewCounter(\"lookups\"),\n\t\tmallocs: scope.NewCounter(\"mallocs\"),\n\t\tfrees: scope.NewCounter(\"frees\"),\n\n\t\theapAlloc: scope.NewGauge(\"heapAlloc\"),\n\t\theapSys: scope.NewGauge(\"heapSys\"),\n\t\theapIdle: scope.NewGauge(\"heapIdle\"),\n\t\theapInuse: scope.NewGauge(\"heapInuse\"),\n\t\theapReleased: 
scope.NewGauge(\"heapReleased\"),\n\t\theapObjects: scope.NewGauge(\"heapObjects\"),\n\n\t\tnextGC: scope.NewGauge(\"nextGC\"),\n\t\tlastGC: scope.NewGauge(\"lastGC\"),\n\t\tpauseTotalNs: scope.NewCounter(\"pauseTotalNs\"),\n\t\tnumGC: scope.NewCounter(\"numGC\"),\n\t\tgcCPUPercent: scope.NewGauge(\"gcCPUPercent\"),\n\n\t\tnumGoroutine: scope.NewGauge(\"numGoroutine\"),\n\t}\n}", "func (a *API) GcStats() *debug.GCStats {\n\ta.logger.Debug(\"debug_gcStats\")\n\ts := new(debug.GCStats)\n\tdebug.ReadGCStats(s)\n\treturn s\n}", "func New(tfProvider *schema.Provider, outputParam string, resourceFormat ResourceFormat) (OutputSink, error) {\n\tswitch resourceFormat {\n\tcase KRMResourceFormat:\n\t\treturn newKRM(tfProvider, outputParam)\n\tcase HCLResourceFormat:\n\t\treturn newHCL(tfProvider, outputParam)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown resource format '%v'\", resourceFormat)\n\t}\n}", "func newMetricsWriter(w http.ResponseWriter, r *http.Request, collector collector) *metricWriter {\n\tinfo := &Info{TimeStart: time.Now(), Request: r, Header: w.Header()}\n\treturn &metricWriter{w: w, info: info, collector: collector}\n}", "func (f Formatter) Format(txt string) (string, error) {\n\ttokens, err := f.l.Scan([]byte(txt))\n\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to format: %q\", err)\n\t}\n\n\tvar s strings.Builder\n\n\tinSession := true\n\tinPerformance := false\n\n\tfor _, tok := range tokens {\n\t\tswitch tok.Name() {\n\t\tcase \"DATE\":\n\t\t\ts.WriteString(\"@ \")\n\t\t\ts.WriteString(tok.Value())\n\t\t\ts.WriteString(\"\\r\\n\")\n\t\tcase \"FAILS\":\n\t\t\ts.WriteString(\" \")\n\t\t\ts.WriteString(tok.Value())\n\t\t\ts.WriteString(\"f\")\n\t\tcase \"LOAD\":\n\t\t\tinPerformance = true\n\t\t\ts.WriteString(\"\\r\\n\")\n\t\t\ts.WriteString(\" \")\n\t\t\ts.WriteString(tok.Value())\n\t\tcase \"METADATA\":\n\t\t\ts.WriteString(\"\\r\\n\")\n\t\t\ts.WriteString(spacer(inSession, inPerformance))\n\t\t\ts.WriteString(\"* \")\n\t\t\ts.WriteString(tok.Value())\n\t\tcase \"MOVEMENT\", \"MOVEMENT_SS\":\n\t\t\tinSession = false\n\t\t\tinPerformance = false\n\t\t\ts.WriteString(\"\\r\\n\\r\\n\")\n\t\t\tif tok.Value() == \"MOVEMENT_SS\" {\n\t\t\t\ts.WriteString(\"+ \")\n\t\t\t}\n\t\t\ts.WriteString(tok.Value())\n\t\t\ts.WriteString(\":\")\n\t\tcase \"NOTE\":\n\t\t\ts.WriteString(\"\\r\\n\")\n\t\t\ts.WriteString(spacer(inSession, inPerformance))\n\t\t\ts.WriteString(\"* \")\n\t\t\ts.WriteString(tok.Value())\n\t\tcase \"REPS\":\n\t\t\ts.WriteString(\" \")\n\t\t\ts.WriteString(tok.Value())\n\t\t\ts.WriteString(\"r\")\n\t\tcase \"SETS\":\n\t\t\ts.WriteString(\" \")\n\t\t\ts.WriteString(tok.Value())\n\t\t\ts.WriteString(\"s\")\n\t\t}\n\t}\n\n\treturn s.String(), nil\n}", "func (node *TimestampFuncExpr) Format(buf *TrackedBuffer) {\n\tbuf.astPrintf(node, \"%s(%s, %v, %v)\", node.Name, node.Unit, node.Expr1, node.Expr2)\n}", "func newStatGroup(size uint64) *statGroup {\n\treturn &statGroup{\n\t\tvalues: make([]float64, size),\n\t\tcount: 0,\n\t}\n}", "func (node *VindexSpec) Format(buf *TrackedBuffer) {\n\tbuf.astPrintf(node, \"using %v\", node.Type)\n\n\tnumParams := len(node.Params)\n\tif numParams != 0 {\n\t\tbuf.astPrintf(node, \" with \")\n\t\tfor i, p := range node.Params {\n\t\t\tif i != 0 {\n\t\t\t\tbuf.astPrintf(node, \", \")\n\t\t\t}\n\t\t\tbuf.astPrintf(node, \"%v\", p)\n\t\t}\n\t}\n}", "func RenderSummaryStats(w io.Writer, stats SummaryStats) error {\n\tvar err error\n\t_, err = fmt.Fprintf(w, \"Your total expenses: %.7f DASH\\n\\n\", stats.TotalCost)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\ttw := tabwriter.NewWriter(w, 8, 8, 1, '\\t', tabwriter.Debug|tabwriter.AlignRight)\n\tvar (\n\t\trequestStats []Stats\n\t\tnetworkStats []Stats\n\t)\n\tfor _, stats := range stats.GroupedStats.Slice() {\n\t\tswitch stats.Type {\n\t\tcase RequestStatsType:\n\t\t\trequestStats = append(requestStats, stats)\n\t\tcase NetworkStatsType:\n\t\t\tnetworkStats = append(networkStats, stats)\n\t\t}\n\t}\n\tif len(requestStats) > 0 {\n\t\t_, err := io.WriteString(w, \"Summary statistics for all performed requests\\n\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = writeTable(tw, []string{\"Request URL\", \"Size/bytes\", \"Elapsed/ms\", \"Cost/dash\"}, requestStats)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t_, err = fmt.Fprintf(w, \"\\n\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(networkStats) > 0 {\n\t\t_, err := io.WriteString(w, \"Summary statistics for all used networks\\n\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = writeTable(tw, []string{\"Network\", \"Size/bytes\", \"Elapsed/ms\", \"Cost/dash\"}, networkStats)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func RenderStats(uiView View, timeframe int) {\n\tif !uiView.UIEnabled {\n\t\tRenderStatsNoUI(uiView, timeframe)\n\t\treturn\n\t}\n\tif timeframe != uiView.ActiveTimeframe {\n\t\t// The updated timeframe is not to be updated\n\t\treturn\n\t}\n\trenderStatisticsLayout(uiView)\n\n\t// Processing the general stats table headers\n\tstatsHeaders := []string{\n\t\t\"Id\",\n\t\t\"Website\",\n\t\t\"Avg (ms)\",\n\t\t\"Max (ms)\",\n\t\t\"Availability\",\n\t}\n\tTable := [][]string{statsHeaders}\n\t// For each URL\n\tvar urlStatistic *statistics.Statistic\n\tfor id, url := range uiView.Urls {\n\t\turlStatistic = uiView.URLStatistics[url][uiView.ActiveTimeframe]\n\t\tif !math.IsNaN(urlStatistic.Average()) {\n\t\t\t// Append Statistics\n\t\t\tTable = append(Table, []string{\n\t\t\t\tfmt.Sprint(id),\n\t\t\t\tShorten(url),\n\t\t\t\tfmt.Sprintf(\"%.0f\", urlStatistic.Average()),\n\t\t\t\tfmt.Sprintf(\"%v\", urlStatistic.MaxResponseTime()),\n\t\t\t\tfmt.Sprintf(\"%.0f%%\", urlStatistic.Availability()*100.0),\n\t\t\t})\n\t\t}\n\t}\n\t// Rendering the updated table\n\trenderStatTable(Table)\n\n\t// Processing the detailed view\n\tdetailedStatistics := uiView.URLStatistics[uiView.Urls[uiView.ActiveWebsite]]\n\tdetailedHeaders := []string{\n\t\t\"TimeFrame\",\n\t\t\"Avg (ms)\",\n\t\t\"Max (ms)\",\n\t\t\"Availability\",\n\t\t\"Codes\",\n\t}\n\tdetailTable := [][]string{detailedHeaders}\n\tfor id, statistic := range detailedStatistics {\n\t\tif !math.IsNaN(urlStatistic.Average()) {\n\t\t\t// Append Statistics\n\t\t\tdetailTable = append(detailTable, []string{\n\t\t\t\tuiView.TimeframeRepr[id],\n\t\t\t\tfmt.Sprintf(\"%.0f\", statistic.Average()),\n\t\t\t\tfmt.Sprintf(\"%v\", statistic.MaxResponseTime()),\n\t\t\t\tfmt.Sprintf(\"%.0f%%\", statistic.Availability()*100.0),\n\t\t\t\tStatusCodeMapToString(statistic.StatusCodeCount),\n\t\t\t})\n\t\t}\n\t}\n\t// Processing the sparkline graph\n\tplotValues := detailedStatistics[2].RecentResponseTime()\n\n\t// Rendering the detailed view\n\trenderStatDetails(uiView, detailTable, plotValues)\n}", "func (t trace) Stats() *Stats {\n\tnow := time.Now()\n\n\treturn &Stats{\n\t\tTLS: t.TLS(),\n\t\tTimeDNS: t.TimeDNS(),\n\t\tTimeConnect: t.TimeConnect(),\n\t\tTimeTLS: t.TimeTLS(),\n\t\tTimeWait: t.TimeWait(),\n\t\tTimeResponse: t.TimeResponse(now),\n\t\tTimeDownload: t.TimeDownload(now),\n\t\tTimeTotal: t.TimeTotal(now),\n\t}\n}", "func 
NewFormatter(withoutUnit bool, duration time.Duration) *Formatter {\n\treturn &Formatter{withoutUnit, duration}\n}", "func FormatDebugStats(stats []byte) (string, error) {\n\tvar dogStats map[uint64]metricStat\n\tif err := json.Unmarshal(stats, &dogStats); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// put metrics in order: first is the more frequent\n\torder := make([]uint64, len(dogStats))\n\ti := 0\n\tfor metric := range dogStats {\n\t\torder[i] = metric\n\t\ti++\n\t}\n\n\tsort.Slice(order, func(i, j int) bool {\n\t\treturn dogStats[order[i]].Count > dogStats[order[j]].Count\n\t})\n\n\t// write the response\n\tbuf := bytes.NewBuffer(nil)\n\n\theader := fmt.Sprintf(\"%-40s | %-20s | %-10s | %-20s\\n\", \"Metric\", \"Tags\", \"Count\", \"Last Seen\")\n\tbuf.Write([]byte(header))\n\tbuf.Write([]byte(strings.Repeat(\"-\", len(header)) + \"\\n\"))\n\n\tfor _, key := range order {\n\t\tstats := dogStats[key]\n\t\tbuf.Write([]byte(fmt.Sprintf(\"%-40s | %-20s | %-10d | %-20v\\n\", stats.Name, stats.Tags, stats.Count, stats.LastSeen)))\n\t}\n\n\tif len(dogStats) == 0 {\n\t\tbuf.Write([]byte(\"No metrics processed yet.\"))\n\t}\n\n\treturn buf.String(), nil\n}", "func (s *TypeStruct) Format() error {\n\t// todo\n\treturn nil\n}", "func (ii *IndexInfo) Format(buf *TrackedBuffer) {\n\tif ii.Primary {\n\t\tbuf.astPrintf(ii, \"%s\", ii.Type)\n\t} else {\n\t\tbuf.astPrintf(ii, \"%s\", ii.Type)\n\t\tif !ii.Name.IsEmpty() {\n\t\t\tbuf.astPrintf(ii, \" %v\", ii.Name)\n\t\t}\n\t}\n}", "func (context *Context) NewWriter(out io.Writer) (Writer, error) {\n\tswitch strings.ToLower(context.Format) {\n\tcase \"csv\":\n\t\treturn &csvWriter{Out: out}, nil\n\tcase \"json\":\n\t\treturn &jsonWriter{Out: out}, nil\n\tcase \"toml\":\n\t\treturn &tomlWriter{Out: out}, nil\n\tcase \"yaml\", \"yml\":\n\t\treturn &yamlWriter{Out: out}, nil\n\tcase \"xml\":\n\t\treturn &xmlWriter{Out: out}, nil\n\tcase \"markdown\", \"md\":\n\t\treturn &markdownWriter{Out: out}, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"%s: unknown format\", context.Format)\n\t}\n}", "func (node *ConvertType) Format(buf *TrackedBuffer) {\n\tbuf.astPrintf(node, \"%s\", node.Type)\n\tif node.Length != nil {\n\t\tbuf.astPrintf(node, \"(%v\", node.Length)\n\t\tif node.Scale != nil {\n\t\t\tbuf.astPrintf(node, \", %v\", node.Scale)\n\t\t}\n\t\tbuf.astPrintf(node, \")\")\n\t}\n\tif node.Charset != \"\" {\n\t\tbuf.astPrintf(node, \"%s %s\", node.Operator.ToString(), node.Charset)\n\t}\n}", "func mergeStats(new stats, old stats) stats {\n\tnew.UptimeSeconds = mergeValue(new.UptimeSeconds, old.UptimeSeconds)\n\tnew.PendingSessionKeys = mergeValue(new.PendingSessionKeys, old.PendingSessionKeys)\n\tnew.ActiveSessions = mergeValue(new.ActiveSessions, old.ActiveSessions)\n\tnew.Connections = mergeValue(new.Connections, old.Connections)\n\tnew.Proxies = mergeValue(new.Proxies, old.Proxies)\n\tnew.BytesProxied = mergeValue(new.BytesProxied, old.BytesProxied)\n\tnew.GoRoutines = mergeValue(new.GoRoutines, old.GoRoutines)\n\tnew.Options.SessionRate = mergeValue(new.Options.SessionRate, old.Options.SessionRate)\n\tnew.Options.GlobalRate = mergeValue(new.Options.GlobalRate, old.Options.GlobalRate)\n\treturn new\n}", "func VertexAttribFormat(attribindex uint32, size int32, xtype uint32, normalized bool, relativeoffset uint32) {\n C.glowVertexAttribFormat(gpVertexAttribFormat, (C.GLuint)(attribindex), (C.GLint)(size), (C.GLenum)(xtype), (C.GLboolean)(boolToInt(normalized)), (C.GLuint)(relativeoffset))\n}", "func NewFmtCmd() *cobra.Command {\n\tcmd := 
&cobra.Command{\n\t\tUse: \"fmt\",\n\t\tShort: \"fmt helps to deal with data format operations\",\n\t\tPersistentPreRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tvar err error\n\t\t\tif inputBytes, err = getInput(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err = getOutput(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t},\n\t\tPersistentPostRun: func(cmd *cobra.Command, args []string) {\n\t\t\toutput.Close()\n\t\t},\n\t}\n\n\tcmd.PersistentFlags().StringVarP(&inputFile, \"input\", \"i\", \"\", \"Read input from file\")\n\tcmd.PersistentFlags().StringVarP(&outputFile, \"output\", \"o\", \"\", \"Write output to file\")\n\n\treturn cmd\n}", "func Test_Ctx_Format(t *testing.T) {\n\tt.Parallel()\n\tapp := New()\n\tctx := app.AcquireCtx(&fasthttp.RequestCtx{})\n\tdefer app.ReleaseCtx(ctx)\n\tctx.Fasthttp.Request.Header.Set(HeaderAccept, MIMETextPlain)\n\tctx.Format([]byte(\"Hello, World!\"))\n\tutils.AssertEqual(t, \"Hello, World!\", string(ctx.Fasthttp.Response.Body()))\n\n\tctx.Fasthttp.Request.Header.Set(HeaderAccept, MIMETextHTML)\n\tctx.Format(\"Hello, World!\")\n\tutils.AssertEqual(t, \"<p>Hello, World!</p>\", string(ctx.Fasthttp.Response.Body()))\n\n\tctx.Fasthttp.Request.Header.Set(HeaderAccept, MIMEApplicationJSON)\n\tctx.Format(\"Hello, World!\")\n\tutils.AssertEqual(t, `\"Hello, World!\"`, string(ctx.Fasthttp.Response.Body()))\n\tctx.Format(complex(1, 1))\n\tutils.AssertEqual(t, \"(1+1i)\", string(ctx.Fasthttp.Response.Body()))\n\n\tctx.Fasthttp.Request.Header.Set(HeaderAccept, MIMEApplicationXML)\n\tctx.Format(\"Hello, World!\")\n\tutils.AssertEqual(t, `<string>Hello, World!</string>`, string(ctx.Fasthttp.Response.Body()))\n\tctx.Format(Map{})\n\tutils.AssertEqual(t, \"map[]\", string(ctx.Fasthttp.Response.Body()))\n\n\ttype broken string\n\tctx.Fasthttp.Request.Header.Set(HeaderAccept, \"broken/accept\")\n\tctx.Format(broken(\"Hello, World!\"))\n\tutils.AssertEqual(t, `Hello, World!`, string(ctx.Fasthttp.Response.Body()))\n}" ]
[ "0.615135", "0.58909744", "0.564853", "0.5477506", "0.53987473", "0.5379427", "0.5373916", "0.5269798", "0.5267023", "0.525883", "0.52573013", "0.52511203", "0.52503246", "0.52026254", "0.5195151", "0.5113169", "0.5079528", "0.5066979", "0.5062811", "0.5025183", "0.5025183", "0.49985847", "0.4998104", "0.49873212", "0.49719247", "0.49662286", "0.49053732", "0.49045452", "0.4866946", "0.48316306", "0.4819947", "0.47863638", "0.47584575", "0.47390237", "0.47250605", "0.47158", "0.47125614", "0.47075832", "0.4696546", "0.4692964", "0.46915203", "0.46895403", "0.46889213", "0.4677214", "0.4672261", "0.46716624", "0.46707973", "0.46653605", "0.4662", "0.46599704", "0.4658162", "0.46577713", "0.4652327", "0.4630471", "0.46263745", "0.4625078", "0.46183032", "0.46017796", "0.45952833", "0.4590465", "0.4583994", "0.4581257", "0.45628822", "0.45614186", "0.4555755", "0.45478633", "0.45466408", "0.4545635", "0.4539061", "0.45378762", "0.45375076", "0.45318356", "0.45298946", "0.45182362", "0.4517072", "0.45053673", "0.45046893", "0.4501142", "0.44945523", "0.44932666", "0.44863507", "0.44743857", "0.44734314", "0.44731152", "0.4472995", "0.4463528", "0.44552645", "0.44441903", "0.4441546", "0.44256485", "0.44234854", "0.44190156", "0.4417754", "0.4413848", "0.4411086", "0.4410355", "0.44062307", "0.44006896", "0.439935", "0.43954712" ]
0.7449207
0
NewStats returns a new Stats entity and sets in it the given name
func NewStats(container string) *Stats {
	return &Stats{StatsEntry: StatsEntry{Container: container}}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewStats() *Stats {\n\tcs := new(Stats)\n\tcs.statMap = make(map[string]*FuncStat)\n\n\treturn cs\n}", "func NewEntityStats(name string, life int) *EntityStats {\n\tengosdl.Logger.Trace().Str(\"component\", \"entity-stats\").Str(\"entity-stats\", name).Msg(\"new entity-stats\")\n\tresult := &EntityStats{\n\t\tComponent: engosdl.NewComponent(name),\n\t\tLife: life,\n\t}\n\treturn result\n}", "func NewStats() Stats {\n\treturn Stats{DeliveryService: map[enum.DeliveryServiceName]Stat{}}\n}", "func New() *Stats {\n\treturn &Stats{\n\t\tStatusCode: map[string]uint64{},\n\t\tMethod: map[string]uint64{},\n\t\tPath: map[string]uint64{},\n\t\tInBytes: 0,\n\t\tOutBytes: 0,\n\t}\n}", "func NewStats() *Stats {\n\tr := &Stats{\n\t\tAdaptiveSelection: make(map[string]AdaptiveSelection, 0),\n\t\tAttributes: make(map[string]string, 0),\n\t\tBreakers: make(map[string]Breaker, 0),\n\t\tScriptCache: make(map[string][]ScriptCache, 0),\n\t\tThreadPool: make(map[string]ThreadCount, 0),\n\t}\n\n\treturn r\n}", "func NewStat() *Stat {\n\treturn &Stat{}\n}", "func NewStats() *Stats {\n\ts := new(Stats)\n\ts.categories = make(map[string][]time.Duration)\n\ts.startedSamples = make(map[string]time.Duration)\n\treturn s\n}", "func newHTTPStats() *httpStats {\n\treturn &httpStats{}\n}", "func NewActionStats() ActionStats {\n stats := ActionStats{}\n stats.stats = make(map[string]*actionData)\n return stats\n}", "func newStats(code, comment, blank int) *Stats {\n\ttotal := code + comment + blank\n\tif total == 0 {\n\t\ttotal = 1\n\t}\n\treturn &Stats{\n\t\tcode,\n\t\tcomment,\n\t\tblank,\n\t\tcode + comment + blank,\n\t\tfloat64(code) / float64(total) * 100,\n\t\tfloat64(comment) / float64(total) * 100,\n\t\tfloat64(blank) / float64(total) * 100,\n\t}\n}", "func New() *Stats {\n\tname, _ := os.Hostname()\n\n\tstats := &Stats{\n\t\tclosed: make(chan struct{}, 1),\n\t\tUptime: time.Now(),\n\t\tPid: os.Getpid(),\n\t\tResponseCounts: map[string]int{},\n\t\tTotalResponseCounts: map[string]int{},\n\t\tTotalResponseTime: time.Time{},\n\t\tHostname: name,\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-stats.closed:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tstats.ResetResponseCounts()\n\n\t\t\t\ttime.Sleep(time.Second * 1)\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn stats\n}", "func NewStat() *Stat {\n\treturn &Stat{\n\t\tn: 0,\n\t\tmin: math.MaxFloat64,\n\t\tmax: -math.MaxFloat64,\n\t\tsum: 0.0,\n\t\tsum2: 0.0,\n\t}\n}", "func CreateStats(cluster, namespace, volumeName, deploymentName, mountPath, pathRestic, podName string) string {\n\tvar stats map[string]interface{}\n\tvar nameStats string\n\tif cluster == \"ClusterFrom\" {\n\t\tstats = utils.ReadJson(\"templates/stats\", \"stats_template_from\")\n\t\tnameStats = \"statsFrom\"\n\t} else {\n\t\tstats = utils.ReadJson(\"templates/stats\", \"stats_template_to\")\n\t\tnameStats = \"statsTo\"\n\t}\n\n\tauxName := \"stats-\" + deploymentName\n\tsizeVolume := utils.GetSizeVolume(podName, volumeName, mountPath)\n\tstats[\"name\"] = auxName\n\tstats[\"size\"] = sizeVolume\n\terr := utils.WriteJson(pathRestic, nameStats, stats)\n\tif err != nil {\n\t\tfmt.Println(\"Error creating \" + auxName)\n\t}\n\treturn sizeVolume\n}", "func (r Resolver) CreateStat(ctx context.Context, args createStatArgs) (*StatResolver, error) {\n\tres, err := r.client.CreateStat(ctx, &pb.CreateStatReq{\n\t\tStat: &pb.Stat{\n\t\t\tAccountID: gqlIDToString(args.Input.AccountID),\n\t\t\tPlayerMove: args.Input.PlayerMove,\n\t\t},\n\t})\n\treturn statRes(res, err)\n}", "func NewFuncStat(name string) 
*FuncStat {\n\tvar stat = new(FuncStat)\n\tstat.Name = name\n\tstat.Worker = NewCounter(0)\n\tstat.Job = NewCounter(0)\n\tstat.Processing = NewCounter(0)\n\treturn stat\n}", "func (cs *Stats) CreateStats(fID string) error {\n\tif _, isPresent := cs.statMap[fID]; isPresent {\n\t\treturn errors.New(\"Stat exists\")\n\t}\n\n\tcs.statMap[fID] = new(FuncStat)\n\n\treturn nil\n}", "func New() *MemStats {\n\treturn &MemStats{\n\t\tvalues: map[string]int64{},\n\t}\n}", "func newStatGroup(size uint64) *statGroup {\n\treturn &statGroup{\n\t\tvalues: make([]float64, size),\n\t\tcount: 0,\n\t}\n}", "func New(done <-chan bool) (*Stats, error) {\n\ts := &Stats{\n\t\tregistry: cache.New(*cmd.DefaultOptions().Registry),\n\t\tclient: *cmd.DefaultOptions().Client,\n\t}\n\n\tif err := s.scan(); err != nil {\n\t\treturn nil, err\n\t}\n\n\ts.Start(done)\n\treturn s, nil\n}", "func CreateEntityStats(params ...interface{}) engosdl.IComponent {\n\tif len(params) == 2 {\n\t\treturn NewEntityStats(params[0].(string), params[1].(int))\n\t}\n\treturn NewEntityStats(\"\", 0)\n}", "func (p *stats) Name() string {\n\treturn \"Stats\"\n}", "func newConnStats() *ConnStats {\n\treturn &ConnStats{}\n}", "func newConnStats() *ConnStats {\n\treturn &ConnStats{}\n}", "func New(evt *eventbus.EventBus) *TileStats {\n\tret := &TileStats{\n\t\tevt: evt,\n\t\tstats: map[string]*TraceStats{},\n\t}\n\tevt.SubscribeAsync(db.NEW_TILE_AVAILABLE_EVENT, func(it interface{}) {\n\t\ttile := it.(*tiling.Tile)\n\t\tglog.Info(\"TileStats: Beginning.\")\n\t\tret.calcStats(tile)\n\t\tglog.Info(\"TileStats: Finished.\")\n\t})\n\n\treturn ret\n}", "func NewStatistics(votes *Vote) Statistics {\n\n\ttr := make(map[string][]string)\n\tfor u, v := range votes.Votes {\n\t\ttr[v] = append(tr[v], u)\n\t}\n\n\ttotal := len(votes.Votes)\n\n\tlog.WithFields(log.Fields{\n\t\t\"total\": total,\n\t}).Info(\"Statistics struct generated\")\n\n\treturn Statistics{\n\t\tTotal: total,\n\t\tTransformed: tr,\n\t}\n\n}", "func NewStats() (*Stats, error) {\n\t// TODO: Make it singleton if possible.\n\tprocess, err := process.NewProcess(int32(os.Getpid()))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar cpuUsage float64\n\tif c, err := process.CPUPercent(); err == nil {\n\t\tcpuUsage = c\n\t}\n\tvar m runtime.MemStats\n\truntime.ReadMemStats(&m)\n\treturn &Stats{\n\t\tGoroutines: runtime.NumGoroutine(),\n\t\tCPUUsage: cpuUsage,\n\t\tMemStats: MemStats{\n\t\t\tHeapAlloc: m.HeapAlloc,\n\t\t\tHeapIdle: m.HeapIdle,\n\t\t\tHeapInuse: m.HeapInuse,\n\t\t},\n\t}, nil\n}", "func (f *Framework) NewStatsCollector(name, version string) *StatsCollector {\n\tif f.StressTestLevel > 0 {\n\t\tname = fmt.Sprintf(\"stress_%v_%v\", f.StressTestLevel, name)\n\t}\n\treturn &StatsCollector{name: name, outputDir: f.PerfOutputDir, version: version}\n}", "func NewStatMessage(stats map[string]int, init, end int64) *StatMessage {\n\treturn &StatMessage{\n\t\tMessage: Message{TypeStat},\n\t\tStats: stats,\n\t\tInit: init,\n\t\tEnd: end,\n\t}\n}", "func NewPortStats(names []string) PortStats {\n\tstatVals := PortStats{}\n\tfor _, name := range names {\n\t\tstatVal, err := opennsl.ParseStatVal(name)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"ParsePortStats error. 
%s\", err)\n\t\t} else {\n\t\t\tstatVals = append(statVals, statVal)\n\t\t}\n\t}\n\n\treturn statVals\n}", "func (e *Exporter) NewRates(name string, singleCountVar multiCountVar, samples int, interval time.Duration) *stats.Rates {\n\tif e.name == \"\" || name == \"\" {\n\t\tv := stats.NewRates(name, singleCountVar, samples, interval)\n\t\taddUnnamedExport(name, v)\n\t\treturn v\n\t}\n\n\texporterMu.Lock()\n\tdefer exporterMu.Unlock()\n\n\tif v, ok := unnamedExports[name]; ok {\n\t\treturn v.(*stats.Rates)\n\t}\n\n\tov, ok := exportedOtherStatsVars[name]\n\tif !ok {\n\t\tov = expvar.NewMap(name)\n\t\texportedOtherStatsVars[name] = ov\n\t}\n\tif lvar := ov.Get(e.name); lvar != nil {\n\t\treturn lvar.(*stats.Rates)\n\t}\n\n\trates := stats.NewRates(\"\", singleCountVar, samples, interval)\n\tov.Set(e.name, rates)\n\treturn rates\n}", "func (e StatRepository) AddStat(stat models.EntityStats) (models.EntityStats, error) {\n\tc := e.newStatCollection()\n\tdefer c.Close()\n\tstat.CreatedAt = time.Now()\n\treturn stat, c.Collection.Insert(stat)\n}", "func newElementStats() *elementStats {\n\treturn &elementStats{\n\t\tRanges: map[osm.Type]*idRange{\n\t\t\tosm.TypeNode: {Min: math.MaxInt64},\n\t\t\tosm.TypeWay: {Min: math.MaxInt64},\n\t\t\tosm.TypeRelation: {Min: math.MaxInt64},\n\t\t},\n\t}\n}", "func NewStatistics(ns string, la int64, lbr int64) *Statistics {\n\treturn &Statistics{\n\t\tNamespace: ns,\n\t\tLastAccessed: la,\n\t\tLastBufferedRequest: lbr,\n\t}\n}", "func newStatsReporter() (*reporter, error) {\n\treturn &reporter{}, nil\n}", "func NewName(v string) predicate.User {\n\treturn predicate.User(sql.FieldEQ(FieldNewName, v))\n}", "func New() *RunningStats {\n\treturn &RunningStats{}\n}", "func GetStats(storer store.Storer) *Stats {\n\t// get the mutants count\n\tvalue1, err := storer.Get(mutantCountKey)\n\tif err != nil {\n\t\tvalue1 = \"0\"\n\t}\n\tmutants, err := strconv.Atoi(value1)\n\tif err != nil {\n\t\tmutants = 0\n\t}\n\n\t// get the humans count\n\tvalue2, err := storer.Get(humanCountKey)\n\tif err != nil {\n\t\tvalue2 = \"0\"\n\t}\n\thumans, err := strconv.Atoi(value2)\n\tif err != nil {\n\t\tmutants = 0\n\t}\n\n\t// get the ratio between humans and mutants\n\tratio := math.Round(float64(mutants)/float64(humans)*100) / 100\n\n\treturn &Stats{\n\t\tMutants: mutants,\n\t\tHumans: humans,\n\t\tRatio: ratio,\n\t}\n}", "func New(addr string) (*Provider, error) {\n\tclient, err := statsd.New(addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp := Provider{\n\t\tclient: client,\n\t\trate: 1,\n\t}\n\treturn &p, nil\n}", "func NewNameGT(v string) predicate.User {\n\treturn predicate.User(sql.FieldGT(FieldNewName, v))\n}", "func (h *StatsHandlers) createStatsRecord(c *gin.Context) {\n\n\tfreeMemory, err := strconv.ParseUint(c.PostForm(\"freeMemory\"), 10, 64)\n\tif err != nil {\n\t\tfmt.Println(\"Couldn't read free memory\")\n\t\tpanic(err)\n\t}\n\n\tuptime, err := strconv.ParseUint(c.PostForm(\"uptime\"), 10, 64)\n\tif err != nil {\n\t\tfmt.Println(\"Couldn't read uptime\")\n\t\tpanic(err)\n\t}\n\n\ttemperature, err := strconv.ParseFloat(c.PostForm(\"temp\"), 64)\n\tif err != nil {\n\t\tfmt.Println(\"Couldn't read temp\")\n\t\tpanic(err)\n\t}\n\n\tcpuTemp, err := strconv.ParseFloat(c.PostForm(\"cpuTemp\"), 64)\n\tif err != nil {\n\t\tfmt.Println(\"Couldn't read cpu temp\")\n\t\tpanic(err)\n\t}\n\n\tambientTemp, err := strconv.ParseFloat(c.PostForm(\"ambientTemp\"), 64)\n\tif err != nil {\n\t\tfmt.Println(\"Couldn't read ambient temp\")\n\t\tpanic(err)\n\t}\n\n\thumidity, err := 
strconv.ParseFloat(c.PostForm(\"humidity\"), 64)\n\tif err != nil {\n\t\tfmt.Println(\"Couldn't read humidity\")\n\t\tpanic(err)\n\t}\n\n\tdb, ok := c.MustGet(\"databaseConn\").(*gorm.DB)\n\tif !ok {\n\t\treturn\n\t}\n\n\tnewStats := models.Stats{\n\t\tFreeMemory: freeMemory,\n\t\tUptime: uptime,\n\t\tTemperature: temperature,\n\t\tAmbientTemperature: ambientTemp,\n\t\tCPUTemperature: cpuTemp,\n\t\tHumidity: humidity}\n\n\tfmt.Println(newStats)\n\tdb.Save(&newStats)\n\n\tc.Status(http.StatusOK)\n}", "func (p *Player) AddStat(name string, max int) {\n\ty := p.y + len(p.stats)\n\tsb := NewStatbar(p.x, y, name, max, p.label)\n\tp.stats = append(p.stats, sb)\n}", "func (fs FS) NewStat() (Stat, error) {\n\treturn fs.Stat()\n}", "func (r *Search) Stats(stats ...string) *Search {\n\tr.req.Stats = stats\n\n\treturn r\n}", "func NewStatistics() Statistics {\n\treturn Statistics{\n\t\tMean: NewDayTypeTimeseries(),\n\t\tStdev: NewDayTypeTimeseries(),\n\t}\n}", "func InitialStats(flowName string, all TaskIDs) *Stats {\n\treturn &Stats{\n\t\tflowName,\n\t\tall,\n\t\tNewTaskIDs(),\n\t\tNewTaskIDs(),\n\t\tNewTaskIDs(),\n\t\tall.Copy(),\n\t}\n}", "func (h *Handler) CreateStat(u *models.PingData) (string, error) {\n\tif _, err := h.Storage.InsertStat(u); err != nil {\n\t\treturn \"\", errors.Wrap(err, \"unable to create site\")\n\t}\n\treturn \"\", nil\n}", "func (s *Simulator) Stats() *Stats {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\telapsed := time.Since(s.now).Seconds()\n\tpThrough := float64(s.writtenN) / elapsed\n\trespMean := 0\n\tif len(s.latencyHistory) > 0 {\n\t\trespMean = int(s.totalLatency) / len(s.latencyHistory) / int(time.Millisecond)\n\t}\n\tstats := &Stats{\n\t\tTime: time.Unix(0, int64(time.Since(s.now))),\n\t\tTags: s.ReportTags,\n\t\tFields: models.Fields(map[string]interface{}{\n\t\t\t\"T\": int(elapsed),\n\t\t\t\"points_written\": s.writtenN,\n\t\t\t\"values_written\": s.writtenN * s.FieldsPerPoint,\n\t\t\t\"points_ps\": pThrough,\n\t\t\t\"values_ps\": pThrough * float64(s.FieldsPerPoint),\n\t\t\t\"write_error\": s.currentErrors,\n\t\t\t\"resp_wma\": int(s.wmaLatency),\n\t\t\t\"resp_mean\": respMean,\n\t\t\t\"resp_90\": int(s.quartileResponse(0.9) / time.Millisecond),\n\t\t\t\"resp_95\": int(s.quartileResponse(0.95) / time.Millisecond),\n\t\t\t\"resp_99\": int(s.quartileResponse(0.99) / time.Millisecond),\n\t\t}),\n\t}\n\n\tvar isCreating bool\n\tif s.writtenN < s.SeriesN() {\n\t\tisCreating = true\n\t}\n\tstats.Tags[\"creating_series\"] = fmt.Sprint(isCreating)\n\n\t// Reset error count for next reporting.\n\ts.currentErrors = 0\n\n\t// Add runtime stats for the remote instance.\n\tvar vars Vars\n\tresp, err := http.Get(strings.TrimSuffix(s.Host, \"/\") + \"/debug/vars\")\n\tif err != nil {\n\t\t// Don't log error as it can get spammy.\n\t\treturn stats\n\t}\n\tdefer resp.Body.Close()\n\n\tif err := json.NewDecoder(resp.Body).Decode(&vars); err != nil {\n\t\tfmt.Fprintln(s.Stderr, err)\n\t\treturn stats\n\t}\n\n\tstats.Fields[\"heap_alloc\"] = vars.Memstats.HeapAlloc\n\tstats.Fields[\"heap_in_use\"] = vars.Memstats.HeapInUse\n\tstats.Fields[\"heap_objects\"] = vars.Memstats.HeapObjects\n\treturn stats\n}", "func NewStatsdNew(addr string, options ...statsd.Option) *statsd.Client {\n\tclient, err := statsd.New(addr, options...)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn client\n}", "func (a *dbeat) newNetStats(stats *types.StatsJSON) *NetStats {\n\tvar net = &NetStats{Time: stats.Read}\n\tfor _, netStats := range stats.Networks {\n\t\tnet.RxBytes += 
netStats.RxBytes\n\t\tnet.RxDropped += netStats.RxDropped\n\t\tnet.RxErrors += netStats.RxErrors\n\t\tnet.RxPackets += netStats.RxPackets\n\t\tnet.TxBytes += netStats.TxBytes\n\t\tnet.TxDropped += netStats.TxDropped\n\t\tnet.TxErrors += netStats.TxErrors\n\t\tnet.TxPackets += netStats.TxPackets\n\t}\n\treturn net\n}", "func NewTeamSummary()(*TeamSummary) {\n m := &TeamSummary{\n }\n m.SetAdditionalData(make(map[string]interface{}));\n return m\n}", "func NewStatsFormat(source, osType string) formatter.Format {\n\tif source == formatter.TableFormatKey {\n\t\tif osType == winOSType {\n\t\t\treturn formatter.Format(winDefaultStatsTableFormat)\n\t\t}\n\t\treturn formatter.Format(defaultStatsTableFormat)\n\t} else if source == formatter.AutoRangeFormatKey {\n\t\treturn formatter.Format(autoRangeStatsTableFormat)\n\t}\n\treturn formatter.Format(source)\n}", "func New(name string, rate float64, tags ...string) Metric {\n\treturn Metric{name, rate, tags}\n}", "func NewStatsList() *StatsList {\n\tcfg := &Config{\n\t\tDebug: false,\n\t\tEndpoint: \"http://localhost:8080/stats\",\n\t}\n\tlogger := log.New(os.Stdout, \"[stanica]: \", log.Ldate)\n\tclient := NewClient(cfg, &http.Client{}, logger)\n\tst := &StatsList{\n\t\tClient: client,\n\t}\n\treturn st\n}", "func NewGlobalStats() *GlobalStats {\n\treturn &GlobalStats{\n\t\tnowScraping: Tracker{\n\t\t\tBlog: make(map[*User]bool),\n\t\t},\n\t}\n}", "func (s StatsEntry) Name() string {\n\treturn s.name\n}", "func New(name, key string) (*GetMatchHistory, *SummonerName, error) {\n\n\tif len(key) == 0 || len(key) == 0 {\n\t\treturn nil, nil, fmt.Errorf(\"One of your variables is empty, \\nname:%v \\nkey:%v \", name, key)\n\t}\n\trespBody, err := GetInfo(nameURL, name, key)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"Somethings gone wrong: %v\", err)\n\t}\n\tnewStruct, err := MaKeSummonerName(respBody)\n\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"Couldn't build struct after polling api %v\", err)\n\t}\n\treturn &GetMatchHistory{name: name, key: key, encryptedID: newStruct.ID}, &newStruct, nil\n}", "func New(memory_amount int64, tcp_port string, udp_port string,\n\t conn_max int, verbosity int, cas_disabled bool, flush_disabled bool) *ServerStat {\n\treturn &ServerStat {\n\t\tpid: os.Getpid(),\n\t\tinit_ts: time.Now().Unix(),\n\t\tlimit_maxbytes: memory_amount,\n\t\ttcp: tcp_port,\n\t\tudp: udp_port,\n\t\tverbosity: verbosity,\n\t\tcas_disabled: cas_disabled,\n\t\tflush_disabled: flush_disabled,\n\t\tConnections: make(map[string] *ConnectionStat),\n\t\tConnections_limit: conn_max,\n\t\tCurrent_connections: 0,\n\t\tTotal_connections: 0,\n\t\tRead_bytes: 0,\n\t\tWritten_bytes: 0,\n\t\tCommands: make(map[string] uint64),\n\t}\n}", "func NewStatsQuery() *StatsQuery {\n\n\treturn &StatsQuery{\n\t\tModelVersion: 1,\n\t\tFields: []string{},\n\t\tGroups: []string{},\n\t\tLimit: -1,\n\t\tMeasurement: StatsQueryMeasurementFlows,\n\t\tOffset: -1,\n\t\tResults: []*TimeSeriesQueryResults{},\n\t}\n}", "func newRate(name string) (rate, error) {\n\tfor _, r := range allRates {\n\t\tif r.Name == name {\n\t\t\treturn r, nil\n\t\t}\n\t}\n\treturn rate{}, errors.New(\"rate not found\")\n}", "func NewLabelStat(label Name) LabelStat {\n\treturn LabelStat{Location: label.Location, Name: label}\n}", "func NewStatsExporter(ctx context.Context, opts *Options) (*StatsExporter, error) {\n\tclient, err := newMetricClient(ctx, opts.ClientOptions...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create a metric client: %v\", err)\n\t}\n\n\te := 
&StatsExporter{\n\t\tctx: ctx,\n\t\tclient: client,\n\t\topts: opts,\n\t\tprojDataMap: make(map[string]*projectData),\n\t}\n\n\t// We don't want to modify user-supplied options, so save default options directly in\n\t// exporter.\n\tif opts.GetProjectID != nil {\n\t\te.getProjectID = opts.GetProjectID\n\t} else {\n\t\te.getProjectID = defaultGetProjectID\n\t}\n\tif opts.OnError != nil {\n\t\te.onError = opts.OnError\n\t} else {\n\t\te.onError = defaultOnError\n\t}\n\tif opts.MakeResource != nil {\n\t\te.makeResource = opts.MakeResource\n\t} else {\n\t\te.makeResource = defaultMakeResource\n\t}\n\n\treturn e, nil\n}", "func NewLogStats(ctx context.Context, methodName, sql, sessionUUID string, bindVars map[string]*querypb.BindVariable) *LogStats {\n\treturn &LogStats{\n\t\tCtx: ctx,\n\t\tMethod: methodName,\n\t\tSQL: sql,\n\t\tSessionUUID: sessionUUID,\n\t\tBindVariables: bindVars,\n\t\tStartTime: time.Now(),\n\t}\n}", "func newStatsProvider(\n\tcadvisor cadvisor.Interface,\n\tpodManager PodManager,\n\truntimeCache kubecontainer.RuntimeCache,\n\tcontainerStatsProvider containerStatsProvider,\n) *Provider {\n\treturn &Provider{\n\t\tcadvisor: cadvisor,\n\t\tpodManager: podManager,\n\t\truntimeCache: runtimeCache,\n\t\tcontainerStatsProvider: containerStatsProvider,\n\t}\n}", "func NewSpaceStat(dbfTable *godbf.DbfTable, rowDataMap *rowDataMap) *SpacesStat {\n\tss := &SpacesStat{dbfTable: dbfTable, rowDataMap: rowDataMap}\n\tss.setNumFields(dbfTable, rowDataMap)\n\n\treturn ss\n}", "func NewStatsPeriod() (record *StatsPeriod) {\n\treturn new(StatsPeriod)\n}", "func NewDatafeedStats() *DatafeedStats {\n\tr := &DatafeedStats{}\n\n\treturn r\n}", "func (ms *MetricSet) NewAvg(name string) *AvgMetric {\n\tm := new(AvgMetric)\n\tm.name = name\n\tm.c = ms.metricChan\n\n\tif _, found := ms.states[name]; found {\n\t\tpanic(fmt.Sprintf(\"Metric '%s' already exists\"))\n\t}\n\tms.states[name] = &metricState{\n\t\tType: METRIC_AVG,\n\t\tValue: &metricAvgState{},\n\t}\n\n\treturn m\n}", "func NewPortStats() PortStats {\n\treturn []map[string]interface{}{}\n}", "func GetStatsByName(t *testing.T, ing ingress.Instance, statsName string) (int, error) {\n\tgatewayStats, err := ing.ProxyStats()\n\tif err == nil {\n\t\tsdsUpdates, hasSdsStats := gatewayStats[statsName]\n\t\tif hasSdsStats {\n\t\t\treturn sdsUpdates, nil\n\t\t}\n\t}\n\treturn 0, fmt.Errorf(\"unable to get ingress gateway proxy sds stats: %v\", err)\n}", "func New(name errors.Op) *Metric {\n\treturn &Metric{\n\t\tName: name,\n\t}\n}", "func NewNameGT(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.GT(s.C(FieldNewName), v))\n\t})\n}", "func NewHealth() *Health {\n\tconst vertexShader = `\n\t\t#version 410\n\n\t\tin vec4 coord;\n\t\tout vec2 tcoord;\n\n\t\tvoid main(void) {\n\t\t\tgl_Position = vec4(coord.xy, 0, 1);\n\t\t\ttcoord = coord.zw;\n\t\t}\n\t`\n\n\tconst fragmentShader = `\n\t\t#version 410\n\n\t\tin vec2 tcoord;\n\t\tuniform sampler2D tex;\n\t\tout vec4 frag_color;\n\n\t\tvoid main(void) {\n\t\t\tvec4 texel = texture(tex, tcoord);\n\t\t\tfrag_color = texel;\n\t\t}\n\t`\n\n\th := Health{}\n\n\th.program = createProgram(vertexShader, fragmentShader)\n\tbindAttribute(h.program, 0, \"coord\")\n\n\th.textureUniform = uniformLocation(h.program, \"tex\")\n\th.pointsVBO = newVBO()\n\th.drawableVAO = newPointsVAO(h.pointsVBO, 4)\n\n\trgba, _ := LoadImages([]string{\"textures/health.png\", \"textures/health-empty.png\"}, 16)\n\th.textureUnit = 4\n\tgl.ActiveTexture(uint32(gl.TEXTURE0 + 
h.textureUnit))\n\tgl.GenTextures(1, &h.texture)\n\tgl.BindTexture(gl.TEXTURE_2D, h.texture)\n\n\tgl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE)\n\tgl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE)\n\tgl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR)\n\tgl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST)\n\n\tgl.TexImage2D(\n\t\tgl.TEXTURE_2D,\n\t\t0,\n\t\tgl.RGBA,\n\t\tint32(rgba.Rect.Size().X),\n\t\tint32(rgba.Rect.Size().Y),\n\t\t0,\n\t\tgl.RGBA,\n\t\tgl.UNSIGNED_BYTE,\n\t\tgl.Ptr(rgba.Pix),\n\t)\n\n\tgl.GenerateMipmap(gl.TEXTURE_2D)\n\treturn &h\n}", "func MakeHandleStats(stats *Stats) func(w http.ResponseWriter, r *http.Request){\r\n return func (w http.ResponseWriter, r *http.Request){\r\n //Note totalTime does not picked up by JSON as it's lowercase\r\n jsonStats, err := json.Marshal(stats)\r\n if err != nil {\r\n sendError(w, \"404 Error getting JSON object\")\r\n return\r\n }\r\n w.Header().Set(\"Content-Type\", \"application/json\")\r\n f, err := w.Write(jsonStats)\r\n checkError(f, err)\r\n }\r\n}", "func (moment *MomentStatsItemSet) GetAndCreateStatsItem(statsKey string) *MomentStatsItem {\r\n\tmoment.Lock()\r\n\tdefer moment.Unlock()\r\n\tdefer utils.RecoveredFn()\r\n\r\n\tstatsItem := moment.StatsItemTable[statsKey]\r\n\tif nil == statsItem {\r\n\t\tstatsItem = NewMomentStatsItem()\r\n\t\tstatsItem.StatsName = moment.StatsName\r\n\t\tstatsItem.StatsKey = statsKey\r\n\t\tmoment.StatsItemTable[statsKey] = statsItem\r\n\t}\r\n\treturn statsItem\r\n}", "func NewName(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldNewName), v))\n\t})\n}", "func newGetScoreRequest(name string) *http.Request {\n\treq, _ := http.NewRequest(http.MethodGet, fmt.Sprintf(\"/players/%s\", name), nil)\n\treturn req\n}", "func NewNameGTE(v string) predicate.User {\n\treturn predicate.User(sql.FieldGTE(FieldNewName, v))\n}", "func (mr *MatchResponse) GetStatsByName() (s map[string]*PlayerStats) {\n\ts = make(map[string]*PlayerStats)\n\tfor _, p := range mr.Players {\n\t\ts[p.Attributes.Name] = &p.Attributes.Stats\n\t}\n\treturn\n}", "func newVisual(name string) *Visual {\n\tvisual := Visual{ID: uuid.New(), Name: name} // FIXME: uuid is randomly generated, so there could be a collission\n\n\treturn &visual\n}", "func newPasswordHasherStats(logger *log.Logger) *passwordHasherStats {\n\treturn &passwordHasherStats{\n\t\tqueue: make(chan microseconds, 1),\n\t\ttimes: make([]microseconds, 0),\n\t\tlogger: logger,\n\t}\n}", "func TestNewStatistics(t *testing.T) {\n\tstats := NewStatistics()\n\n\tassert.Zero(t, stats.GetPktLoss())\n\tassert.Zero(t, stats.GetRTTAvg())\n\tassert.Zero(t, stats.GetRTTMDev())\n\tassert.Zero(t, stats.GetRTTMax())\n\tassert.Zero(t, stats.GetRTTMin())\n\tassert.Zero(t, stats.GetTotalPending())\n\tassert.Zero(t, stats.GetTotalRecv())\n\tassert.Zero(t, stats.GetTotalSent())\n\tassert.Zero(t, stats.GetTotalTTLExpired())\n\tassert.Zero(t, stats.GetTotalTimedOut())\n}", "func NewStatRepo(rawStats <-chan *stat.Stat, shutdown <-chan bool) *StatRepo {\n\treturn &StatRepo{\n\t\trawStats: rawStats,\n\t\tshutdown: shutdown,\n\t}\n}", "func (c *Client) Stats(ctx context.Context, data *StatsRequest) (*StatsResponse, error) {\n\treq, err := http.NewRequestWithContext(ctx, http.MethodPost, \"/?Action=Stats\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tq := req.URL.Query()\n\tif v := data.Version; v != nil {\n\t\tq.Add(\"Version\", *v)\n\t}\n\treq.URL.RawQuery = 
q.Encode()\n\n\tif v := data.XTopService; v != nil {\n\t\treq.Header.Set(\"X-Top-Service\", *v)\n\t}\n\tif v := data.XTopRegion; v != nil {\n\t\treq.Header.Set(\"X-Top-Region\", *v)\n\t}\n\n\tif v, ok := ctx.Value(\"K_LOGID\").(string); ok {\n\t\treq.Header.Set(\"X-TT-LOGID\", v)\n\t}\n\n\tresp, err := c.do(ctx, req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar payload StatsResponse\n\td := json.NewDecoder(resp.Body)\n\tif err := d.Decode(&payload); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &payload, nil\n}", "func NewStatsController(repository comic.StatsRepository) *StatsController {\n\treturn &StatsController{\n\t\tstatsRepository: repository,\n\t}\n}", "func (fs FS) NewFNATStats() (FNATStats, error) {\n\tfile, err := os.Open(fs.Path(\"net/ip_vs_stats\"))\n\tif err != nil {\n\t\treturn FNATStats{}, err\n\t}\n\tdefer file.Close()\n\n\treturn parseFNATStats(file)\n}", "func GetStats() Stats {\r\n\r\n\treturn stats\r\n}", "func createStat(votes *Vote) string {\n\n\tstats := NewStatistics(votes)\n\n\tstr := \"Total: \" + strconv.Itoa(stats.Total) + \"\\n\"\n\tfor value, users := range stats.Transformed {\n\t\tstr += value + \" (\" + strconv.Itoa(len(users)) + \"): \" + strings.Join(users, \", \") + \"\\n\"\n\t}\n\n\treturn str\n\n}", "func NewCmdDBStats(o *StatsOption) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"stats\",\n\t\tShort: \"Get statistics\",\n\t\tLong: `This command get statistics`,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tif err := o.Validate(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn o.Run()\n\t\t},\n\t}\n\n\tcmd.Flags().StringVarP(&o.queryName, \"query\", \"q\", \"\", \"stats query name\")\n\tcmd.Flags().IntVarP(&o.year, \"year\", \"y\", 0, \"year\")\n\n\treturn cmd\n}", "func (c *socks5ClientConn) CreateStatsEvent() stats.Event {\n\tread, written := c.GetBytes()\n\tevent := stats.Event{\n\t\tClientAddr: c.GetClientAddr(),\n\t\tInternalAddr: c.GetInternalAddr(),\n\t\tExternalAddr: c.GetExternalAddr(),\n\t\tRemoteAddr: c.request.RemoteAddr.TCPAddr,\n\n\t\tBytesRead: read,\n\t\tBytesWritten: written,\n\n\t\tSocksCommand: c.request.Command,\n\t\tSocksReplyCode: c.lastReplyCode,\n\n\t\tElapsed: time.Now().Sub(c.GetStartTime()),\n\t}\n\n\treturn event\n}", "func (s Obj_value) NewName(n int32) (capnp.TextList, error) {\n\ts.Struct.SetUint16(4, 0)\n\tl, err := capnp.NewTextList(s.Struct.Segment(), n)\n\tif err != nil {\n\t\treturn capnp.TextList{}, err\n\t}\n\terr = s.Struct.SetPtr(0, l.List.ToPtr())\n\treturn l, err\n}", "func NewNameContains(v string) predicate.User {\n\treturn predicate.User(sql.FieldContains(FieldNewName, v))\n}", "func NewStatistics(loggingPeriod time.Duration) *Statistics {\n\tsw := Statistics{\n\t\tstatistics: make(chan uint8, statisticsChannelSize),\n\t\tcounter: 0,\n\t\tstart: time.Now(),\n\t\tloggingPeriod: loggingPeriod,\n\t}\n\tgo sw.run()\n\treturn &sw\n}", "func (s *Stats) Add() {\n\ts.mutex.Lock()\n\ts.Unknown += 1\n\ts.mutex.Unlock()\n}", "func New() *Sausage {\n\tAST := AST{}\n\n\treturn &Sausage{\n\t\tTree: &AST,\n\t}\n}", "func (wds *WeaponAISystem) New(w *ecs.World) {\n\n}", "func NewDBStats(db *sql.DB, name string, labels []metrics.Label) DBStats {\n\tdbstats := &dBStats{\n\t\tdb: db,\n\t\tname: fmt.Sprintf(\"dbstats.%s\", name),\n\t\tlabels: labels,\n\t}\n\n\tdbstats.sched = util.NewScheduledExecutor(defaultTickTime, dbstats.emitStats)\n\n\treturn dbstats\n}", "func (n *namest) createName(db *sql.DB) error {\n\terr := db.QueryRow(\n\t\t\"INSERT INTO 
names(name) VALUES($1) RETURNING name\",\n\t\tn.Name).Scan(&n.Name)\n\tdb.Exec(\"UPDATE names SET count = count + 1 WHERE name=$1\",\n\t\tn.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func newCard(r Rank, s Suit) Card {\n\tc := Card{suit: s, rank: r}\n\tc.name = fmt.Sprintf(\"%s of %s\", longRank(r), s)\n\tc.short = fmt.Sprintf(\"%s%s\", shortRank(r), string(s[0]))\n\n\treturn c\n}", "func New(name string, initialPopulation int) *Speacies {\n\treturn &Speacies{\n\t\tName: name,\n\t\tpopulation: initialPopulation,\n\t}\n}", "func (s *Stats) Copy() *Stats {\n\treturn &Stats{\n\t\ts.FlowName,\n\t\ts.All.Copy(),\n\t\ts.Succeeded.Copy(),\n\t\ts.Failed.Copy(),\n\t\ts.Running.Copy(),\n\t\ts.Pending.Copy(),\n\t}\n}" ]
[ "0.63536394", "0.6279233", "0.6245355", "0.60528356", "0.6045652", "0.6042904", "0.59604484", "0.58574194", "0.57866055", "0.57197595", "0.56906897", "0.5679827", "0.5489235", "0.5416266", "0.53946114", "0.5381432", "0.53164285", "0.5296165", "0.5270525", "0.52274466", "0.5226704", "0.5187714", "0.5187714", "0.5137495", "0.51349485", "0.5091631", "0.50686413", "0.5027326", "0.5023885", "0.50191003", "0.50146484", "0.49520144", "0.4900958", "0.48968345", "0.48800582", "0.48734567", "0.48697275", "0.48689246", "0.48672807", "0.4865222", "0.48455596", "0.48370057", "0.48073608", "0.48064366", "0.48027912", "0.47989574", "0.4794956", "0.4790225", "0.47876137", "0.47833544", "0.47778776", "0.47752446", "0.47649598", "0.4756449", "0.474097", "0.47288203", "0.4721767", "0.47114927", "0.47034067", "0.46925744", "0.4679453", "0.46492562", "0.4643376", "0.46294597", "0.46213725", "0.46192297", "0.46160743", "0.46093163", "0.45996562", "0.45854142", "0.45787618", "0.4566579", "0.45551604", "0.45496857", "0.45353538", "0.45215163", "0.45180073", "0.45166382", "0.45041442", "0.44753024", "0.44715095", "0.44662493", "0.4458596", "0.44576412", "0.44575676", "0.44523776", "0.44513422", "0.4437096", "0.44293532", "0.44170287", "0.4405742", "0.44053003", "0.43955564", "0.439542", "0.4393789", "0.43900442", "0.4385242", "0.43746653", "0.43686858", "0.4364668" ]
0.6431501
0
statsFormatWrite renders the context for a list of containers statistics
func statsFormatWrite(ctx formatter.Context, Stats []StatsEntry, osType string, trunc bool) error {
	render := func(format func(subContext formatter.SubContext) error) error {
		for _, cstats := range Stats {
			statsCtx := &statsContext{
				s:     cstats,
				os:    osType,
				trunc: trunc,
			}
			if err := format(statsCtx); err != nil {
				return err
			}
		}
		return nil
	}
	memUsage := memUseHeader
	if osType == winOSType {
		memUsage = winMemUseHeader
	}
	statsCtx := statsContext{}
	statsCtx.Header = formatter.SubHeaderContext{
		"Container":        containerHeader,
		"Name":             formatter.NameHeader,
		"ID":               formatter.ContainerIDHeader,
		"CPUPerc":          cpuPercHeader,
		"MemUsage":         memUsage,
		"MemPerc":          memPercHeader,
		"NetIO":            netIOHeader,
		"BlockIO":          blockIOHeader,
		"PIDs":             pidsHeader,
		"CurrentMemoryMin": currentMemoryMinHeader,
		"CurrentMemoryMax": currentMemoryMaxHeader,
		"OptiMemoryMin":    optiMemoryMinHeader,
		"OptiMemoryMax":    optiMemoryMaxHeader,
		"OptiCPUNumber":    optiCPUNumberHeader,
		"UsedCPUPerc":      usedCPUPercHeader,
		"OptiCPUTime":      optiCPUTimeHeader,
	}
	statsCtx.os = osType
	return ctx.Write(&statsCtx, render)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (this *ContainerCtl) Stats(ctx context.Context) {\n\tvar id string\n\tid = ctx.Params().Get(\"id\")\n\n\tcli := GetDockerClient()\n\tresp, err := cli.ContainerStats(stdContext.Background(), id, true)\n\tif err != nil {\n\t\tlog.Println(\"ContainerExport err:\", err.Error())\n\t\tthis.ReturnJSon(ctx, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\tthis.ReturnJSon(ctx, http.StatusOK, \"ok\", resp)\n\treturn\n}", "func ContainerWrite(ctx formatter.Context, containers []api.ContainerSummary) error {\n\trender := func(format func(subContext formatter.SubContext) error) error {\n\t\tfor _, container := range containers {\n\t\t\terr := format(&ContainerContext{trunc: ctx.Trunc, c: container})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\treturn ctx.Write(NewContainerContext(), render)\n}", "func PrintStats(w http.ResponseWriter, r *http.Request) {\n var b bytes.Buffer\n // Print the totals\n b.WriteString(\"# HELP logstat_request_total logstat_request_total\\n\")\n b.WriteString(\"# TYPE logstat_request_total counter\\n\")\n for service, sdata := range Stats {\n for status, stdata := range sdata {\n for method, mdata := range stdata {\n b.WriteString(\"logstat_request_total\")\n b.WriteString(\"{service=\\\"\")\n b.WriteString(service)\n b.WriteString(\"\\\",status=\\\"\")\n b.WriteString(strconv.Itoa(status))\n b.WriteString(\"\\\",method=\\\"\")\n b.WriteString(method)\n b.WriteString(\"\\\"} \")\n b.WriteString(strconv.Itoa(int(mdata[CounterRequests])))\n b.WriteString(\"\\n\")\n }\n }\n }\n // Print the time to serve\n b.WriteString(\"# HELP logstat_request_tts_total logstat_request_tts_total\\n\")\n b.WriteString(\"# TYPE logstat_request_tts_total counter\\n\")\n for service, sdata := range Stats {\n for status, stdata := range sdata {\n for method, mdata := range stdata {\n b.WriteString(\"logstat_request_tts_total\")\n b.WriteString(\"{service=\\\"\")\n b.WriteString(service)\n b.WriteString(\"\\\",status=\\\"\")\n b.WriteString(strconv.Itoa(status))\n b.WriteString(\"\\\",method=\\\"\")\n b.WriteString(method)\n b.WriteString(\"\\\"} \")\n b.WriteString(strconv.FormatFloat(mdata[CounterTts], 'f', 3, 64))\n b.WriteString(\"\\n\")\n }\n }\n }\n w.Write([]byte(b.String()))\n}", "func (t *Compose) Stats() {\n\tif !t.statsRunning {\n\t\tt.statsRunning = true\n\t\tt.statsQuit = make(chan struct{})\n\t\tgo func() {\n\t\t\trunning := false\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-t.statsQuit:\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t\tif !running {\n\t\t\t\t\t\trunning = true\n\t\t\t\t\t\tcmd := exec.Command(\"docker\", \"stats\", \"--no-stream\", \"--format\", \"\\\"{{.Name}}\\\\t{{.CPUPerc}}\\\\t{{.MemUsage}}\\\\t{{.MemPerc}}\\\"\")\n\t\t\t\t\t\tout, err := cmd.Output()\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tt.emitError(err.Error())\n\t\t\t\t\t\t}\n\t\t\t\t\t\treg := regexp.MustCompile(\"\\n\")\n\t\t\t\t\t\tlines := reg.Split(string(out), -1)\n\t\t\t\t\t\tlines = lines[:len(lines)-1]\n\t\t\t\t\t\tstatsa := []stats{}\n\t\t\t\t\t\tfor _, line := range lines {\n\t\t\t\t\t\t\treg = regexp.MustCompile(\"\\t\")\n\t\t\t\t\t\t\tcontArr := reg.Split(line, -1)\n\t\t\t\t\t\t\tname := strings.Replace(contArr[0], \"_1\", \"\", -1)\n\t\t\t\t\t\t\tname = strings.Replace(name, t.vuexState.Store.Settings.ContainerPrefix+\"_\", \"\", -1)\n\t\t\t\t\t\t\tname = strings.Replace(name, `\"`, \"\", -1)\n\t\t\t\t\t\t\tstat := stats{\n\t\t\t\t\t\t\t\tName: name,\n\t\t\t\t\t\t\t\tCPUPercString: contArr[1],\n\t\t\t\t\t\t\t\tCPUPerc: 
strings.Replace(contArr[1], `%`, \"\", -1),\n\t\t\t\t\t\t\t\tMemoryUseage: contArr[2],\n\t\t\t\t\t\t\t\tMemoryPercentString: strings.Replace(contArr[3], `\"`, \"\", -1),\n\t\t\t\t\t\t\t\tMemoryPercent: strings.Replace(contArr[3], `%\"`, \"\", -1),\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tstatsa = append(statsa, stat)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tres, mErr := json.Marshal(statsa)\n\t\t\t\t\t\tif mErr != nil {\n\t\t\t\t\t\t\tt.emitError(mErr.Error())\n\t\t\t\t\t\t}\n\t\t\t\t\t\tuEnc := b64.URLEncoding.EncodeToString(res)\n\t\t\t\t\t\tt.runtime.Events.Emit(\"stats\", uEnc)\n\t\t\t\t\t\trunning = false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ttime.Sleep(5 * time.Second)\n\t\t\t}\n\t\t}()\n\t}\n}", "func statsHandler(w http.ResponseWriter, r *http.Request) {\n\tdata := core.GetStats()\n\tb, err := json.Marshal(data)\n\tif err != nil {\n\t\thttp.Error(w, \"Error marshalling JSON\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tfmt.Fprintf(w, \"%s\", b)\n}", "func (m *Monitor) Stats(ctx *context.Context) {\n\tctx.JSON(m.Holder.GetStats())\n}", "func RenderSummaryStats(w io.Writer, stats SummaryStats) error {\n\tvar err error\n\t_, err = fmt.Fprintf(w, \"Your total expenses: %.7f DASH\\n\\n\", stats.TotalCost)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttw := tabwriter.NewWriter(w, 8, 8, 1, '\\t', tabwriter.Debug|tabwriter.AlignRight)\n\tvar (\n\t\trequestStats []Stats\n\t\tnetworkStats []Stats\n\t)\n\tfor _, stats := range stats.GroupedStats.Slice() {\n\t\tswitch stats.Type {\n\t\tcase RequestStatsType:\n\t\t\trequestStats = append(requestStats, stats)\n\t\tcase NetworkStatsType:\n\t\t\tnetworkStats = append(networkStats, stats)\n\t\t}\n\t}\n\tif len(requestStats) > 0 {\n\t\t_, err := io.WriteString(w, \"Summary statistics for all performed requests\\n\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = writeTable(tw, []string{\"Request URL\", \"Size/bytes\", \"Elapsed/ms\", \"Cost/dash\"}, requestStats)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t_, err = fmt.Fprintf(w, \"\\n\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(networkStats) > 0 {\n\t\t_, err := io.WriteString(w, \"Summary statistics for all used networks\\n\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = writeTable(tw, []string{\"Network\", \"Size/bytes\", \"Elapsed/ms\", \"Cost/dash\"}, networkStats)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func StatsHandler(w http.ResponseWriter, _ *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\te := json.NewEncoder(w)\n\terr := e.Encode(httpstats.Data())\n\tif err != nil {\n\t\tlog.Println(\"Error encoding data:\", err)\n\t}\n}", "func (d *DBClient) StatsHandler(w http.ResponseWriter, req *http.Request, next http.HandlerFunc) {\n\tstats := d.Counter.Flush()\n\n\tcount, err := d.Cache.RecordsCount()\n\n\tif err != nil {\n\t\tlog.Error(err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tvar sr statsResponse\n\tsr.Stats = stats\n\tsr.RecordsCount = count\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tb, err := json.Marshal(sr)\n\n\tif err != nil {\n\t\tlog.Error(err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t} else {\n\t\tw.Write(b)\n\t\treturn\n\t}\n\n}", "func writeStats(to *os.File, final bool, s, t stats.Stats) {\n\tp := fmt.Fprintf\n\tpn := prettyNumber\n\tpb := prettyNumBytes\n\tpl := prettyLatency\n\tpt := prettyTimeStamp\n\tif final {\n\t\twriteStatsHeader(to)\n\t\tp(to, 
statsPrintHeader, pt(), \"Put\",\n\t\t\tpn(t.TotalPuts()),\n\t\t\tpb(t.TotalPutBytes()),\n\t\t\tpl(t.MinPutLatency(), t.AvgPutLatency(), t.MaxPutLatency()),\n\t\t\tpb(t.PutThroughput(time.Now())),\n\t\t\tpn(t.TotalErrPuts()))\n\t\tp(to, statsPrintHeader, pt(), \"Get\",\n\t\t\tpn(t.TotalGets()),\n\t\t\tpb(t.TotalGetBytes()),\n\t\t\tpl(t.MinGetLatency(), t.AvgGetLatency(), t.MaxGetLatency()),\n\t\t\tpb(t.GetThroughput(time.Now())),\n\t\t\tpn(t.TotalErrGets()))\n\t} else {\n\t\t// show interval stats; some fields are shown of both interval and total, for example, gets, puts, etc\n\t\tif s.TotalPuts() != 0 {\n\t\t\tp(to, statsPrintHeader, pt(), \"Put\",\n\t\t\t\tpn(s.TotalPuts())+\"(\"+pn(t.TotalPuts())+\")\",\n\t\t\t\tpb(s.TotalPutBytes())+\"(\"+pb(t.TotalPutBytes())+\")\",\n\t\t\t\tpl(s.MinPutLatency(), s.AvgPutLatency(), s.MaxPutLatency()),\n\t\t\t\tpb(s.PutThroughput(time.Now()))+\"(\"+pb(t.PutThroughput(time.Now()))+\")\",\n\t\t\t\tpn(s.TotalErrPuts())+\"(\"+pn(t.TotalErrPuts())+\")\")\n\t\t}\n\t\tif s.TotalGets() != 0 {\n\t\t\tp(to, statsPrintHeader, pt(), \"Get\",\n\t\t\t\tpn(s.TotalGets())+\"(\"+pn(t.TotalGets())+\")\",\n\t\t\t\tpb(s.TotalGetBytes())+\"(\"+pb(t.TotalGetBytes())+\")\",\n\t\t\t\tpl(s.MinGetLatency(), s.AvgGetLatency(), s.MaxGetLatency()),\n\t\t\t\tpb(s.GetThroughput(time.Now()))+\"(\"+pb(t.GetThroughput(time.Now()))+\")\",\n\t\t\t\tpn(s.TotalErrGets())+\"(\"+pn(t.TotalErrGets())+\")\")\n\t\t}\n\t}\n}", "func TestContainerListStats(t *testing.T) {\n\tvar (\n\t\tstats []*runtime.ContainerStats\n\t\terr error\n\t)\n\tt.Logf(\"Create a pod config and run sandbox container\")\n\tsb, sbConfig := PodSandboxConfigWithCleanup(t, \"running-pod\", \"statsls\")\n\n\tpauseImage := images.Get(images.Pause)\n\tEnsureImageExists(t, pauseImage)\n\n\tt.Logf(\"Create a container config and run containers in a pod\")\n\tcontainerConfigMap := make(map[string]*runtime.ContainerConfig)\n\tfor i := 0; i < 3; i++ {\n\t\tcName := fmt.Sprintf(\"container%d\", i)\n\t\tcontainerConfig := ContainerConfig(\n\t\t\tcName,\n\t\t\tpauseImage,\n\t\t\tWithTestLabels(),\n\t\t\tWithTestAnnotations(),\n\t\t)\n\t\tcn, err := runtimeService.CreateContainer(sb, containerConfig, sbConfig)\n\t\trequire.NoError(t, err)\n\t\tcontainerConfigMap[cn] = containerConfig\n\t\tdefer func() {\n\t\t\tassert.NoError(t, runtimeService.RemoveContainer(cn))\n\t\t}()\n\t\trequire.NoError(t, runtimeService.StartContainer(cn))\n\t\tdefer func() {\n\t\t\tassert.NoError(t, runtimeService.StopContainer(cn, 10))\n\t\t}()\n\t}\n\n\tt.Logf(\"Fetch all container stats\")\n\trequire.NoError(t, Eventually(func() (bool, error) {\n\t\tstats, err = runtimeService.ListContainerStats(&runtime.ContainerStatsFilter{})\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tfor _, s := range stats {\n\t\t\tif s.GetWritableLayer().GetTimestamp() == 0 {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\t}, time.Second, 30*time.Second))\n\n\tt.Logf(\"Verify all container stats\")\n\tfor _, s := range stats {\n\t\ttestStats(t, s, containerConfigMap[s.GetAttributes().GetId()])\n\t}\n}", "func fprintStats(w io.Writer, q *QueryBenchmarker) {\n\tmaxKeyLength := 0\n\tkeys := make([]string, 0, len(q.statMapping))\n\tfor k := range q.statMapping {\n\t\tif len(k) > maxKeyLength {\n\t\t\tmaxKeyLength = len(k)\n\t\t}\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\tfor _, k := range keys {\n\t\tv := q.statMapping[k]\n\t\tminRate := 1e3 / v.Min\n\t\tmeanRate := 1e3 / v.Mean\n\t\tmaxRate := 1e3 / v.Max\n\t\tpaddedKey := 
fmt.Sprintf(\"%s\", k)\n\t\tfor len(paddedKey) < maxKeyLength {\n\t\t\tpaddedKey += \" \"\n\t\t}\n\t\tkStats := make(map[string]interface{})\n\t\tkStats[\"min\"] = v.Min\n\t\tkStats[\"minRate\"] = minRate\n\t\tkStats[\"mean\"] = v.Mean\n\t\tkStats[\"meanRate\"] = meanRate\n\t\tkStats[\"max\"] = v.Max\n\t\tkStats[\"maxRate\"] = maxRate\n\t\tkStats[\"count\"] = v.Count\n\t\tkStats[\"sum\"] = v.Sum / 1e3\n\t\tq.json[k] = kStats\n\t\tif !q.doJson {\n\t\t\t_, err := fmt.Fprintf(w, \"%s : min: %8.2fms (%7.2f/sec), mean: %8.2fms (%7.2f/sec), max: %7.2fms (%6.2f/sec), count: %8d, sum: %5.1fsec \\n\", paddedKey, v.Min, minRate, v.Mean, meanRate, v.Max, maxRate, v.Count, v.Sum/1e3)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n\tq.json[\"totalQueries\"] = q.totalQueries\n\tq.json[\"wallClockTime\"] = q.wallTook.Seconds()\n\tq.json[\"queryRate\"] = float64(q.totalQueries) / float64(q.wallTook.Seconds())\n\tq.json[\"workers\"] = q.workers\n\tq.json[\"batchSize\"] = q.batchSize\n\tif q.doJson {\n\t\tfor k, v := range q.json {\n\t\t\tif _, err := json.Marshal(v); err != nil {\n\t\t\t\tq.json[k] = \"\"\n\t\t\t}\n\t\t}\n\t\tb, err := json.Marshal(q.json)\n\t\tif err != nil {\n\t\t\tlog.Println(\"error:\", err)\n\t\t}\n\t\tos.Stdout.Write(b)\n\t}\n}", "func outputStatistics() {/*\n fmt.Println(\"Total Requests: \" + strconv.Itoa(requestCodeMap[\"total\"]))\n fmt.Println(\"100 Status Code Count: \" + strconv.Itoa(requestCodeMap[\"100\"]))\n fmt.Println(\"200 Status Code Count: \" + strconv.Itoa(requestCodeMap[\"200\"]))\n fmt.Println(\"300 Status Code Count: \" + strconv.Itoa(requestCodeMap[\"300\"]))\n fmt.Println(\"400 Status Code Count: \" + strconv.Itoa(requestCodeMap[\"400\"]))\n fmt.Println(\"500 Status Code Count: \" + strconv.Itoa(requestCodeMap[\"500\"]))\n fmt.Println(\"408 (Request Timeout) Status Code Count: \" + strconv.Itoa(requestCodeMap[\"error\"]))\n */\n\n fmt.Println(Counter.OutputKey(\"Total\"))\n fmt.Println(Counter.OutputKey(\"100s\"))\n fmt.Println(Counter.OutputKey(\"200s\"))\n fmt.Println(Counter.OutputKey(\"300s\"))\n fmt.Println(Counter.OutputKey(\"400s\"))\n fmt.Println(Counter.OutputKey(\"500s\"))\n fmt.Println(Counter.OutputKey(\"Errors\"))\n}", "func printStats(stats []statisic, hash string) {\n\tw := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', tabwriter.AlignRight|tabwriter.Debug)\n\n\tfmt.Fprintf(w, \"%s(w=%d):\\n\", hash, sketchWidth)\n\tfmt.Fprintf(w, \"data set\\tmax. abs.\\tavg. abs.\\tmax. rel.\\tavg. 
rel.\\t# exact\\n\")\n\n\tfor i := 0; i < len(stats); i++ {\n\t\tstat := stats[i]\n\t\tfmt.Fprintf(w, \"%s\\t%d\\t%d\\t%.2f\\t%.2f\\t%d\\n\", filePaths[i], stat.maxAbs, stat.avgAbs, stat.maxRel, stat.avgRel, 100-stat.misses)\n\t}\n\tfmt.Fprintln(w)\n\tw.Flush()\n}", "func statsHandler(a *sir.ApplicationContext, c web.C, w http.ResponseWriter, r *http.Request) (int, error) {\n\t// Number of available names in the pool\n\tremaining, _ := a.Redis.SCard(a.PoolKey).Result()\n\t// Number of taken names in the pool\n\ttaken, _ := a.Redis.SCard(a.AllocatedKey).Result()\n\t// Remaining\n\ttotal := remaining + taken\n\n\tresp, _ := json.Marshal(&statsResponse{\n\t\tTotal: total,\n\t\tTaken: taken,\n\t\tRemaining: remaining,\n\t})\n\n\tstatus := 200\n\n\tw.WriteHeader(status)\n\tw.Write(resp)\n\n\treturn status, nil\n}", "func (s *Server) ListContainerStats(ctx context.Context, req *types.ListContainerStatsRequest) (*types.ListContainerStatsResponse, error) {\n\tctrList, err := s.ContainerServer.ListContainers(\n\t\tfunc(container *oci.Container) bool {\n\t\t\treturn container.StateNoLock().Status != oci.ContainerStateStopped\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfilter := req.Filter\n\tif filter != nil {\n\t\tcFilter := &types.ContainerFilter{\n\t\t\tID: req.Filter.ID,\n\t\t\tPodSandboxID: req.Filter.PodSandboxID,\n\t\t\tLabelSelector: req.Filter.LabelSelector,\n\t\t}\n\t\tctrList = s.filterContainerList(ctx, cFilter, ctrList)\n\t}\n\n\tallStats := make([]*types.ContainerStats, 0, len(ctrList))\n\tfor _, container := range ctrList {\n\t\tsb := s.GetSandbox(container.Sandbox())\n\t\tif sb == nil {\n\t\t\t// Because we don't lock, we will get situations where the container was listed, and then\n\t\t\t// its sandbox was deleted before we got to checking its stats.\n\t\t\t// We should not log in this expected situation.\n\t\t\tcontinue\n\t\t}\n\t\tcgroup := sb.CgroupParent()\n\t\tstats, err := s.Runtime().ContainerStats(ctx, container, cgroup)\n\t\tif err != nil {\n\t\t\t// ErrCgroupDeleted is another situation that will happen if the container\n\t\t\t// is deleted from underneath the call to this function.\n\t\t\tif !errors.Is(err, cgroups.ErrCgroupDeleted) {\n\t\t\t\t// The other errors are much less likely, and possibly useful to hear about.\n\t\t\t\tlog.Warnf(ctx, \"Unable to get stats for container %s: %v\", container.ID(), err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tresponse := s.buildContainerStats(ctx, stats, container)\n\t\tallStats = append(allStats, response)\n\t}\n\n\treturn &types.ListContainerStatsResponse{\n\t\tStats: allStats,\n\t}, nil\n}", "func tplServerFilterStatsGroups() []string {\n\tstats := []string{\"\", \"\", \"\", \"\"}\n\tbuf := bytes.Buffer{}\n\tstatsGroupsFilter := [][]map[string]string{binlogStatsGroups, cmdStatsGroups, currentStatsGroups, otherStatsGroups}\n\tfor k, statsGroups := range statsGroupsFilter {\n\t\tfor _, statsGroup := range statsGroups {\n\t\t\tfor property, description := range statsGroup {\n\t\t\t\tstatus := \"\"\n\t\t\t\tif checkInSlice(selfConf.Filter, property) {\n\t\t\t\t\tstatus = `checked`\n\t\t\t\t}\n\t\t\t\tbuf.Reset()\n\t\t\t\tbuf.WriteString(`<div class=\"control-group\"><div class=\"controls\"><div class=\"checkbox\"><label><input type=\"checkbox\" name=\"`)\n\t\t\t\tbuf.WriteString(property)\n\t\t\t\tbuf.WriteString(`\" 
`)\n\t\t\t\tbuf.WriteString(status)\n\t\t\t\tbuf.WriteString(`><b>`)\n\t\t\t\tbuf.WriteString(property)\n\t\t\t\tbuf.WriteString(`</b><br/>`)\n\t\t\t\tbuf.WriteString(description)\n\t\t\t\tbuf.WriteString(`</label></div></div></div>`)\n\t\t\t\tstats[k] += buf.String()\n\t\t\t}\n\t\t}\n\t}\n\treturn stats\n}", "func (r *remoteRuntimeService) ListContainerStats(ctx context.Context, filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) {\n\tklog.V(10).InfoS(\"[RemoteRuntimeService] ListContainerStats\", \"filter\", filter)\n\t// Do not set timeout, because writable layer stats collection takes time.\n\t// TODO(random-liu): Should we assume runtime should cache the result, and set timeout here?\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\treturn r.listContainerStatsV1(ctx, filter)\n}", "func (w *StatsDWriter) Write(results Summary) error {\n\tfor k, v := range results {\n\t\t_, err := fmt.Fprintf(w.writer, \"%s:%d|s\\n\", k, v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func getStats() string {\n\tvar mutex = &sync.Mutex{}\n\tvar stat []models.ActionOutput\n\t//using mutex to protect critical section and prevent race conditions.\n\tmutex.Lock()\n\tfor action, actionCounter := range actionMap {\n\t\tavgTime := actionCounter.TotalTime / (float64)(actionCounter.Counter)\n\t\tao := models.ActionOutput{\n\t\t\tAction: action,\n\t\t\tAvg: avgTime,\n\t\t}\n\t\tstat = append(stat, ao)\n\t}\n\tactionStats, _ := json.Marshal(stat)\n\tmutex.Unlock()\n\n\t//convert to serialized json string array\n\treturn string(actionStats)\n\n}", "func print_stats(){\nfmt.Print(\"\\nMemory usage statistics:\\n\")\nfmt.Printf(\"%v names\\n\",len(name_dir))\nfmt.Printf(\"%v replacement texts\\n\",len(text_info))\n}", "func (store Store) WriteStat(w io.Writer) error {\n\treturn store.db.View(func(tx *bolt.Tx) error {\n\t\tusers := tx.Bucket(bucket.RegisterDates).Stats().KeyN\n\t\tsubscriptions := tx.Bucket(bucket.Subscriptions).Stats().KeyN\n\t\tdbSize := float64(tx.Size()) / 1024.0 / 1024.0 // in mb\n\n\t\tphrasesTotal := tx.Bucket(bucket.Phrases).Stats().KeyN\n\t\tphrasesAvg := phrasesTotal / users\n\n\t\tscoretotal, err := sum(tx.Bucket(bucket.Scoretotals), simplesum)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tscoretotalAvg := scoretotal / users\n\n\t\tstudiesTotal := tx.Bucket(bucket.Studies).Stats().KeyN\n\t\tstudiesAvg := studiesTotal / users\n\n\t\tnow := itob(time.Now().Unix())\n\t\tdueStudiesTotal, err := sum(tx.Bucket(bucket.Studytimes), func(v []byte) int {\n\t\t\tif bytes.Compare(v, now) < 1 {\n\t\t\t\treturn 1\n\t\t\t}\n\t\t\treturn 0\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdueStudiesAvg := dueStudiesTotal / users\n\n\t\timportsTotal, err := sum(tx.Bucket(bucket.Imports), simplesum)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\timportsAvg := importsTotal / users\n\n\t\tnotifiesTotal, err := sum(tx.Bucket(bucket.Notifies), simplesum)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tnotifiesAvg := notifiesTotal / users\n\n\t\tzeroscore, err := sum(tx.Bucket(bucket.Zeroscores), simplesum)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tzeroscoreAvg := zeroscore / users\n\n\t\tnewphrasesTotal, err := sum(tx.Bucket(bucket.NewPhrases), count64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tnewphrasesAvg := newphrasesTotal / users\n\n\t\twarnings := \"\"\n\t\tnotNewPhrases := phrasesTotal - newphrasesTotal\n\t\tif n := tx.Bucket(bucket.Studytimes).Stats().KeyN; n != notNewPhrases {\n\t\t\twarnings += 
fmt.Sprintf(\"\\nWARNING: Number of studytimes (%d) does not match phrases - newphrases (%d).\\n\", n, notNewPhrases)\n\t\t}\n\t\tif n := tx.Bucket(bucket.PhraseAddTimes).Stats().KeyN; n != phrasesTotal {\n\t\t\twarnings += fmt.Sprintf(\"\\nWARNING: Number of phraseaddtimes (%d) does not match number of phrases (%d).\\n\", n, phrasesTotal)\n\t\t}\n\n\t\tfmt.Fprintf(\n\t\t\tw, statmsg, users, subscriptions, dbSize,\n\t\t\tphrasesTotal, phrasesAvg,\n\t\t\tscoretotal, scoretotalAvg,\n\t\t\tstudiesTotal, studiesAvg,\n\t\t\tdueStudiesTotal, dueStudiesAvg,\n\t\t\timportsTotal, importsAvg,\n\t\t\tnotifiesTotal, notifiesAvg,\n\t\t\tzeroscore, zeroscoreAvg,\n\t\t\tnewphrasesTotal, newphrasesAvg,\n\t\t\twarnings,\n\t\t)\n\t\treturn nil\n\t})\n}", "func (bs *blplStats) statsJSON() string {\n\tbuf := bytes.NewBuffer(make([]byte, 0, 128))\n\tfmt.Fprintf(buf, \"{\")\n\tfmt.Fprintf(buf, \"\\n \\\"TxnCount\\\": %v,\", bs.txnCount)\n\tfmt.Fprintf(buf, \"\\n \\\"QueryCount\\\": %v,\", bs.queryCount)\n\tfmt.Fprintf(buf, \"\\n \\\"QueriesPerSec\\\": %v,\", bs.queriesPerSec)\n\tfmt.Fprintf(buf, \"\\n \\\"TxnPerSec\\\": %v\", bs.txnsPerSec)\n\tfmt.Fprintf(buf, \"\\n \\\"TxnTime\\\": %v,\", bs.txnTime)\n\tfmt.Fprintf(buf, \"\\n \\\"QueryTime\\\": %v,\", bs.queryTime)\n\tfmt.Fprintf(buf, \"\\n}\")\n\treturn buf.String()\n}", "func (cb *printcb) outputStat(stats map[string]string) error {\n\tidx := len(*cb)\n\t*cb = append(*cb, FileDetails{})\n\tdetails := &(*cb)[idx]\n\tfor key, value := range stats {\n\t\tif err := setTaggedField(details, key, value, false); err != nil {\n\t\t\tglog.Warningf(\"Couldn't set field %v: %v\", key, err)\n\t\t}\n\t}\n\treturn nil\n}", "func RenderStats(uiView View, timeframe int) {\n\tif !uiView.UIEnabled {\n\t\tRenderStatsNoUI(uiView, timeframe)\n\t\treturn\n\t}\n\tif timeframe != uiView.ActiveTimeframe {\n\t\t// The updated timeframe is not to be updated\n\t\treturn\n\t}\n\trenderStatisticsLayout(uiView)\n\n\t// Processing the general stats table headers\n\tstatsHeaders := []string{\n\t\t\"Id\",\n\t\t\"Website\",\n\t\t\"Avg (ms)\",\n\t\t\"Max (ms)\",\n\t\t\"Availability\",\n\t}\n\tTable := [][]string{statsHeaders}\n\t// For each URL\n\tvar urlStatistic *statistics.Statistic\n\tfor id, url := range uiView.Urls {\n\t\turlStatistic = uiView.URLStatistics[url][uiView.ActiveTimeframe]\n\t\tif !math.IsNaN(urlStatistic.Average()) {\n\t\t\t// Append Statistics\n\t\t\tTable = append(Table, []string{\n\t\t\t\tfmt.Sprint(id),\n\t\t\t\tShorten(url),\n\t\t\t\tfmt.Sprintf(\"%.0f\", urlStatistic.Average()),\n\t\t\t\tfmt.Sprintf(\"%v\", urlStatistic.MaxResponseTime()),\n\t\t\t\tfmt.Sprintf(\"%.0f%%\", urlStatistic.Availability()*100.0),\n\t\t\t})\n\t\t}\n\t}\n\t// Rendering the updated table\n\trenderStatTable(Table)\n\n\t// Processing the detailed view\n\tdetailedStatistics := uiView.URLStatistics[uiView.Urls[uiView.ActiveWebsite]]\n\tdetailedHeaders := []string{\n\t\t\"TimeFrame\",\n\t\t\"Avg (ms)\",\n\t\t\"Max (ms)\",\n\t\t\"Availability\",\n\t\t\"Codes\",\n\t}\n\tdetailTable := [][]string{detailedHeaders}\n\tfor id, statistic := range detailedStatistics {\n\t\tif !math.IsNaN(urlStatistic.Average()) {\n\t\t\t// Append Statistics\n\t\t\tdetailTable = append(detailTable, []string{\n\t\t\t\tuiView.TimeframeRepr[id],\n\t\t\t\tfmt.Sprintf(\"%.0f\", statistic.Average()),\n\t\t\t\tfmt.Sprintf(\"%v\", statistic.MaxResponseTime()),\n\t\t\t\tfmt.Sprintf(\"%.0f%%\", statistic.Availability()*100.0),\n\t\t\t\tStatusCodeMapToString(statistic.StatusCodeCount),\n\t\t\t})\n\t\t}\n\t}\n\t// Processing the 
sparkline graph\n\tplotValues := detailedStatistics[2].RecentResponseTime()\n\n\t// Rendering the detailed view\n\trenderStatDetails(uiView, detailTable, plotValues)\n}", "func FormatDebugStats(stats []byte) (string, error) {\n\tvar dogStats map[uint64]metricStat\n\tif err := json.Unmarshal(stats, &dogStats); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// put metrics in order: first is the more frequent\n\torder := make([]uint64, len(dogStats))\n\ti := 0\n\tfor metric := range dogStats {\n\t\torder[i] = metric\n\t\ti++\n\t}\n\n\tsort.Slice(order, func(i, j int) bool {\n\t\treturn dogStats[order[i]].Count > dogStats[order[j]].Count\n\t})\n\n\t// write the response\n\tbuf := bytes.NewBuffer(nil)\n\n\theader := fmt.Sprintf(\"%-40s | %-20s | %-10s | %-20s\\n\", \"Metric\", \"Tags\", \"Count\", \"Last Seen\")\n\tbuf.Write([]byte(header))\n\tbuf.Write([]byte(strings.Repeat(\"-\", len(header)) + \"\\n\"))\n\n\tfor _, key := range order {\n\t\tstats := dogStats[key]\n\t\tbuf.Write([]byte(fmt.Sprintf(\"%-40s | %-20s | %-10d | %-20v\\n\", stats.Name, stats.Tags, stats.Count, stats.LastSeen)))\n\t}\n\n\tif len(dogStats) == 0 {\n\t\tbuf.Write([]byte(\"No metrics processed yet.\"))\n\t}\n\n\treturn buf.String(), nil\n}", "func reportStats(writer http.ResponseWriter, request *http.Request) {\r\n\r\n\terr := json.NewEncoder(writer).Encode(myReport)\r\n\tif err != nil {\r\n\t\tlog.Fatal(err)\r\n\t}\r\n}", "func (s *Store) Stats() []byte {\n\tb, _ := json.Marshal(s.BasicStats)\n\treturn b\n}", "func WriteStats(path string, hot, cold []int64) error {\n\t// Copy before sort.\n\tcold = append([]int64{}, cold...)\n\thot = append([]int64{}, hot...)\n\n\tsort.Slice(cold, func(i, j int) bool { return cold[i] < cold[j] })\n\tsort.Slice(hot, func(i, j int) bool { return hot[i] < hot[j] })\n\n\tpackedCold, err := Pack(cold)\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"failed to pack uploaded items\").Err()\n\t}\n\n\tpackedHot, err := Pack(hot)\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"failed to pack not uploaded items\").Err()\n\t}\n\n\tstatsJSON, err := json.Marshal(struct {\n\t\tItemsCold []byte `json:\"items_cold\"`\n\t\tItemsHot []byte `json:\"items_hot\"`\n\t}{\n\t\tItemsCold: packedCold,\n\t\tItemsHot: packedHot,\n\t})\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"failed to marshal stats json\").Err()\n\t}\n\tif err := ioutil.WriteFile(path, statsJSON, 0600); err != nil {\n\t\treturn errors.Annotate(err, \"failed to write stats json\").Err()\n\t}\n\n\treturn nil\n}", "func StatsOutput(c *VsmStatsCommand, annotations *Annotations, args []string, statusArray []string, stats1 VolumeStats, stats2 VolumeStats) error {\n\n\tvar (\n\t\terr error\n\n\t\tReadLatency int64\n\t\tWriteLatency int64\n\n\t\tAvgReadBlockCountPS int64\n\t\tAvgWriteBlockCountPS int64\n\t)\n\n\t// 10 and 64 represents decimal and bits respectively\n\ti_riops, _ := strconv.ParseInt(stats1.ReadIOPS, 10, 64) // Initial\n\tf_riops, _ := strconv.ParseInt(stats2.ReadIOPS, 10, 64) // Final\n\treadIOPS := f_riops - i_riops\n\n\ti_rtps, _ := strconv.ParseInt(stats1.TotalReadTime, 10, 64)\n\tf_rtps, _ := strconv.ParseInt(stats2.TotalReadTime, 10, 64)\n\treadTimePS := f_rtps - i_rtps\n\n\ti_rbps, _ := strconv.ParseInt(stats1.TotalReadBlockCount, 10, 64)\n\tf_rbps, _ := strconv.ParseInt(stats2.TotalReadBlockCount, 10, 64)\n\treadBlockCountPS := f_rbps - i_rbps\n\n\trThroughput := readBlockCountPS\n\tif readIOPS != 0 {\n\t\tReadLatency = readTimePS / readIOPS\n\t\tAvgReadBlockCountPS = readBlockCountPS / readIOPS\n\t} else 
{\n\t\tReadLatency = 0\n\t\tAvgReadBlockCountPS = 0\n\t}\n\n\ti_wiops, _ := strconv.ParseInt(stats1.WriteIOPS, 10, 64)\n\tf_wiops, _ := strconv.ParseInt(stats2.WriteIOPS, 10, 64)\n\twriteIOPS := f_wiops - i_wiops\n\n\ti_wtps, _ := strconv.ParseInt(stats1.TotalWriteTime, 10, 64)\n\tf_wtps, _ := strconv.ParseInt(stats2.TotalWriteTime, 10, 64)\n\twriteTimePS := f_wtps - i_wtps\n\n\ti_wbcps, _ := strconv.ParseInt(stats1.TotalWriteBlockCount, 10, 64)\n\tf_wbcps, _ := strconv.ParseInt(stats2.TotalWriteBlockCount, 10, 64)\n\twriteBlockCountPS := f_wbcps - i_wbcps\n\n\twThroughput := writeBlockCountPS\n\tif writeIOPS != 0 {\n\t\tWriteLatency = writeTimePS / writeIOPS\n\t\tAvgWriteBlockCountPS = writeBlockCountPS / writeIOPS\n\t} else {\n\t\tWriteLatency = 0\n\t\tAvgWriteBlockCountPS = 0\n\t}\n\n\tss, _ := strconv.ParseFloat(stats2.SectorSize, 64) // Sector Size\n\tss = ss / bytesToMB\n\n\tls, _ := strconv.ParseFloat(stats2.UsedBlocks, 64) // Logical Size\n\tls = ls * ss\n\n\tau, _ := strconv.ParseFloat(stats2.UsedLogicalBlocks, 64) // Actual Used\n\tau = au * ss\n\n\tannotation := Annotation{\n\t\tIQN: annotations.Iqn,\n\t\tVolume: args[0],\n\t\tPortal: annotations.TargetPortal,\n\t\tSize: annotations.VolSize,\n\t}\n\n\t// json formatting and showing default output\n\tif c.Json == \"json\" {\n\n\t\tstat1 := StatsArr{\n\n\t\t\tIQN: annotations.Iqn,\n\t\t\tVolume: args[0],\n\t\t\tPortal: annotations.TargetPortal,\n\t\t\tSize: annotations.VolSize,\n\n\t\t\tReadIOPS: readIOPS,\n\t\t\tWriteIOPS: writeIOPS,\n\n\t\t\tReadThroughput: float64(rThroughput) / bytesToMB, // bytes to MB\n\t\t\tWriteThroughput: float64(wThroughput) / bytesToMB,\n\n\t\t\tReadLatency: float64(ReadLatency) / mic_sec, // Microsecond\n\t\t\tWriteLatency: float64(WriteLatency) / mic_sec,\n\n\t\t\tAvgReadBlockSize: AvgReadBlockCountPS / bytesToKB, // Bytes to KB\n\t\t\tAvgWriteBlockSize: AvgWriteBlockCountPS / bytesToKB,\n\n\t\t\tSectorSize: ss,\n\t\t\tActualUsed: au,\n\t\t\tLogicalSize: ls,\n\t\t}\n\n\t\tdata, err := json.MarshalIndent(stat1, \"\", \"\\t\")\n\n\t\tif err != nil {\n\n\t\t\tpanic(err)\n\t\t}\n\n\t\tos.Stdout.Write(data)\n\n\t} else {\n\n\t\t// Printing in tabular form\n\t\t//\tfmt.Printf(\"%+v\\n\\n\", annotation)\n\t\tdata, err := json.MarshalIndent(annotation, \"\", \"\\t\")\n\n\t\tif err != nil {\n\n\t\t\tpanic(err)\n\t\t}\n\n\t\tos.Stdout.Write(data)\n\n\t\tq := tabwriter.NewWriter(os.Stdout, minwidth, maxwidth, padding, ' ', tabwriter.AlignRight|tabwriter.Debug)\n\n\t\tfmt.Fprintf(q, \"\\n\\nReplica\\tStatus\\tDataUpdateIndex\\t\\n\")\n\t\tfmt.Fprintf(q, \"\\t\\t\\t\\n\")\n\t\tfor i := 0; i < 4; i += 3 {\n\n\t\t\tfmt.Fprintf(q, \"%s\\t%s\\t%s\\t\\n\", statusArray[i], statusArray[i+1], statusArray[i+2])\n\t\t}\n\n\t\tq.Flush()\n\n\t\tw := tabwriter.NewWriter(os.Stdout, minwidth, maxwidth, padding, ' ', tabwriter.AlignRight|tabwriter.Debug)\n\t\tfmt.Println(\"\\n----------- Performance Stats -----------\\n\")\n\t\tfmt.Fprintf(w, \"r/s\\tw/s\\tr(MB/s)\\tw(MB/s)\\trLat(ms)\\twLat(ms)\\t\\n\")\n\t\tfmt.Fprintf(w, \"%d\\t%d\\t%.3f\\t%.3f\\t%.3f\\t%.3f\\t\\n\", readIOPS, writeIOPS, float64(rThroughput)/bytesToMB, float64(wThroughput)/bytesToMB, float64(ReadLatency)/mic_sec, float64(WriteLatency)/mic_sec)\n\t\tw.Flush()\n\n\t\tx := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', tabwriter.AlignRight|tabwriter.Debug)\n\t\tfmt.Println(\"\\n------------ Capacity Stats -------------\\n\")\n\t\tfmt.Fprintf(x, \"Logical(GB)\\tUsed(GB)\\t\\n\")\n\t\tfmt.Fprintf(x, \"%f\\t%f\\t\\n\", ls, au)\n\t\tx.Flush()\n\t}\n\n\treturn err\n}", 
"func (p *cadvisorStatsProvider) ListPodStats(_ context.Context) ([]statsapi.PodStats, error) {\n\t// Gets node root filesystem information and image filesystem stats, which\n\t// will be used to populate the available and capacity bytes/inodes in\n\t// container stats.\n\trootFsInfo, err := p.cadvisor.RootFsInfo()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get rootFs info: %v\", err)\n\t}\n\timageFsInfo, err := p.cadvisor.ImagesFsInfo()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get imageFs info: %v\", err)\n\t}\n\tinfos, err := getCadvisorContainerInfo(p.cadvisor)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get container info from cadvisor: %v\", err)\n\t}\n\n\tfilteredInfos, allInfos := filterTerminatedContainerInfoAndAssembleByPodCgroupKey(infos)\n\t// Map each container to a pod and update the PodStats with container data.\n\tpodToStats := map[statsapi.PodReference]*statsapi.PodStats{}\n\tfor key, cinfo := range filteredInfos {\n\t\t// On systemd using devicemapper each mount into the container has an\n\t\t// associated cgroup. We ignore them to ensure we do not get duplicate\n\t\t// entries in our summary. For details on .mount units:\n\t\t// http://man7.org/linux/man-pages/man5/systemd.mount.5.html\n\t\tif strings.HasSuffix(key, \".mount\") {\n\t\t\tcontinue\n\t\t}\n\t\t// Build the Pod key if this container is managed by a Pod\n\t\tif !isPodManagedContainer(&cinfo) {\n\t\t\tcontinue\n\t\t}\n\t\tref := buildPodRef(cinfo.Spec.Labels)\n\n\t\t// Lookup the PodStats for the pod using the PodRef. If none exists,\n\t\t// initialize a new entry.\n\t\tpodStats, found := podToStats[ref]\n\t\tif !found {\n\t\t\tpodStats = &statsapi.PodStats{PodRef: ref}\n\t\t\tpodToStats[ref] = podStats\n\t\t}\n\n\t\t// Update the PodStats entry with the stats from the container by\n\t\t// adding it to podStats.Containers.\n\t\tcontainerName := kubetypes.GetContainerName(cinfo.Spec.Labels)\n\t\tif containerName == leaky.PodInfraContainerName {\n\t\t\t// Special case for infrastructure container which is hidden from\n\t\t\t// the user and has network stats.\n\t\t\tpodStats.Network = cadvisorInfoToNetworkStats(&cinfo)\n\t\t} else {\n\t\t\tcontainerStat := cadvisorInfoToContainerStats(containerName, &cinfo, &rootFsInfo, &imageFsInfo)\n\t\t\t// NOTE: This doesn't support the old pod log path, `/var/log/pods/UID`. 
For containers\n\t\t\t// using old log path, they will be populated by cadvisorInfoToContainerStats.\n\t\t\tpodUID := types.UID(podStats.PodRef.UID)\n\t\t\tlogs, err := p.hostStatsProvider.getPodContainerLogStats(podStats.PodRef.Namespace, podStats.PodRef.Name, podUID, containerName, &rootFsInfo)\n\t\t\tif err != nil {\n\t\t\t\tklog.ErrorS(err, \"Unable to fetch container log stats\", \"containerName\", containerName)\n\t\t\t} else {\n\t\t\t\tcontainerStat.Logs = logs\n\t\t\t}\n\t\t\tpodStats.Containers = append(podStats.Containers, *containerStat)\n\t\t}\n\t}\n\n\t// Add each PodStats to the result.\n\tresult := make([]statsapi.PodStats, 0, len(podToStats))\n\tfor _, podStats := range podToStats {\n\t\tmakePodStorageStats(podStats, &rootFsInfo, p.resourceAnalyzer, p.hostStatsProvider, false)\n\n\t\tpodUID := types.UID(podStats.PodRef.UID)\n\t\t// Lookup the pod-level cgroup's CPU and memory stats\n\t\tpodInfo := getCadvisorPodInfoFromPodUID(podUID, allInfos)\n\t\tif podInfo != nil {\n\t\t\tcpu, memory := cadvisorInfoToCPUandMemoryStats(podInfo)\n\t\t\tpodStats.CPU = cpu\n\t\t\tpodStats.Memory = memory\n\t\t\tpodStats.Swap = cadvisorInfoToSwapStats(podInfo)\n\t\t\tpodStats.ProcessStats = cadvisorInfoToProcessStats(podInfo)\n\t\t}\n\n\t\tstatus, found := p.statusProvider.GetPodStatus(podUID)\n\t\tif found && status.StartTime != nil && !status.StartTime.IsZero() {\n\t\t\tpodStats.StartTime = *status.StartTime\n\t\t\t// only append stats if we were able to get the start time of the pod\n\t\t\tresult = append(result, *podStats)\n\t\t}\n\t}\n\n\treturn result, nil\n}", "func (c CliCommunicator) Stats() ([]Stats, error) {\n\tout, err := exec.Command(c.DockerPath, c.Command...).Output()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcontainers := strings.Split(string(out), \"\\n\")\n\tstats := make([]Stats, 0)\n\tfor _, con := range containers {\n\t\tif len(con) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar s Stats\n\t\tif err := json.Unmarshal([]byte(con), &s); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tstats = append(stats, s)\n\t}\n\n\treturn stats, nil\n}", "func (c *Container) Stats(ctx context.Context) (*types.StatsJSON, error) {\n\tresponseBody, err := c.client.ContainerStats(ctx, c.id, false /*stream*/)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ContainerStats failed: %v\", err)\n\t}\n\tdefer responseBody.Body.Close()\n\tvar v types.StatsJSON\n\tif err := json.NewDecoder(responseBody.Body).Decode(&v); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to decode container stats: %v\", err)\n\t}\n\treturn &v, nil\n}", "func CreateStats(cluster, namespace, volumeName, deploymentName, mountPath, pathRestic, podName string) string {\n\tvar stats map[string]interface{}\n\tvar nameStats string\n\tif cluster == \"ClusterFrom\" {\n\t\tstats = utils.ReadJson(\"templates/stats\", \"stats_template_from\")\n\t\tnameStats = \"statsFrom\"\n\t} else {\n\t\tstats = utils.ReadJson(\"templates/stats\", \"stats_template_to\")\n\t\tnameStats = \"statsTo\"\n\t}\n\n\tauxName := \"stats-\" + deploymentName\n\tsizeVolume := utils.GetSizeVolume(podName, volumeName, mountPath)\n\tstats[\"name\"] = auxName\n\tstats[\"size\"] = sizeVolume\n\terr := utils.WriteJson(pathRestic, nameStats, stats)\n\tif err != nil {\n\t\tfmt.Println(\"Error creating \" + auxName)\n\t}\n\treturn sizeVolume\n}", "func (collStatList *CollectionStatList) Export(ch chan<- prometheus.Metric) {\n\tfor _, member := range collStatList.Members {\n\t\tls := prometheus.Labels{\n\t\t\t\"db\": member.Database,\n\t\t\t\"coll\": 
member.Name,\n\t\t}\n\t\tcollectionSize.With(ls).Set(float64(member.Size))\n\t\tcollectionObjectCount.With(ls).Set(float64(member.Count))\n\t\tcollectionAvgObjSize.With(ls).Set(float64(member.AvgObjSize))\n\t\tcollectionStorageSize.With(ls).Set(float64(member.StorageSize))\n\t\tcollectionIndexes.With(ls).Set(float64(member.Indexes))\n\t\tcollectionIndexesSize.With(ls).Set(float64(member.IndexesSize))\n\t}\n\tcollectionSize.Collect(ch)\n\tcollectionObjectCount.Collect(ch)\n\tcollectionAvgObjSize.Collect(ch)\n\tcollectionStorageSize.Collect(ch)\n\tcollectionIndexes.Collect(ch)\n\tcollectionIndexesSize.Collect(ch)\n}", "func stats(stats elastic.BulkProcessorStats) {\n\t//构建Workers的json文本\n\tvar workersStr string\n\tvar workers Workers\n\tif err := workers.InitWorkers(stats.Workers); err == nil {\n\t\tworkersStr = workers.String()\n\t}\n\n\t//打印stats信息\n\tlog.Logger.WithFields(logrus.Fields{\n\t\t\"Flushed\": stats.Flushed,\n\t\t\"Committed\": stats.Committed,\n\t\t\"Indexed\": stats.Indexed,\n\t\t\"Created\": stats.Created,\n\t\t\"Updated\": stats.Updated,\n\t\t\"Deleted\": stats.Deleted,\n\t\t\"Succeeded\": stats.Succeeded,\n\t\t\"Failed\": stats.Failed,\n\t\t\"Workers\": workersStr,\n\t}).Info(\"stats info detail\")\n}", "func ContainerStats(client *criapi.RuntimeServiceClient, opts statsOptions, name string) (*MetricsV2, error) {\n\tfilter := &criapi.ContainerStatsFilter{}\n\tif opts.id != \"\" {\n\t\tfilter.Id = opts.id\n\t}\n\tif opts.podID != \"\" {\n\t\tfilter.PodSandboxId = opts.podID\n\t}\n\tif opts.labels != nil {\n\t\tfilter.LabelSelector = opts.labels\n\t}\n\trequest := &criapi.ListContainerStatsRequest{\n\t\tFilter: filter,\n\t}\n\n\tmetrics := &MetricsV2{}\n\tvar err error\n\tif metrics, err = displayStats(client, request, name); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn metrics, nil\n}", "func FormatWrite(ctx formatter.Context, networks []types.NetworkResource) error {\n\trender := func(format func(subContext formatter.SubContext) error) error {\n\t\tfor _, network := range networks {\n\t\t\tnetworkCtx := &networkContext{trunc: ctx.Trunc, n: network}\n\t\t\tif err := format(networkCtx); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\tnetworkCtx := networkContext{}\n\tnetworkCtx.Header = formatter.SubHeaderContext{\n\t\t\"ID\": networkIDHeader,\n\t\t\"Name\": formatter.NameHeader,\n\t\t\"Driver\": formatter.DriverHeader,\n\t\t\"Scope\": formatter.ScopeHeader,\n\t\t\"IPv6\": ipv6Header,\n\t\t\"Internal\": internalHeader,\n\t\t\"Labels\": formatter.LabelsHeader,\n\t\t\"CreatedAt\": formatter.CreatedAtHeader,\n\t}\n\treturn ctx.Write(&networkCtx, render)\n}", "func (c *Client) Stats(indexList []string, extraArgs url.Values) (*Response, error) {\n\tr := Request{\n\t\tIndexList: indexList,\n\t\tExtraArgs: extraArgs,\n\t\tMethod: \"GET\",\n\t\tAPI: \"_stats\",\n\t}\n\n\treturn c.Do(&r)\n}", "func (a API) Stats(c echo.Context) error {\n\tvar output []OutputStat\n\tvar err error\n\n\tif output, err = a.maxRecord(); err != nil {\n\t\tc.Logger().Error(err.Error())\n\t\treturn c.JSON(http.StatusInternalServerError, \"please contact administrator\")\n\t}\n\tif len(output) == 0 {\n\t\treturn c.NoContent(http.StatusNoContent)\n\t}\n\treturn c.JSON(http.StatusOK, output)\n}", "func (s *service) Stats() Stats {\n\ts.m.Lock()\n\tdefer s.m.Unlock()\n\n\tstats := Stats{\n\t\tServiceIdentity: s.serviceIdentity(),\n\t\tEndpoints: make([]*EndpointStats, 0),\n\t\tType: StatsResponseType,\n\t\tStarted: s.started,\n\t}\n\tfor _, endpoint := range s.endpoints 
{\n\t\tendpointStats := &EndpointStats{\n\t\t\tName: endpoint.stats.Name,\n\t\t\tSubject: endpoint.stats.Subject,\n\t\t\tNumRequests: endpoint.stats.NumRequests,\n\t\t\tNumErrors: endpoint.stats.NumErrors,\n\t\t\tLastError: endpoint.stats.LastError,\n\t\t\tProcessingTime: endpoint.stats.ProcessingTime,\n\t\t\tAverageProcessingTime: endpoint.stats.AverageProcessingTime,\n\t\t}\n\t\tif s.StatsHandler != nil {\n\t\t\tdata, _ := json.Marshal(s.StatsHandler(endpoint))\n\t\t\tendpointStats.Data = data\n\t\t}\n\t\tstats.Endpoints = append(stats.Endpoints, endpointStats)\n\t}\n\treturn stats\n}", "func FormatContainerStatuses(ctx context.Context, pod *corev1.Pod, indent int, kubeClientSet kubernetes.Interface, start, end time.Time) string {\n\tstatuses := make([]string, len(pod.Status.ContainerStatuses))\n\tfor i, status := range pod.Status.ContainerStatuses {\n\t\tlogs := \"\"\n\t\tif kubeClientSet != nil {\n\t\t\tcontainerLogs, err := GetContainerLogs(ctx, kubeClientSet, pod.GetName(), pod.GetNamespace(), status.Name, start, end)\n\t\t\tif err != nil {\n\t\t\t\tlogs = fmt.Sprintf(\"error while fetching: %s\", err.Error())\n\t\t\t} else {\n\t\t\t\tlogs = ApplyIdent(string(containerLogs), 2)\n\t\t\t}\n\t\t}\n\n\t\tstatuses[i] = FormatContainerStatus(status, logs, 0)\n\t}\n\n\treturn FormatList(statuses, indent)\n}", "func writeOutputSizes(sizes OutputReport, outPath string) error {\n\tf, err := os.Create(outPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create %s: %s\", outPath, err)\n\t}\n\n\tencoder := json.NewEncoder(f)\n\tencoder.SetIndent(\"\", \" \")\n\tsimpleSizes := make(map[string]interface{})\n\tbudgetSuffix := \".budget\"\n\tcreepBudgetSuffix := \".creepBudget\"\n\t// Owner/context links to provide shortcut to component specific size stats.\n\townerSuffix := \".owner\"\n\tfor name, cs := range sizes {\n\t\tsimpleSizes[name] = cs.Size\n\t\tsimpleSizes[name+budgetSuffix] = cs.Budget\n\t\tsimpleSizes[name+creepBudgetSuffix] = cs.CreepBudget\n\t\tsimpleSizes[name+ownerSuffix] = \"http://go/fuchsia-size-stats/single_component/?f=component:in:\" + url.QueryEscape(name)\n\t}\n\tif err := encoder.Encode(&simpleSizes); err != nil {\n\t\t_ = f.Close()\n\t\treturn fmt.Errorf(\"failed to encode simpleSizes: %s\", err)\n\t}\n\treturn f.Close()\n}", "func MakeHandleStats(stats *Stats) func(w http.ResponseWriter, r *http.Request){\r\n return func (w http.ResponseWriter, r *http.Request){\r\n //Note totalTime does not picked up by JSON as it's lowercase\r\n jsonStats, err := json.Marshal(stats)\r\n if err != nil {\r\n sendError(w, \"404 Error getting JSON object\")\r\n return\r\n }\r\n w.Header().Set(\"Content-Type\", \"application/json\")\r\n f, err := w.Write(jsonStats)\r\n checkError(f, err)\r\n }\r\n}", "func ContainerStats (ctx context.Context, cli *client.Client, containerId string) StatsEntry{\n\tvar MaxStats = StatsEntry{}\n\n\terrChan := make(chan error, 1)\n\tdoneChan\t\t\t:= make (chan bool)\n\tgo collect (ctx, containerId, cli, true, &MaxStats, doneChan, errChan)\n\tfor {\n\t\tselect {\n\t\tcase <-doneChan:\n\t\t\treturn MaxStats\n\t\tcase <-errChan:\n\t\t\treturn MaxStats\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t}\n\n\treturn MaxStats\n}", "func (statsResult *Result) Format() string {\n\tformat := \"\"\n\tformat += fmt.Sprintln(\"Summary:\")\n\tformat += fmt.Sprintf(\"\\tClients:\\t%d\\n\", statsResult.Clients)\n\tformat += fmt.Sprintf(\"\\tParallel calls per client:\\t%d\\n\", statsResult.Parallels)\n\tformat += fmt.Sprintf(\"\\tTotal calls:\\t%d\\n\", 
statsResult.TotalCalls)\n\tformat += fmt.Sprintf(\"\\tTotal time:\\t%.3fs\\n\", statsResult.TotalTime)\n\tformat += fmt.Sprintf(\"\\tRequests per second:\\t%.3f\\n\", statsResult.RequestsPerSecond)\n\tformat += fmt.Sprintf(\"\\tFastest time for request:\\t%.6fms\\n\", statsResult.FastestTimeForRequest)\n\tformat += fmt.Sprintf(\"\\tAverage time per request:\\t%.6fms\\n\", statsResult.AverageTimePerRequest)\n\tformat += fmt.Sprintf(\"\\tSlowest time for request:\\t%.6fms\\n\\n\", statsResult.SlowestTimeForRequest)\n\tformat += fmt.Sprintln(\"Time:\")\n\tformat += fmt.Sprintf(\"\\t00.0001%%\\ttime for request:\\t%.6fms\\n\", statsResult.N000001thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t00.0010%%\\ttime for request:\\t%.6fms\\n\", statsResult.N000010thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t00.0100%%\\ttime for request:\\t%.6fms\\n\", statsResult.N000100thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t00.1000%%\\ttime for request:\\t%.6fms\\n\", statsResult.N001000thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t01.0000%%\\ttime for request:\\t%.6fms\\n\", statsResult.N010000thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t05.0000%%\\ttime for request:\\t%.6fms\\n\", statsResult.N050000thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t10.0000%%\\ttime for request:\\t%.6fms\\n\", statsResult.N100000thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t25.0000%%\\ttime for request:\\t%.6fms\\n\", statsResult.N250000thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t50.0000%%\\ttime for request:\\t%.6fms\\n\", statsResult.N500000thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t75.0000%%\\ttime for request:\\t%.6fms\\n\", statsResult.N750000thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t90.0000%%\\ttime for request:\\t%.6fms\\n\", statsResult.N900000thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t95.0000%%\\ttime for request:\\t%.6fms\\n\", statsResult.N950000thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t99.0000%%\\ttime for request:\\t%.6fms\\n\", statsResult.N990000thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t99.9000%%\\ttime for request:\\t%.6fms\\n\", statsResult.N999000thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t99.9900%%\\ttime for request:\\t%.6fms\\n\", statsResult.N999900thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t99.9990%%\\ttime for request:\\t%.6fms\\n\", statsResult.N999990thMillionthTime)\n\tformat += fmt.Sprintf(\"\\t99.9999%%\\ttime for request:\\t%.6fms\\n\\n\", statsResult.N999999thMillionthTime)\n\n\tif statsResult.TotalRequestBodySizes > 0 {\n\t\tformat += fmt.Sprintln(\"Request:\")\n\t\tformat += fmt.Sprintf(\"\\tTotal request body sizes:\\t%d\\n\", statsResult.TotalRequestBodySizes)\n\t\tformat += fmt.Sprintf(\"\\tAverage body size per request:\\t%.2f Byte\\n\", statsResult.AverageBodySizePerRequest)\n\t\tformat += fmt.Sprintf(\"\\tRequest rate per second:\\t%.2f Byte/s (%.2f MByte/s)\\n\\n\", statsResult.RequestRateBytePerSecond, statsResult.RequestRateMBytePerSecond)\n\t}\n\tif statsResult.TotalResponseBodySizes > 0 {\n\t\tformat += fmt.Sprintln(\"Response:\")\n\t\tformat += fmt.Sprintf(\"\\tTotal response body sizes:\\t%d\\n\", statsResult.TotalResponseBodySizes)\n\t\tformat += fmt.Sprintf(\"\\tAverage body size per response:\\t%.2f Byte\\n\", statsResult.AverageBodySizePerResponse)\n\t\tformat += fmt.Sprintf(\"\\tResponse rate per second:\\t%.2f Byte/s (%.2f MByte/s)\\n\\n\", statsResult.ResponseRateBytePerSecond, statsResult.ResponseRateMBytePerSecond)\n\t}\n\tformat += fmt.Sprintln(\"Result:\")\n\tformat += fmt.Sprintf(\"\\tResponse ok:\\t%d (%.3f%%)\\n\", 
statsResult.ResponseOk, statsResult.ResponseOkPercentile)\n\tformat += fmt.Sprintf(\"\\tErrors:\\t%d (%.3f%%)\\n\", statsResult.Errors, statsResult.ErrorsPercentile)\n\treturn format\n}", "func convertContainerStats(containerStats *container_info.ContainerStats, latestTime *time.Time) (DetailContainerStats, error) {\n\n\tif containerStats.Timestamp.After(*latestTime) {\n\t\t*latestTime = containerStats.Timestamp\n\t}\n\n\tvar stats DetailContainerStats\n\tstats.Timestamp = containerStats.Timestamp\n\tstats.Cpu_usage_seconds_total = containerStats.Cpu.Usage.Total\n\tstats.Cpu_user_seconds_total = containerStats.Cpu.Usage.User\n\tstats.Cpu_system_seconds_total = containerStats.Cpu.Usage.System\n\n\tstats.Memory_usage_bytes = containerStats.Memory.Usage\n\t//stats.Memory_limit_bytes = containerStats.Memory.Usage\n\tstats.Memory_cache = containerStats.Memory.Cache\n\tstats.Memory_rss = containerStats.Memory.RSS\n\tstats.Memory_swap = containerStats.Memory.Swap\n\n\tstats.Network_receive_bytes_total = containerStats.Network.RxBytes\n\tstats.Network_receive_packets_total = containerStats.Network.RxPackets\n\tstats.Network_receive_packets_dropped_total = containerStats.Network.RxDropped\n\tstats.Network_receive_errors_total = containerStats.Network.RxErrors\n\tstats.Network_transmit_bytes_total = containerStats.Network.TxBytes\n\tstats.Network_transmit_packets_total = containerStats.Network.TxPackets\n\tstats.Network_transmit_packets_dropped_total = containerStats.Network.TxDropped\n\tstats.Network_transmit_errors_total = containerStats.Network.TxErrors\n\n\tstats.Filesystem = converFsStats(containerStats.Filesystem)\n\n\tstats.Diskio_service_bytes_async = sumDiskStats(containerStats.DiskIo.IoServiceBytes, DiskStatsAsync)\n\tstats.Diskio_service_bytes_read = sumDiskStats(containerStats.DiskIo.IoServiceBytes, DiskStatsRead)\n\tstats.Diskio_service_bytes_sync = sumDiskStats(containerStats.DiskIo.IoServiceBytes, DiskStatsSync)\n\tstats.Diskio_service_bytes_total = sumDiskStats(containerStats.DiskIo.IoServiceBytes, DiskStatsTotal)\n\tstats.Diskio_service_bytes_write = sumDiskStats(containerStats.DiskIo.IoServiceBytes, DiskStatsWrite)\n\n\tstats.Tasks_state_nr_sleeping = containerStats.TaskStats.NrSleeping\n\tstats.Tasks_state_nr_running = containerStats.TaskStats.NrRunning\n\tstats.Tasks_state_nr_stopped = containerStats.TaskStats.NrStopped\n\tstats.Tasks_state_nr_uninterruptible = containerStats.TaskStats.NrUninterruptible\n\tstats.Tasks_state_nr_io_wait = containerStats.TaskStats.NrIoWait\n\treturn stats, nil\n}", "func statsDashboard(w http.ResponseWriter, r *http.Request) {\n\tv := mux.Vars(r)\n\tservice, found := v[\"service\"]\n\tif !found {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprintf(w, \"Service not found\\n\")\n\t\treturn\n\t}\n\n\t// execute the dashboad template\n\tdashboardTemplate.Execute(w, struct{ Name, Service string }{\n\t\tName: service,\n\t\tService: strings.ReplaceAll(service, \".\", \"_\"),\n\t})\n}", "func GetStats(args *Args, format string) string {\n\tcfg := config.GetConfig(args.ConfigFile)\n\t// init statistic for record\n\tstatistic.InitStatistic(cfg.Statistic)\n\n\tallQueueStatistic := []*statistic.QueueStatistic{}\n\n\tfor _, cc := range cfg.Redis {\n\t\tfor _, queueConfig := range cc.Queues {\n\t\t\ts := &statistic.QueueStatistic{\n\t\t\t\tQueueName: queueConfig.QueueName,\n\t\t\t\tSourceType: \"Redis\",\n\t\t\t\tIsEnabled: queueConfig.IsEnabled,\n\t\t\t}\n\n\t\t\tqi := &redis.QueueInstance{\n\t\t\t\tSource: cc.Config,\n\t\t\t\tQueue: 
queueConfig,\n\t\t\t}\n\n\t\t\tif queueConfig.IsDelayQueue {\n\t\t\t\ts.Normal, _ = qi.DelayLength(queueConfig.QueueName)\n\t\t\t} else {\n\t\t\t\ts.Normal, _ = qi.Length(queueConfig.QueueName)\n\t\t\t}\n\n\t\t\tif len(queueConfig.DelayOnFailure) > 0 {\n\t\t\t\tqueueName := fmt.Sprintf(\"%s:delayed\", queueConfig.QueueName)\n\t\t\t\ts.Delayed, _ = qi.DelayLength(queueName)\n\t\t\t}\n\n\t\t\ts.Success, _ = statistic.GetCounter(fmt.Sprintf(\"%s:success\", queueConfig.QueueName))\n\t\t\ts.Failure, _ = statistic.GetCounter(fmt.Sprintf(\"%s:failure\", queueConfig.QueueName))\n\n\t\t\ts.Total = s.Normal + s.Delayed + s.Success + s.Failure\n\n\t\t\tallQueueStatistic = append(allQueueStatistic, s)\n\t\t}\n\t}\n\n\tfor _, cc := range cfg.RabbitMQ {\n\t\tfor _, queueConfig := range cc.Queues {\n\t\t\ts := &statistic.QueueStatistic{\n\t\t\t\tQueueName: queueConfig.QueueName,\n\t\t\t\tSourceType: \"RabbitMQ\",\n\t\t\t\tIsEnabled: queueConfig.IsEnabled,\n\t\t\t}\n\n\t\t\t// qi := &rabbitmq.QueueInstance{\n\t\t\t// \tSource: cc.Config,\n\t\t\t// \tQueue: queueConfig,\n\t\t\t// }\n\t\t\t// todo get queue length\n\n\t\t\ts.Normal = 0\n\t\t\ts.Delayed = 0\n\n\t\t\ts.Success, _ = statistic.GetCounter(fmt.Sprintf(\"%s:success\", queueConfig.QueueName))\n\t\t\ts.Failure, _ = statistic.GetCounter(fmt.Sprintf(\"%s:failure\", queueConfig.QueueName))\n\n\t\t\ts.Total = s.Normal + s.Delayed + s.Success + s.Failure\n\n\t\t\tallQueueStatistic = append(allQueueStatistic, s)\n\t\t}\n\t}\n\n\tif \"json\" == format {\n\t\toutput, err := json.Marshal(allQueueStatistic)\n\n\t\tif nil != err {\n\t\t\treturn \"\"\n\t\t}\n\n\t\treturn string(output)\n\t}\n\n\toutput := fmt.Sprintf(\"%s %s statistics information\\n\\n\", constant.APPNAME, constant.APPVERSION)\n\tfor _, s := range allQueueStatistic {\n\t\tstatus := \"disable\"\n\t\tif s.IsEnabled {\n\t\t\tstatus = \"enable\"\n\t\t}\n\t\toutput += fmt.Sprintf(\" > Type: %-8s Status: %-8s Name: %s\\n%10d Total\\n%10d Normal\\n%10d Delayed\\n%10d Success\\n%10d Failure\\n\\n\", s.SourceType, status, s.QueueName, s.Total, s.Normal, s.Delayed, s.Success, s.Failure)\n\t}\n\n\tif \"html\" == format {\n\t\toutput = strings.Replace(output, \"\\n\", \"<br />\", -1)\n\t}\n\n\treturn output\n}", "func createStat(votes *Vote) string {\n\n\tstats := NewStatistics(votes)\n\n\tstr := \"Total: \" + strconv.Itoa(stats.Total) + \"\\n\"\n\tfor value, users := range stats.Transformed {\n\t\tstr += value + \" (\" + strconv.Itoa(len(users)) + \"): \" + strings.Join(users, \", \") + \"\\n\"\n\t}\n\n\treturn str\n\n}", "func (cs *Stats) SprintStats() string {\n\tvar s = \"==== Stats by cold functions ====\\n\"\n\ts += \"fID, #started, #served\\n\"\n\n\tfuncs := make([]string, 0, len(cs.statMap))\n\tfor fID := range cs.statMap {\n\t\tfuncs = append(funcs, fID)\n\t}\n\tsort.Slice(funcs, func(i, j int) bool {\n\t\tnumA, _ := strconv.Atoi(funcs[i])\n\t\tnumB, _ := strconv.Atoi(funcs[j])\n\t\treturn numA < numB\n\t})\n\n\tfor _, fID := range funcs {\n\t\ts += fmt.Sprintf(\"%s, %d, %d\\n\", fID,\n\t\t\tatomic.LoadUint64(&cs.statMap[fID].started),\n\t\t\tatomic.LoadUint64(&cs.statMap[fID].served))\n\t}\n\n\ts += \"===================================\"\n\n\treturn s\n}", "func printDbstat(v *gocui.View, config *config, s stat.Stat) error {\n\t// If reading stats failed, print the error that occurred and return.\n\tif s.Error != nil {\n\t\t_, err := fmt.Fprint(v, formatError(s.Error))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.Error = nil\n\t\treturn nil\n\t}\n\n\t// Align values within columns, use fixed aligning 
instead of dynamic.\n\tif !config.view.Aligned {\n\t\twidthes, cols := align.SetAlign(s.Result, 1000, false) // use high limit (1000) to avoid truncating last value.\n\t\tconfig.view.Cols = cols\n\t\tconfig.view.ColsWidth = widthes\n\t\tconfig.view.Aligned = true\n\t}\n\n\t// Print header.\n\terr := printStatHeader(v, s, config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Print data.\n\terr = printStatData(v, s, config, isFilterRequired(config.view.Filters))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (udb *urlDB) statsHandler(w http.ResponseWriter, r *http.Request) {\n\tvar rws []Rows\n\tvar cols = []Columns{\n\t\t{\n\t\t\tLabel: \"X\",\n\t\t\tColumnType: \"date\",\n\t\t},\n\t\t{\n\t\t\tLabel: \"Daily Total\",\n\t\t\tColumnType: \"number\",\n\t\t},\n\t}\n\n\t// Select all records from the database, split the datetime into a date and group by days. Count the number of requests per day.\n\t// This needs to be changed soon to allow a date range, and possibly to also to fill in empty days if they exist.\n\trows, err := udb.db.Query(\"SELECT COUNT(*) as count, t_stamp::DATE as ts FROM url_map GROUP BY ts ORDER BY ts\")\n\tcheckDBErr(err)\n\n\tfor rows.Next() {\n\t\tvar t time.Time\n\t\tvar count int\n\t\terr := rows.Scan(&count, &t)\n\t\tcheckDBErr(err)\n\t\tconst layout = \"Jan 2, 2006\"\n\t\tdate := t.Format(layout)\n\n\t\t// Convert the date into a string for the JS structure: \"Date(YYYY,M,D)\".\n\t\t// Javascript months are zero indexed (stupid) so we must subtract one. Thanks to my lovely wife for catching that bug\n\t\tdatestr := fmt.Sprintf(\"Date(%d,%d,%d)\", t.Year(), t.Month()-1, t.Day())\n\t\tdailytotal := strconv.FormatInt(int64(count), 10)\n\n\t\trws = append(rws, Rows{\n\t\t\tC: []C2{\n\t\t\t\t{\n\t\t\t\t\tV: datestr,\n\t\t\t\t\tF: date,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tV: dailytotal,\n\t\t\t\t\tF: dailytotal,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t}\n\n\t// Finish building our data structure\n\tchart := &Charts{Cols: cols, Rws: rws}\n\n\t// Encode into JSON and pass to our stats.html template\n\tcht, _ := json.Marshal(chart)\n\n\t// Build template data structure\n\tp := &Page{\n\t\tTitle: \"Stats\",\n\t\tContent: struct {\n\t\t\tJS interface{}\n\t\t}{\n\t\t\ttemplate.HTML(string(cht)),\n\t\t},\n\t}\n\n\trenderTemplate(w, \"stats\", p)\n}", "func statsWrapper(\n\tc *client.Client,\n\tctx context.Context,\n\tcontainerID string,\n\tstream bool,\n) (types.ContainerStats, error) {\n\tif c != nil {\n\t\treturn c.ContainerStats(ctx, containerID, stream)\n\t}\n\tfc := FakeDockerClient{}\n\treturn fc.ContainerStats(ctx, containerID, stream)\n}", "func (n *mockAgent) statsContainer(sandbox *Sandbox, c Container) (*ContainerStats, error) {\n\treturn &ContainerStats{}, nil\n}", "func collectStats(config *Config, stats chan Stats, done chan bool) {\n\tstartTime := time.Now()\n\n\t// TODO: Hoje só temos um cenário. 
Mas a rotina deve ser revista para trabalhar com mais de um cenário.\n\tnumberOfScenarios := 0\n\tscenariosWithError := 0\n\tnumberOfRequests := 0\n\tvar minTime time.Duration = 1<<63 - 1\n\tvar maxTime time.Duration\n\tvar totalTime time.Duration\n\tvar scenarioName string\n\n\tfor elem := range stats {\n\t\tfmt.Printf(\"elem.EndpointID: [%v]\\n\", elem.EndpointID)\n\t\tif elem.MustStat == false {\n\t\t\tcontinue\n\t\t}\n\t\tif elem.EndpointID != \"\" {\n\t\t\tnumberOfRequests += 1\n\t\t\tcontinue\n\t\t}\n\t\tnumberOfScenarios++\n\t\tscenarioName = elem.ScenarioID\n\t\tlog.Println(elem)\n\t\tif elem.Status == false {\n\t\t\tscenariosWithError++\n\t\t}\n\t\ttotalTime += elem.Duration\n\t\tif elem.Duration > maxTime {\n\t\t\tmaxTime = elem.Duration\n\t\t}\n\t\tif elem.Duration < minTime {\n\t\t\tminTime = elem.Duration\n\t\t}\n\t}\n\n\tduration := time.Since(startTime)\n\n\t// TODO: Não está sendo separado entre cenários com e sem erros\n\tlog.Printf(\"Report - Geral\")\n\tlog.Printf(\"\\tNúmero de requisições: %v\", numberOfRequests)\n\tlog.Printf(\"\\tTempo total de execução do teste: %v\", duration)\n\tlog.Printf(\"\\tRequisições por segundo: %.2f\", float64(numberOfRequests)/float64(duration.Seconds()))\n\tlog.Printf(\"\\tNúmero de IDs únicos: %v\", config.UniqueIds)\n\tlog.Printf(\"\\tCenários executados sem erros: %v (%.2f%%)\", numberOfScenarios-scenariosWithError, float64(numberOfScenarios-scenariosWithError)/float64(numberOfScenarios)*100.0)\n\tlog.Printf(\"\\tCenários executados com erros: %v (%.2f%%)\", scenariosWithError, float64(scenariosWithError)/float64(numberOfScenarios)*100.0)\n\t// log.Printf(\"\\tNúmero de cenários OK: %v\", 1)\n\t// log.Printf(\"\\tNúmero de cenários com falhas: %v\", 1)\n\tlog.Printf(\"\\tNúmero de vezes que ocorreu timeout: %v\", 0)\n\t// log.Printf(\"\\tTempos de resposta: Mínimo, médio, máximo, percentil 95%%: %v\", 1)\n\n\tlog.Printf(\"Report - Por cenário\")\n\n\t// TODO: tem que fazer o report de todos os cenarios... 
hoje ta assumindo que so tem 1\n\tlog.Printf(\"\\tCenário: %v\", scenarioName)\n\tlog.Printf(\"\\t\\tTempo total de execução do cenário: %v\", totalTime)\n\t// log.Printf(\"\\t\\tCenários executados: %v\", numberOfScenarios) // ou nro de execuções?\n\tlog.Printf(\"\\t\\tCenários executados sem erros: %v (%.2f%%)\", numberOfScenarios-scenariosWithError, float64(numberOfScenarios-scenariosWithError)/float64(numberOfScenarios)*100.0)\n\tlog.Printf(\"\\t\\tCenários executados com erros: %v (%.2f%%)\", scenariosWithError, float64(scenariosWithError)/float64(numberOfScenarios)*100.0)\n\tlog.Printf(\"\\t\\tNúmero de vezes que ocorreu timeout: %v\", 1)\n\t// TODO: faltou percentil 95%\n\tlog.Printf(\"\\t\\tTempo de execução (min/med/max): (%v/%v/%v)\", minTime, totalTime.Nanoseconds()/int64(numberOfScenarios), maxTime)\n\n\tlog.Printf(\"Report - Por endpoint\")\n\tlog.Printf(\"\\tEndpoint: %v\", \"xxx\")\n\tlog.Printf(\"\\t\\tTempo total de execução do endpoint: %v\", 1)\n\tlog.Printf(\"\\t\\tEndpoints executados sem erros: %v (%.2f%%)\", numberOfScenarios-scenariosWithError, float64(numberOfScenarios-scenariosWithError)/float64(numberOfScenarios)*100.0)\n\tlog.Printf(\"\\t\\tEndpoints executados com erros: %v (%.2f%%)\", scenariosWithError, float64(scenariosWithError)/float64(numberOfScenarios)*100.0)\n\tlog.Printf(\"\\t\\tNúmero de vezes que ocorreu timeout: %v\", 1)\n\t// TODO: faltou percentil 95%\n\tlog.Printf(\"\\t\\tTempo de execução (min/med/max): (%v/%v/%v)\", minTime, totalTime.Nanoseconds()/int64(numberOfScenarios), maxTime)\n\t// * Tamanho das requisições/respostas: Mínimo, médio, máximo, percentil 95% (percentil tb???)\n\n\t// TODO: Report por cenario/endpoint?\n\tdone <- true\n}", "func (sr *ServicedStatsReporter) gatherStats(t time.Time) []Sample {\n\tstats := []Sample{}\n\t// Handle the host metrics.\n\treg, _ := sr.hostRegistry.(*metrics.StandardRegistry)\n\treg.Each(func(name string, i interface{}) {\n\t\ttagmap := map[string]string{\n\t\t\t\"controlplane_host_id\": sr.hostID,\n\t\t}\n\t\tswitch metric := i.(type) {\n\t\tcase metrics.Gauge:\n\t\t\tstats = append(stats, Sample{name, strconv.FormatInt(metric.Value(), 10), t.Unix(), tagmap})\n\t\tcase metrics.GaugeFloat64:\n\t\t\tstats = append(stats, Sample{name, strconv.FormatFloat(metric.Value(), 'f', -1, 32), t.Unix(), tagmap})\n\t\t}\n\t})\n\t// Handle each container's metrics.\n\tfor key, registry := range sr.containerRegistries {\n\t\treg, _ := registry.(*metrics.StandardRegistry)\n\t\treg.Each(func(name string, i interface{}) {\n\t\t\ttagmap := map[string]string{\n\t\t\t\t\"controlplane_host_id\": sr.hostID,\n\t\t\t\t\"controlplane_service_id\": key.serviceID,\n\t\t\t\t\"controlplane_instance_id\": strconv.FormatInt(int64(key.instanceID), 10),\n\t\t\t}\n\t\t\tswitch metric := i.(type) {\n\t\t\tcase metrics.Gauge:\n\t\t\t\tstats = append(stats, Sample{name, strconv.FormatInt(metric.Value(), 10), t.Unix(), tagmap})\n\t\t\tcase metrics.GaugeFloat64:\n\t\t\t\tstats = append(stats, Sample{name, strconv.FormatFloat(metric.Value(), 'f', -1, 32), t.Unix(), tagmap})\n\t\t\t}\n\t\t})\n\t}\n\treturn stats\n}", "func (s *Service) Stats(r *http.Request, args *StatsArgs, result *StatsResponse) error {\n\tif args.UserID == \"\" {\n\t\tresult.Error = uidMissing\n\t\treturn nil\n\t}\n\tresult.Whole = -1\n\tresult.Bookmarks = -1\n\tresult.Pim = -1\n\tresult.Org = -1\n\tcoll := s.Session.DB(MentatDatabase).C(args.UserID)\n\twholeCount, err := coll.Count()\n\tif err != nil {\n\t\tresult.Error = fmt.Sprintf(\"failed getting stats/whole count: 
%s\", err)\n\t\treturn nil\n\t}\n\tresult.Whole = wholeCount\n\tif args.Detailed {\n\t\tvar entries []Entry\n\t\terr := coll.Find(bson.M{\"type\": \"bookmark\"}).All(&entries)\n\t\tif err != nil {\n\t\t\tresult.Error = fmt.Sprintf(\"failed getting stats/bookmarks count: %s\", err)\n\t\t\treturn nil\n\t\t}\n\t\tresult.Bookmarks = len(entries)\n\t\terr = coll.Find(bson.M{\"type\": \"pim\"}).All(&entries)\n\t\tif err != nil {\n\t\t\tresult.Error = fmt.Sprintf(\"failed getting stats/pim count: %s\", err)\n\t\t\treturn nil\n\t\t}\n\t\tresult.Pim = len(entries)\n\t\terr = coll.Find(bson.M{\"type\": \"org\"}).All(&entries)\n\t\tif err != nil {\n\t\t\tresult.Error = fmt.Sprintf(\"failed getting stats/org count: %s\", err)\n\t\t\treturn nil\n\t\t}\n\t\tresult.Org = len(entries)\n\t}\n\treturn nil\n}", "func (this *ReceiverHolder) stats(c *gin.Context) {\n\n\tflightData := this.receiver.GetInFlightRavens()\n\n\tdeadBoxData := this.receiver.GetDeadBoxCount()\n\tboxes := make([]string, 0)\n\tfor _, box := range this.receiver.msgReceivers {\n\t\tboxes = append(boxes, box.id)\n\t}\n\n\tdata := gin.H{\n\t\t\"Queue\": this.receiver.source.GetName(),\n\t\t\"IsReliable\": this.receiver.options.isReliable,\n\t\t\"Boxes\": boxes,\n\t\t\"Inflight\": flightData,\n\t\t\"DeadBox\": deadBoxData,\n\t}\n\tc.JSON(200, data)\n}", "func (c *Collector) Transform(allStats *NodeStatsResponse) (metrics []*exportertools.Metric) {\n for _, stats := range allStats.Nodes {\n // GC Stats\n for _, gcstats := range stats.JVM.GC.Collectors {\n metrics = append(metrics, c.ConvertToMetric(\"jvm_gc_collection_seconds_count\",\n float64(gcstats.CollectionCount),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_gc_collection_seconds_sum\",\n float64(gcstats.CollectionTime / 1000),\n \"COUNTER\",\n nil))\n }\n\n // Breaker stats\n for _, bstats := range stats.Breakers {\n metrics = append(metrics, c.ConvertToMetric(\"breakers_estimated_size_bytes\",\n float64(bstats.EstimatedSize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"breakers_limit_size_bytes\",\n float64(bstats.LimitSize),\n \"GAUGE\",\n nil))\n }\n\n // Thread Pool stats\n for pool, pstats := range stats.ThreadPool {\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_completed_count\",\n float64(pstats.Completed),\n \"COUNTER\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_rejected_count\",\n float64(pstats.Rejected),\n \"COUNTER\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_active_count\",\n float64(pstats.Active),\n \"GAUGE\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_threads_count\",\n float64(pstats.Threads),\n \"GAUGE\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_largest_count\",\n float64(pstats.Largest),\n \"GAUGE\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_queue_count\",\n float64(pstats.Queue),\n \"GAUGE\",\n map[string]string{\"type\": pool}))\n }\n\n // JVM Memory Stats\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_committed_bytes\",\n float64(stats.JVM.Mem.HeapCommitted),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_used_bytes\",\n float64(stats.JVM.Mem.HeapUsed),\n \"GAUGE\",\n nil))\n\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_max_bytes\",\n 
float64(stats.JVM.Mem.HeapMax),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_committed_bytes\",\n float64(stats.JVM.Mem.NonHeapCommitted),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_used_bytes\",\n float64(stats.JVM.Mem.NonHeapUsed),\n \"GAUGE\",\n nil))\n\n // Indices Stats)\n metrics = append(metrics, c.ConvertToMetric(\"indices_fielddata_memory_size_bytes\",\n float64(stats.Indices.FieldData.MemorySize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_fielddata_evictions\",\n float64(stats.Indices.FieldData.Evictions),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_filter_cache_memory_size_bytes\",\n float64(stats.Indices.FilterCache.MemorySize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_filter_cache_evictions\",\n float64(stats.Indices.FilterCache.Evictions),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_query_cache_memory_size_bytes\",\n float64(stats.Indices.QueryCache.MemorySize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_query_cache_evictions\",\n float64(stats.Indices.QueryCache.Evictions),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_request_cache_memory_size_bytes\",\n float64(stats.Indices.QueryCache.MemorySize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_request_cache_evictions\",\n float64(stats.Indices.QueryCache.Evictions),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_docs\",\n float64(stats.Indices.Docs.Count),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_docs_deleted\",\n float64(stats.Indices.Docs.Deleted),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_segments_memory_bytes\",\n float64(stats.Indices.Segments.Memory),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_segments_count\",\n float64(stats.Indices.Segments.Count),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_store_size_bytes\",\n float64(stats.Indices.Store.Size),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_store_throttle_time_ms_total\",\n float64(stats.Indices.Store.ThrottleTime),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_flush_total\",\n float64(stats.Indices.Flush.Total),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_flush_time_ms_total\",\n float64(stats.Indices.Flush.Time),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_indexing_index_time_ms_total\",\n float64(stats.Indices.Indexing.IndexTime),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_indexing_index_total\",\n float64(stats.Indices.Indexing.IndexTotal),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_merges_total_time_ms_total\",\n float64(stats.Indices.Merges.TotalTime),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_merges_total_size_bytes_total\",\n float64(stats.Indices.Merges.TotalSize),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_merges_total\",\n float64(stats.Indices.Merges.Total),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, 
c.ConvertToMetric(\"indices_refresh_total_time_ms_total\",\n float64(stats.Indices.Refresh.TotalTime),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_refresh_total\",\n float64(stats.Indices.Refresh.Total),\n \"COUNTER\",\n nil))\n\n // Transport Stats)\n metrics = append(metrics, c.ConvertToMetric(\"transport_rx_packets_total\",\n float64(stats.Transport.RxCount),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"transport_rx_size_bytes_total\",\n float64(stats.Transport.RxSize),\n \"COUNTER\",\n nil))\n\n\n metrics = append(metrics, c.ConvertToMetric(\"transport_tx_packets_total\",\n float64(stats.Transport.TxCount),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"transport_tx_size_bytes_total\",\n float64(stats.Transport.TxSize),\n \"COUNTER\",\n nil))\n\n // Process Stats)\n metrics = append(metrics, c.ConvertToMetric(\"process_cpu_percent\",\n float64(stats.Process.CPU.Percent),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_mem_resident_size_bytes\",\n float64(stats.Process.Memory.Resident),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_mem_share_size_bytes\",\n float64(stats.Process.Memory.Share),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_mem_virtual_size_bytes\",\n float64(stats.Process.Memory.TotalVirtual),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_open_files_count\",\n float64(stats.Process.OpenFD),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_cpu_time_seconds_sum\",\n float64(stats.Process.CPU.Total / 1000),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_cpu_time_seconds_sum\",\n float64(stats.Process.CPU.Sys / 1000),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_cpu_time_seconds_sum\",\n float64(stats.Process.CPU.User / 1000),\n \"COUNTER\",\n nil))\n\n }\n\n return metrics\n}", "func (c *Client) containerStats(ctx context.Context, id string) (*containerdtypes.Metric, error) {\n\tif !c.lock.TrylockWithRetry(ctx, id) {\n\t\treturn nil, errtypes.ErrLockfailed\n\t}\n\tdefer c.lock.Unlock(id)\n\n\tpack, err := c.watch.get(id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmetrics, err := pack.task.Metrics(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn metrics, nil\n}", "func writeCountCollections(fileName string, countCollections map[string]countCollection) error {\n\t// Take the countCollections and export it into a YAML file\n\tcountCollectionsYaml, err := yaml.Marshal(&countCollections)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Create and open the counts YAML file\n\tcountCollectionsFile, err := os.Create(fileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer countCollectionsFile.Close()\n\n\t// Output the generated YAML into the file\n\tfmt.Fprintf(countCollectionsFile, string(countCollectionsYaml))\n\n\t// Assuming no errors, return nil\n\treturn nil\n}", "func (c StatsController) Stats(ctx echo.Context) error {\n\tstats, err := c.statsRepository.Stats()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn JSONDetailViewOK(ctx, stats)\n}", "func GetCollectionStatList(client *mongo.Client) *CollectionStatList {\n\tcollectionStatList := &CollectionStatList{}\n\tdbNames, err := client.ListDatabaseNames(context.TODO(), bson.M{})\n\tif err != nil {\n\t\tif !logSuppressCS.Contains(keyCS) {\n\t\t\tlog.Warnf(\"%s. 
Collection stats will not be collected. This log message will be suppressed from now.\", err)\n\t\t\tlogSuppressCS.Add(keyCS)\n\t\t}\n\t\treturn nil\n\t}\n\n\tlogSuppressCS.Delete(keyCS)\n\tfor _, dbName := range dbNames {\n\t\tif common.IsSystemDB(dbName) {\n\t\t\tcontinue\n\t\t}\n\n\t\tcollNames, err := client.Database(dbName).ListCollectionNames(context.TODO(), bson.M{})\n\t\tif err != nil {\n\t\t\tif !logSuppressCS.Contains(dbName) {\n\t\t\t\tlog.Warnf(\"%s. Collection stats will not be collected for this db. This log message will be suppressed from now.\", err)\n\t\t\t\tlogSuppressCS.Add(dbName)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tlogSuppressCS.Delete(dbName)\n\t\tfor _, collName := range collNames {\n\t\t\tif common.IsSystemCollection(collName) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfullCollName := common.CollFullName(dbName, collName)\n\t\t\tcollStatus := CollectionStatus{}\n\t\t\tres := client.Database(dbName).RunCommand(context.TODO(), bson.D{{\"collStats\", collName}, {\"scale\", 1}})\n\t\t\tif err = res.Decode(&collStatus); err != nil {\n\t\t\t\tif !logSuppressCS.Contains(fullCollName) {\n\t\t\t\t\tlog.Warnf(\"%s. Collection stats will not be collected for this collection. This log message will be suppressed from now.\", err)\n\t\t\t\t\tlogSuppressCS.Add(fullCollName)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlogSuppressCS.Delete(fullCollName)\n\t\t\tcollStatus.Database = dbName\n\t\t\tcollStatus.Name = collName\n\t\t\tcollectionStatList.Members = append(collectionStatList.Members, collStatus)\n\t\t}\n\t}\n\n\treturn collectionStatList\n}", "func (s *VicStreamProxy) StreamContainerStats(ctx context.Context, config *convert.ContainerStatsConfig) error {\n\top := trace.FromContext(ctx, \"\")\n\tdefer trace.End(trace.Begin(config.ContainerID, op))\n\topID := op.ID()\n\n\tif s.client == nil {\n\t\treturn errors.NillPortlayerClientError(\"StreamProxy\")\n\t}\n\n\t// create a child context that we control\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\tparams := containers.NewGetContainerStatsParamsWithContext(op).WithOpID(&opID)\n\tparams.ID = config.ContainerID\n\tparams.Stream = config.Stream\n\n\tconfig.Ctx = ctx\n\tconfig.Cancel = cancel\n\n\t// create our converter\n\tcontainerConverter := convert.NewContainerStats(config)\n\t// provide the writer for the portLayer and start listening for metrics\n\twriter := containerConverter.Listen()\n\tif writer == nil {\n\t\t// problem with the listener\n\t\treturn errors.InternalServerError(fmt.Sprintf(\"unable to gather container(%s) statistics\", config.ContainerID))\n\t}\n\n\t_, err := s.client.Containers.GetContainerStats(params, writer)\n\tif err != nil {\n\t\tswitch err := err.(type) {\n\t\tcase *containers.GetContainerStatsNotFound:\n\t\t\treturn errors.NotFoundError(config.ContainerID)\n\t\tcase *containers.GetContainerStatsInternalServerError:\n\t\t\treturn errors.InternalServerError(\"Server error from the interaction port layer\")\n\t\tdefault:\n\t\t\tif ctx.Err() == context.Canceled {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t//Check for EOF. 
Since the connection, transport, and data handling are\n\t\t\t//encapsulated inside of Swagger, we can only detect EOF by checking the\n\t\t\t//error string\n\t\t\tif strings.Contains(err.Error(), SwaggerSubstringEOF) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn errors.InternalServerError(fmt.Sprintf(\"Unknown error from the interaction port layer: %s\", err))\n\t\t}\n\t}\n\treturn nil\n}", "func (cp *Pool) StatsJSON() string {\n\tp := cp.pool()\n\tif p == nil {\n\t\treturn \"{}\"\n\t}\n\tres := p.StatsJSON()\n\tclosingBraceIndex := strings.LastIndex(res, \"}\")\n\tif closingBraceIndex == -1 { // unexpected...\n\t\treturn res\n\t}\n\treturn fmt.Sprintf(`%s, \"WaiterQueueFull\": %v}`, res[:closingBraceIndex], cp.waiterQueueFull.Load())\n}", "func TestContainerListStatsWithSandboxIdFilter(t *testing.T) {\n\tvar (\n\t\tstats []*runtime.ContainerStats\n\t\terr error\n\t)\n\tt.Logf(\"Create a pod config and run sandbox container\")\n\tsb, sbConfig := PodSandboxConfigWithCleanup(t, \"running-pod\", \"statsls\")\n\n\tpauseImage := images.Get(images.Pause)\n\tEnsureImageExists(t, pauseImage)\n\n\tt.Logf(\"Create a container config and run containers in a pod\")\n\tcontainerConfigMap := make(map[string]*runtime.ContainerConfig)\n\tfor i := 0; i < 3; i++ {\n\t\tcName := fmt.Sprintf(\"container%d\", i)\n\t\tcontainerConfig := ContainerConfig(\n\t\t\tcName,\n\t\t\tpauseImage,\n\t\t\tWithTestLabels(),\n\t\t\tWithTestAnnotations(),\n\t\t)\n\t\tcn, err := runtimeService.CreateContainer(sb, containerConfig, sbConfig)\n\t\tcontainerConfigMap[cn] = containerConfig\n\t\trequire.NoError(t, err)\n\t\tdefer func() {\n\t\t\tassert.NoError(t, runtimeService.RemoveContainer(cn))\n\t\t}()\n\t\trequire.NoError(t, runtimeService.StartContainer(cn))\n\t\tdefer func() {\n\t\t\tassert.NoError(t, runtimeService.StopContainer(cn, 10))\n\t\t}()\n\t}\n\n\tt.Logf(\"Fetch container stats for each container with Filter\")\n\trequire.NoError(t, Eventually(func() (bool, error) {\n\t\tstats, err = runtimeService.ListContainerStats(\n\t\t\t&runtime.ContainerStatsFilter{PodSandboxId: sb})\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif len(stats) != 3 {\n\t\t\treturn false, errors.New(\"unexpected stats length\")\n\t\t}\n\n\t\tfor _, containerStats := range stats {\n\t\t\t// Wait for stats on all containers, not just the first one in the list.\n\t\t\tif containerStats.GetWritableLayer().GetTimestamp() == 0 {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\t}, time.Second, 45*time.Second))\n\t// TODO(claudiub): Reduce the timer above to 30 seconds once Windows flakiness has been addressed.\n\tt.Logf(\"Verify container stats for sandbox %q\", sb)\n\tfor _, s := range stats {\n\t\ttestStats(t, s, containerConfigMap[s.GetAttributes().GetId()])\n\t}\n}", "func writeStatGroupMap(w io.Writer, statGroups map[string]*statGroup) {\n\tmaxKeyLength := 0\n\tkeys := make([]string, 0, len(statGroups))\n\tfor k := range statGroups {\n\t\tif len(k) > maxKeyLength {\n\t\t\tmaxKeyLength = len(k)\n\t\t}\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\tfor _, k := range keys {\n\t\tv := statGroups[k]\n\t\tpaddedKey := fmt.Sprintf(\"%s\", k)\n\t\tfor len(paddedKey) < maxKeyLength {\n\t\t\tpaddedKey += \" \"\n\t\t}\n\n\t\t_, err := fmt.Fprintf(w, \"%s:\\n\", paddedKey)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\terr = v.write(w)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}", "func toContainerStats0(s *cgroups.Stats, ret *info.ContainerStats) {\n\tret.Cpu.Usage.User = 
s.CpuStats.CpuUsage.UsageInUsermode\n\tret.Cpu.Usage.System = s.CpuStats.CpuUsage.UsageInKernelmode\n\tn := len(s.CpuStats.CpuUsage.PercpuUsage)\n\tret.Cpu.Usage.PerCpu = make([]uint64, n)\n\n\tret.Cpu.Usage.Total = 0\n\tfor i := 0; i < n; i++ {\n\t\tret.Cpu.Usage.PerCpu[i] = s.CpuStats.CpuUsage.PercpuUsage[i]\n\t\tret.Cpu.Usage.Total += s.CpuStats.CpuUsage.PercpuUsage[i]\n\t}\n}", "func (t *task) Stats(_ context.Context) (*libcontainerdtypes.Stats, error) {\n\thc, err := t.getHCSContainer()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treadAt := time.Now()\n\ts, err := hc.Statistics()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &libcontainerdtypes.Stats{\n\t\tRead: readAt,\n\t\tHCSStats: &s,\n\t}, nil\n}", "func (m *Measurement) PrintStats(w io.Writer) {\n\ttype Hist struct {\n\t\t*Result\n\t\t*hrtime.Histogram\n\t}\n\n\thists := []Hist{}\n\tfor _, result := range m.Results {\n\t\thists = append(hists, Hist{\n\t\t\tResult: result,\n\t\t\tHistogram: hrtime.NewDurationHistogram(result.Durations, &hrtime.HistogramOptions{\n\t\t\t\tBinCount: 10,\n\t\t\t\tNiceRange: true,\n\t\t\t\tClampMaximum: 0,\n\t\t\t\tClampPercentile: 0.999,\n\t\t\t}),\n\t\t})\n\t}\n\n\tmsec := func(ns float64) string {\n\t\treturn fmt.Sprintf(\"%.2f\", ns/1e6)\n\t}\n\n\tfor _, hist := range hists {\n\t\tfmt.Fprintf(w, \"%v\\t%v\\t%v\\t%v\\t%v\\t%v\\t%v\\t%v\\n\",\n\t\t\tm.Parts, m.Segments, hist.Name,\n\t\t\tmsec(hist.Average),\n\t\t\tmsec(hist.Maximum),\n\t\t\tmsec(hist.P50),\n\t\t\tmsec(hist.P90),\n\t\t\tmsec(hist.P99),\n\t\t)\n\t}\n}", "func WriteSummary(index_name string, fields []string) {\n\n // Setting font as needed\n pdf.SetFont(\"Helvetica\",\"\",10)\n\n // a slice of Summary{} that will hold Summary{} structure for each field\n response_struct := []Summary{}\n\n // Looping through each fields requestd\n for index := range fields {\n url := fmt.Sprintf(`https://127.0.0.1:9200/%s/_search?`, index_name)\n queries := fmt.Sprintf(`\n {\n \"size\":\"0\",\n \"aggs\" : {\n \"uniq_gender\" : {\n \"terms\" : { \"field\" : \"%s.keyword\" }\n }\n }\n }`, fields[index])\n\n p, err := es.Query(\"GET\", url, queries)\n if err != nil {\n fmt.Println(\"Report Generation error ERROR: Could not get response from Elasticsearch server \", err, \"Trying to connect again\")\n return\n }\n\n temp := Summary{}\n\n err = json.Unmarshal(p, &temp)\n if (err != nil) {\n fmt.Println(\"Error unmarshalling json\",err);\n }\n\n response_struct = append(response_struct,temp);\n }\n for i :=0; i < len(response_struct); i++ {\n pdf.Write(10,fmt.Sprintf(`%s Count\\n`,fields[i]))\n //DrawLine();\n for _, v := range(response_struct[i].Aggregations.Uniq.Buck){\n pdf.Write(10,fmt.Sprintf(`%s %d\\n`,v.Key,v.Count))\n }\n }\n}", "func printStat(app *app, s stat.Stat, props stat.PostgresProperties) {\n\tapp.ui.Update(func(g *gocui.Gui) error {\n\t\tv, err := g.View(\"sysstat\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"set focus on sysstat view failed: %s\", err)\n\t\t}\n\t\tv.Clear()\n\t\terr = printSysstat(v, s)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"print sysstat failed: %s\", err)\n\t\t}\n\n\t\tv, err = g.View(\"pgstat\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"set focus on pgstat view failed: %s\", err)\n\t\t}\n\t\tv.Clear()\n\t\terr = printPgstat(v, s, props, app.db)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"print summary postgres stat failed: %s\", err)\n\t\t}\n\n\t\tv, err = g.View(\"dbstat\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"set focus on dbstat view failed: %s\", 
err)\n\t\t}\n\t\tv.Clear()\n\n\t\terr = printDbstat(v, app.config, s)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"print main postgres stat failed: %s\", err)\n\t\t}\n\n\t\tif app.config.view.ShowExtra > stat.CollectNone {\n\t\t\tv, err := g.View(\"extra\")\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"set focus on extra view failed: %s\", err)\n\t\t\t}\n\n\t\t\tswitch app.config.view.ShowExtra {\n\t\t\tcase stat.CollectDiskstats:\n\t\t\t\tv.Clear()\n\t\t\t\terr := printIostat(v, s.Diskstats)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\tcase stat.CollectNetdev:\n\t\t\t\tv.Clear()\n\t\t\t\terr := printNetdev(v, s.Netdevs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\tcase stat.CollectLogtail:\n\t\t\t\tsize, buf, err := readLogfileRecent(v, app.config.logtail)\n\t\t\t\tif err != nil {\n\t\t\t\t\tprintCmdline(g, \"Tail Postgres log failed: %s\", err)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tif size < app.config.logtail.Size {\n\t\t\t\t\tv.Clear()\n\t\t\t\t\terr := app.config.logtail.Reopen(app.db, app.postgresProps.VersionNum)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tprintCmdline(g, \"Tail Postgres log failed: %s\", err)\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Update info about logfile size.\n\t\t\t\tapp.config.logtail.Size = size\n\n\t\t\t\terr = printLogtail(v, app.config.logtail.Path, buf)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n}", "func (benchmark *BenchmarkStat) PrintStats() {\n\tprintable := \"\"\n\tprintable += \"\\nSTATISTICS\\n\"\n\tprintable += \"===============\\n\"\n\tfor _, val := range benchmark.options {\n\t\tswitch val {\n\t\tcase Mean:\n\t\t\tif st, err := benchmark.GetStat(val); err == nil {\n\t\t\t\tprintable += st\n\t\t\t}\n\t\tcase Mode:\n\t\t\tif st, err := benchmark.GetStat(val); err == nil {\n\t\t\t\tprintable += st\n\t\t\t}\n\t\tcase Highest:\n\t\t\tif st, err := benchmark.GetStat(val); err == nil {\n\t\t\t\tprintable += st\n\t\t\t}\n\t\tcase Lowest:\n\t\t\tif st, err := benchmark.GetStat(val); err == nil {\n\t\t\t\tprintable += st\n\t\t\t}\n\t\tcase Sum:\n\t\t\tif st, err := benchmark.GetStat(val); err == nil {\n\t\t\t\tprintable += st\n\t\t\t}\n\t\tcase Range:\n\t\t\tif st, err := benchmark.GetStat(val); err == nil {\n\t\t\t\tprintable += st\n\t\t\t}\n\t\tcase All:\n\t\t\tif st, err := benchmark.GetStat(val); err == nil {\n\t\t\t\tprintable += st\n\t\t\t}\n\t\tdefault:\n\t\t\tif _, err := benchmark.GetStat(val); err != nil {\n\t\t\t\tprintable += err.Error()\n\t\t\t}\n\n\t\t}\n\t}\n\n\ts := printable\n\tfmt.Println(s)\n}", "func writeStatsHeader(to *os.File) {\n\tfmt.Fprintln(to)\n\tfmt.Fprintf(to, statsPrintHeader,\n\t\t\"Time\", \"OP\", \"Count\", \"Total Bytes\", \"Latency(min, avg, max)\", \"Throughput\", \"Error\")\n}", "func (f *RemoteRuntime) ListContainerStats(ctx context.Context, req *kubeapi.ListContainerStatsRequest) (*kubeapi.ListContainerStatsResponse, error) {\n\tstats, err := f.RuntimeService.ListContainerStats(ctx, req.Filter)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &kubeapi.ListContainerStatsResponse{Stats: stats}, nil\n}", "func (a *CMP) Write(metrics []telegraf.Metric) error {\n\tif len(metrics) == 0 {\n\t\treturn nil\n\t}\n\tpayload := &PostMetrics{\n\t\tMonitoringSystem: \"telegraf\",\n\t\tResourceID: a.ResourceID,\n\t}\n\n\tfor _, m := range metrics {\n\t\tlog.Printf(\"D! 
[CMP] Process %+v\", m)\n\n\t\tsuffix := \"\"\n\t\tcpu := m.Tags()[\"cpu\"]\n\t\tpath := m.Tags()[\"path\"]\n\t\thaproxyService := m.Tags()[\"proxy\"] + \"_\" + m.Tags()[\"sv\"]\n\t\tcontainerName := m.Tags()[\"com.docker.compose.service\"]\n\t\tdiskName := m.Tags()[\"name\"]\n\t\tdb := m.Tags()[\"db\"]\n\t\tkafkaTopic := m.Tags()[\"topic\"]\n\t\tkafkaBrokerHost := m.Tags()[\"brokerHost\"]\n\t\tmongoDBName := m.Tags()[\"db_name\"]\n\n\t\tif len(cpu) > 0 && cpu != \"cpu-total\" {\n\t\t\tsuffix = cpu[3:]\n\t\t} else if len(path) > 0 {\n\t\t\tsuffix = path\n\t\t} else if len(containerName) > 0 {\n\t\t\tsuffix = containerName\n\t\t} else if m.Name() == \"haproxy\" && len(haproxyService) > 0 {\n\t\t\tsuffix = haproxyService\n\t\t} else if m.Name() == \"diskio\" && len(diskName) > 0 {\n\t\t\tsuffix = diskName\n\t\t} else if m.Name() == \"postgresql\" && len(db) > 0 {\n\t\t\tsuffix = db\n\t\t} else if strings.HasPrefix(m.Name(), \"mongodb_\") && len(mongoDBName) > 0 {\n\t\t\tsuffix = mongoDBName\n\t\t} else if strings.HasPrefix(m.Name(), \"kafka.\") && len(kafkaTopic) > 0 {\n\t\t\tsuffix = kafkaTopic\n\t\t} else if strings.HasPrefix(m.Name(), \"kafka.\") && len(kafkaBrokerHost) > 0 {\n\t\t\tsuffix = kafkaBrokerHost\n\t\t}\n\n\t\ttimestamp := m.Time().UTC().Format(\"2006-01-02T15:04:05.999999Z\")\n\t\tfor k, v := range m.Fields() {\n\t\t\tif k == \"DelayedFetchMetrics.Count\" {\n\t\t\t\tk = fmt.Sprintf(\"%s.%s\", k, m.Tags()[\"fetcherType\"])\n\t\t\t} else if k == \"BrokerTopicMetrics.Count\" || k == \"FetcherStats.Count\" {\n\t\t\t\tk = fmt.Sprintf(\"%s.%s\", k, m.Tags()[\"name\"])\n\t\t\t} else if strings.HasPrefix(k, \"RequestMetrics.\") {\n\t\t\t\tk = fmt.Sprintf(\"%s.%s.%s\", k, m.Tags()[\"request\"], m.Tags()[\"name\"])\n\t\t\t}\n\t\t\tmetricName := m.Name() + \"-\" + strings.Replace(k, \"_\", \".\", -1)\n\t\t\ttranslation, found := translateMap[metricName]\n\t\t\tif !found {\n\t\t\t\tlog.Printf(\"D! [CMP] Skip %s\", metricName)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif translation.Conversion != nil {\n\t\t\t\tv = translation.Conversion(v)\n\t\t\t}\n\n\t\t\tspecialisations := []string{}\n\t\t\tif translation.Specialisation != \"\" {\n\t\t\t\tspecialisations = append(specialisations, translation.Specialisation)\n\t\t\t}\n\t\t\tif suffix != \"\" {\n\t\t\t\tspecialisations = append(specialisations, suffix)\n\t\t\t}\n\n\t\t\tp := DataPoint{\n\t\t\t\tCounter: translation.Counter,\n\t\t\t\tName: translation.Name,\n\t\t\t\tSpecialisation: strings.Join(specialisations, \".\"),\n\t\t\t\tUnit: translation.Unit,\n\t\t\t\tValue: fmt.Sprintf(\"%v\", v),\n\t\t\t\tTime: timestamp,\n\t\t\t}\n\t\t\tlog.Printf(\n\t\t\t\t\"D! [CMP] Create %s[%s] = %s(%s) %s\",\n\t\t\t\tp.Name,\n\t\t\t\tp.Specialisation,\n\t\t\t\tp.Value,\n\t\t\t\tp.Unit,\n\t\t\t\tp.Time,\n\t\t\t)\n\t\t\tpayload.AddMetric(p)\n\t\t}\n\t}\n\n\tcmpBytes, err := json.Marshal(payload)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to JSON-serialize the data points: %s\", err.Error())\n\t}\n\treq, err := http.NewRequest(\n\t\t\"POST\",\n\t\ta.authenticatedURL(),\n\t\tbytes.NewBuffer(cmpBytes),\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to prepare the HTTP request %s\", err.Error())\n\t}\n\n\tif a.UserAgent == \"\" {\n\t\ta.UserAgent = \"telegraf/unknown\"\n\t}\n\treq.Header.Add(\"User-Agent\", a.UserAgent)\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\treq.SetBasicAuth(a.APIUser, a.APIKey)\n\n\tlog.Printf(\n\t\t\"I! 
[CMP] Sending %d data points generated from %d metrics to the API\",\n\t\tlen(payload.Metrics),\n\t\tlen(metrics),\n\t)\n\tresp, err := a.client.Do(req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"API call failed: %s\", err.Error())\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"E! [CMP] failed to parse CMP response body: %s\", err)\n\t\t}\n\t\treturn fmt.Errorf(\"received a non-200 response: %s %s\", resp.Status, body)\n\t}\n\n\treturn nil\n}", "func (bi *BridgerInfo) writeStatsFile(fpath string, ins *zstypes.InsCacheInfo) (map[string]string, uint64, uint64, error) {\n\ttbytes := uint64(0)\n\n\tfp, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE, 0644)\n\tvar ntstamps = uint64(0)\n\tif err != nil {\n\t\treturn nil, ntstamps, tbytes, err\n\t}\n\tdefer func() {\n\t\terr := fp.Close()\n\t\tif err != nil {\n\t\t\tzlog.Error(\"Failed to close file %s, err %s\", fpath, err)\n\t\t}\n\t}()\n\n\tw, err := gzip.NewWriterLevel(fp, gzip.BestSpeed)\n\tif err != nil {\n\t\tzlog.Error(\"Could not create gzip writer for file %s err %s\", fpath, err)\n\t\treturn nil, ntstamps, tbytes, err\n\t}\n\n\tclblsMap := make(map[string]string) // Common labels map.\n\tif len(ins.Samples) > 0 {\n\t\tfor i := 0; i < len(ins.Samples[0].Ls); i++ {\n\t\t\tclblsMap[ins.Samples[0].Ls[i].Name] = ins.Samples[0].Ls[i].Value\n\t\t}\n\t}\n\tfor _, s := range ins.Samples {\n\t\ttbytes += uint64(len(s.Help) + len(s.Type) + 16)\n\t\tfor _, clbl := range s.Ls {\n\t\t\ttbytes += uint64(len(clbl.Name) + len(clbl.Value))\n\t\t\tval, isok := clblsMap[clbl.Name]\n\t\t\tif isok && val != clbl.Value {\n\t\t\t\tdelete(clblsMap, clbl.Name)\n\t\t\t}\n\t\t}\n\t}\n\n\tlmName := \"\"\n\tfor _, s := range ins.Samples {\n\t\toname := s.Ls.Get(labels.MetricName)\n\t\tif len(oname) == 0 {\n\t\t\tzlog.Warn(\"Missing metric name for sample %v\", s)\n\t\t\tcontinue\n\t\t}\n\t\tname, slabel := bi.adjustMetricName(oname, s.Ls, s.Type)\n\t\tif len(slabel) > 0 {\n\t\t\tdelete(clblsMap, slabel)\n\t\t}\n\n\t\tbaseMname := name\n\t\tif s.Type == \"summary\" {\n\t\t\tif strings.HasSuffix(baseMname, \"_sum\") {\n\t\t\t\tbaseMname = strings.TrimSuffix(baseMname, \"_sum\")\n\t\t\t} else if strings.HasSuffix(baseMname, \"_count\") {\n\t\t\t\tbaseMname = strings.TrimSuffix(baseMname, \"_count\")\n\t\t\t}\n\t\t} else if s.Type == \"histogram\" {\n\t\t\tif strings.HasSuffix(baseMname, \"_sum\") {\n\t\t\t\tbaseMname = strings.TrimSuffix(baseMname, \"_sum\")\n\t\t\t} else if strings.HasSuffix(baseMname, \"_count\") {\n\t\t\t\tbaseMname = strings.TrimSuffix(baseMname, \"_count\")\n\t\t\t} else if strings.HasSuffix(baseMname, \"_bucket\") {\n\t\t\t\tbaseMname = strings.TrimSuffix(baseMname, \"_bucket\")\n\t\t\t}\n\t\t}\n\n\t\tif lmName != baseMname {\n\t\t\tfmt.Fprintf(w, \"# HELP %s %s\\n\", baseMname, s.Help)\n\t\t\tif s.Type == \"unknown\" {\n\t\t\t\tfmt.Fprintf(w, \"# TYPE %s %s\\n\", baseMname, \"untyped\")\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(w, \"# TYPE %s %s\\n\", baseMname, s.Type)\n\t\t\t}\n\t\t}\n\t\tif s.Ts != ins.DTs {\n\t\t\tntstamps++\n\t\t\tfmt.Fprintf(w, \"%s%s %f %d\\n\", name, bi.getLocalLabelsString(clblsMap, s.Ls, slabel), s.Val, s.Ts)\n\t\t} else {\n\t\t\tfmt.Fprintf(w, \"%s%s %f\\n\", name, bi.getLocalLabelsString(clblsMap, s.Ls, slabel), s.Val)\n\t\t}\n\n\t\tlmName = baseMname\n\t}\n\n\terr = w.Close()\n\tif err != nil {\n\t\tzlog.Error(\"Could not close gzip writer for file %s err %s\", fpath, err)\n\t\treturn nil, ntstamps, 
tbytes, err\n\t}\n\n\treturn clblsMap, ntstamps, tbytes, err\n}", "func TestContainerListStatsWithIdFilter(t *testing.T) {\n\tvar (\n\t\tstats []*runtime.ContainerStats\n\t\terr error\n\t)\n\tt.Logf(\"Create a pod config and run sandbox container\")\n\tsb, sbConfig := PodSandboxConfigWithCleanup(t, \"running-pod\", \"statsls\")\n\n\tpauseImage := images.Get(images.Pause)\n\tEnsureImageExists(t, pauseImage)\n\n\tt.Logf(\"Create a container config and run containers in a pod\")\n\tcontainerConfigMap := make(map[string]*runtime.ContainerConfig)\n\tfor i := 0; i < 3; i++ {\n\t\tcName := fmt.Sprintf(\"container%d\", i)\n\t\tcontainerConfig := ContainerConfig(\n\t\t\tcName,\n\t\t\tpauseImage,\n\t\t\tWithTestLabels(),\n\t\t\tWithTestAnnotations(),\n\t\t)\n\t\tcn, err := runtimeService.CreateContainer(sb, containerConfig, sbConfig)\n\t\tcontainerConfigMap[cn] = containerConfig\n\t\trequire.NoError(t, err)\n\t\tdefer func() {\n\t\t\tassert.NoError(t, runtimeService.RemoveContainer(cn))\n\t\t}()\n\t\trequire.NoError(t, runtimeService.StartContainer(cn))\n\t\tdefer func() {\n\t\t\tassert.NoError(t, runtimeService.StopContainer(cn, 10))\n\t\t}()\n\t}\n\n\tt.Logf(\"Fetch container stats for each container with Filter\")\n\tfor id := range containerConfigMap {\n\t\trequire.NoError(t, Eventually(func() (bool, error) {\n\t\t\tstats, err = runtimeService.ListContainerStats(\n\t\t\t\t&runtime.ContainerStatsFilter{Id: id})\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tif len(stats) != 1 {\n\t\t\t\treturn false, errors.New(\"unexpected stats length\")\n\t\t\t}\n\t\t\tif stats[0].GetWritableLayer().GetTimestamp() != 0 {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t\treturn false, nil\n\t\t}, time.Second, 30*time.Second))\n\n\t\tt.Logf(\"Verify container stats for %s\", id)\n\t\tfor _, s := range stats {\n\t\t\trequire.Equal(t, s.GetAttributes().GetId(), id)\n\t\t\ttestStats(t, s, containerConfigMap[id])\n\t\t}\n\t}\n}", "func (c *Client) Stats(ctx context.Context, data *StatsRequest) (*StatsResponse, error) {\n\treq, err := http.NewRequestWithContext(ctx, http.MethodPost, \"/?Action=Stats\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tq := req.URL.Query()\n\tif v := data.Version; v != nil {\n\t\tq.Add(\"Version\", *v)\n\t}\n\treq.URL.RawQuery = q.Encode()\n\n\tif v := data.XTopService; v != nil {\n\t\treq.Header.Set(\"X-Top-Service\", *v)\n\t}\n\tif v := data.XTopRegion; v != nil {\n\t\treq.Header.Set(\"X-Top-Region\", *v)\n\t}\n\n\tif v, ok := ctx.Value(\"K_LOGID\").(string); ok {\n\t\treq.Header.Set(\"X-TT-LOGID\", v)\n\t}\n\n\tresp, err := c.do(ctx, req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar payload StatsResponse\n\td := json.NewDecoder(resp.Body)\n\tif err := d.Decode(&payload); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &payload, nil\n}", "func WriteAverages(w http.ResponseWriter, avgCollection *AvgCollection) {\n\tjs, err := json.Marshal(avgCollection)\n\n\tif err != nil {\n\t\tlog.Println(\"Cannot marshalize aggregated averages.\")\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(js)\n}", "func testStats(t *testing.T,\n\ts *runtime.ContainerStats,\n\tconfig *runtime.ContainerConfig,\n) {\n\trequire.NotEmpty(t, s.GetAttributes().GetId())\n\trequire.NotEmpty(t, s.GetAttributes().GetMetadata())\n\trequire.NotEmpty(t, s.GetAttributes().GetAnnotations())\n\trequire.Equal(t, s.GetAttributes().GetLabels(), 
config.Labels)\n\trequire.Equal(t, s.GetAttributes().GetAnnotations(), config.Annotations)\n\trequire.Equal(t, s.GetAttributes().GetMetadata().Name, config.Metadata.Name)\n\trequire.NotEmpty(t, s.GetAttributes().GetLabels())\n\trequire.NotEmpty(t, s.GetCpu().GetTimestamp())\n\trequire.NotEmpty(t, s.GetCpu().GetUsageCoreNanoSeconds().GetValue())\n\trequire.NotEmpty(t, s.GetMemory().GetTimestamp())\n\trequire.NotEmpty(t, s.GetMemory().GetWorkingSetBytes().GetValue())\n\trequire.NotEmpty(t, s.GetWritableLayer().GetTimestamp())\n\trequire.NotEmpty(t, s.GetWritableLayer().GetFsId().GetMountpoint())\n\n\t// UsedBytes of a fresh container can be zero on Linux, depending on the backing filesystem.\n\t// https://github.com/containerd/containerd/issues/7909\n\tif goruntime.GOOS == \"windows\" {\n\t\trequire.NotEmpty(t, s.GetWritableLayer().GetUsedBytes().GetValue())\n\t}\n\n\t// Windows does not collect inodes stats.\n\tif goruntime.GOOS != \"windows\" {\n\t\trequire.NotEmpty(t, s.GetWritableLayer().GetInodesUsed().GetValue())\n\t}\n}", "func DiffFormatWrite(ctx formatter.Context, changes []container.FilesystemChange) error {\n\trender := func(format func(subContext formatter.SubContext) error) error {\n\t\tfor _, change := range changes {\n\t\t\tif err := format(&diffContext{c: change}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\treturn ctx.Write(newDiffContext(), render)\n}", "func printSysstat(v *gocui.View, s stat.Stat) error {\n\tvar err error\n\n\t/* line1: current time and load average */\n\t_, err = fmt.Fprintf(v, \"pgcenter: %s, load average: %.2f, %.2f, %.2f\\n\",\n\t\ttime.Now().Format(\"2006-01-02 15:04:05\"),\n\t\ts.LoadAvg.One, s.LoadAvg.Five, s.LoadAvg.Fifteen)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t/* line2: cpu usage */\n\t_, err = fmt.Fprintf(v, \" %%cpu: \\033[37;1m%4.1f\\033[0m us, \\033[37;1m%4.1f\\033[0m sy, \\033[37;1m%4.1f\\033[0m ni, \\033[37;1m%4.1f\\033[0m id, \\033[37;1m%4.1f\\033[0m wa, \\033[37;1m%4.1f\\033[0m hi, \\033[37;1m%4.1f\\033[0m si, \\033[37;1m%4.1f\\033[0m st\\n\",\n\t\ts.CpuStat.User, s.CpuStat.Sys, s.CpuStat.Nice, s.CpuStat.Idle,\n\t\ts.CpuStat.Iowait, s.CpuStat.Irq, s.CpuStat.Softirq, s.CpuStat.Steal)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t/* line3: memory usage */\n\t_, err = fmt.Fprintf(v, \" MiB mem: \\033[37;1m%6d\\033[0m total, \\033[37;1m%6d\\033[0m free, \\033[37;1m%6d\\033[0m used, \\033[37;1m%8d\\033[0m buff/cached\\n\",\n\t\ts.Meminfo.MemTotal, s.Meminfo.MemFree, s.Meminfo.MemUsed,\n\t\ts.Meminfo.MemCached+s.Meminfo.MemBuffers+s.Meminfo.MemSlab)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t/* line4: swap usage, dirty and writeback */\n\t_, err = fmt.Fprintf(v, \"MiB swap: \\033[37;1m%6d\\033[0m total, \\033[37;1m%6d\\033[0m free, \\033[37;1m%6d\\033[0m used, \\033[37;1m%6d/%d\\033[0m dirty/writeback\\n\",\n\t\ts.Meminfo.SwapTotal, s.Meminfo.SwapFree, s.Meminfo.SwapUsed,\n\t\ts.Meminfo.MemDirty, s.Meminfo.MemWriteback)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func printStats(start time.Time, numFiles int64, totalFileSize int64, numErrors int) {\n\tfileSizeMB := totalFileSize / 1024 / 1024\n\texecTime := time.Since(start).Seconds()\n\tfmt.Println()\n\tfmt.Println(\"-------------- RUNTIME STATS -----------------\")\n\tfmt.Printf(\"hornet version : %s\\n\", version)\n\tfmt.Printf(\"date : %s\\n\", time.Now().Format(time.RFC1123))\n\tfmt.Printf(\"elapsed time : %f s\\n\", execTime)\n\tfmt.Printf(\"# file errors : %d\\n\", numErrors)\n\tfmt.Printf(\"files processed : %d\\n\", 
numFiles)\n\tfmt.Printf(\"data processed : %d MB\\n\", fileSizeMB)\n\tfmt.Printf(\"throughput : %f MB/s\\n\", float64(fileSizeMB)/execTime)\n}", "func (w *StandardWriter) formatScreen(output *ResultEvent) []byte {\n\tbuilder := &bytes.Buffer{}\n\tbuilder.WriteRune('[')\n\tbuilder.WriteString(color.CyanString(output.Time.Format(\"2006-01-02 15:04:05\")))\n\tbuilder.WriteString(\"] \")\n\tbuilder.WriteRune('[')\n\tbuilder.WriteString(color.RedString(output.Target))\n\tbuilder.WriteString(\"] \")\n\tbuilder.WriteRune('[')\n\tbuilder.WriteString(color.YellowString(output.Info.Service))\n\tbuilder.WriteString(\"] \")\n\n\tif output.Info.Service == \"ssl/tls\" || output.Info.Service == \"http\"{\n\t\tbuilder.WriteRune('[')\n\t\tbuilder.WriteString(color.YellowString(output.Info.Cert))\n\t\tbuilder.WriteString(\"] \")\n\t}\n\tif output.WorkingEvent != nil{\n\t\tswitch tmp := output.WorkingEvent.(type) {\n\t\tcase Ghttp.Result:\n\t\t\tbuilder.WriteString(tmp.ToString())\n\t\tdefault:\n\t\t\tbuilder.WriteString(conversion.ToString(tmp))\n\t\t}\n\t}else{\n\t\tbuilder.WriteRune('[')\n\t\tbuilder.WriteString(color.GreenString(output.Info.Banner))\n\t\tbuilder.WriteString(\"] \")\n\t}\n\treturn builder.Bytes()\n}", "func FormatMetrics(cs ...prometheus.Collector) string {\n\tregistry := prometheus.NewRegistry()\n\tregistry.MustRegister(cs...)\n\tmfs, _ := registry.Gather()\n\twriter := &bytes.Buffer{}\n\tenc := expfmt.NewEncoder(writer, expfmt.FmtText)\n\tfor _, mf := range mfs {\n\t\tenc.Encode(mf)\n\t}\n\treturn writer.String()\n}", "func NewStatsFormat(source, osType string) formatter.Format {\n\tif source == formatter.TableFormatKey {\n\t\tif osType == winOSType {\n\t\t\treturn formatter.Format(winDefaultStatsTableFormat)\n\t\t}\n\t\treturn formatter.Format(defaultStatsTableFormat)\n\t} else if source == formatter.AutoRangeFormatKey {\n\t\treturn formatter.Format(autoRangeStatsTableFormat)\n\t}\n\treturn formatter.Format(source)\n}", "func infoKindFmt(w io.Writer, x interface{}, format string) {\n\tfmt.Fprintf(w, infoKinds[x.(SpotKind)])\t// infoKind entries are html-escaped\n}", "func PoolStats(name string) (map[string]interface{}, error) {\n\tcmd := &Cmd{}\n\tres := make(map[string]interface{})\n\terr := NvlistIoctl(zfsHandle.Fd(), ZFS_IOC_POOL_STATS, name, cmd, nil, res, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif cmd.Cookie != 0 {\n\t\treturn nil, syscall.Errno(cmd.Cookie)\n\t}\n\treturn res, nil\n}", "func renderStatisticsLayout(uiView View) {\n\tp1 := widgets.NewParagraph()\n\tp1.Title = fmt.Sprintf(\" Statistics : last %v \", uiView.TimeframeRepr[uiView.ActiveTimeframe])\n\tp1.Text = fmt.Sprintf(\"Press s to switch to a %v timeframe.\\n\\n Statistics are loading ...\", uiView.TimeframeRepr[3-uiView.ActiveTimeframe])\n\tp1.TextStyle.Fg = ui.ColorYellow\n\tp1.SetRect(0, 3, 75, 26)\n\tp1.BorderStyle.Fg = ui.ColorCyan\n\n\tp2 := widgets.NewParagraph()\n\tp2.Title = fmt.Sprintf(\" Details %v \", Shorten(uiView.Urls[uiView.ActiveWebsite]))\n\tp2.Text = \"Press a website's id to view details\"\n\tp2.TextStyle.Fg = ui.ColorYellow\n\tp2.SetRect(0, 26, 75, 50)\n\tp2.BorderStyle.Fg = ui.ColorCyan\n\tui.Render(p1, p2)\n}", "func (f *FormatGeoFormat) Write(w io.Writer, wc Collection) error {\n\tif _, err := fmt.Fprintf(w, \"$FormatGEO\\r\\n\"); err != nil {\n\t\treturn err\n\t}\n\tfor _, wp := range wc {\n\t\tlatDeg, latMin, latSec, latHemi := dmsh(wp.Latitude, ns)\n\t\tlngDeg, lngMin, lngSec, lngHemi := dmsh(wp.Longitude, ew)\n\t\tif _, err := fmt.Fprintf(w, \"%-8s %c %02d %02d %05.2f %c %03d 
%02d %05.2f %4d %s\\r\\n\", wp.ID, latHemi, latDeg, latMin, latSec, lngHemi, lngDeg, lngMin, lngSec, int(wp.Altitude), wp.Description); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (backends Backends) log_stats() {\n\tfor _, backend := range backends {\n\t\tdowntime := backend.downtime\n\t\tif backend.failed {\n\t\t\tdowntime += time.Now().Sub(backend.failedTime)\n\t\t}\n\t\tlog.Printf(\"STATS: <%s> failed=%v (downtime=%v) requests=%d bytes=%d errors=%d last=%s\",\n\t\t\tbackend.address, backend.failed, downtime, backend.requests,\n\t\t\tbackend.transferred, backend.errors, backend.timestamp)\n\t}\n}", "func (ts *EventService) Stats(dim repository.Stats) []map[string]interface{} {\n\tallStats := make([]map[string]interface{}, 0, 1)\n\tfor k, v := range ts.eventRepo.Events(dim) {\n\t\tstat := map[string]interface{}{\n\t\t\tstring(dim): k,\n\t\t\t\"pageViews\": v,\n\t\t}\n\t\tallStats = append(allStats, stat)\n\t}\n\treturn allStats\n}", "func (m *memStats) Log(segment string) {\n\tvar s runtime.MemStats\n\truntime.ReadMemStats(&s)\n\tm.logf(\"\\n\\n[%s] %d-collections:\\n\", segment, m.Collections)\n\tm.indent += 2\n\n\tm.logf(\"General statistics\\n\")\n\tm.indent += 2\n\tm.logColumns(\n\t\t[]interface{}{\"Alloc\", m.fmtBytes(s.Alloc), \"(allocated and still in use)\"},\n\t\t[]interface{}{\"TotalAlloc\", m.fmtBytes(s.TotalAlloc), \"(allocated (even if freed))\"},\n\t\t[]interface{}{\"Sys\", m.fmtBytes(s.Sys), \"(obtained from system (sum of XxxSys below))\"},\n\t\t[]interface{}{\"Lookups\", s.Lookups, \"(number of pointer lookups)\"},\n\t\t[]interface{}{\"Mallocs\", s.Mallocs, \"(number of mallocs)\"},\n\t\t[]interface{}{\"Frees\", s.Frees, \"(number of frees)\"},\n\t)\n\tm.indent -= 2\n\tfmt.Printf(\"\\n\")\n\n\tm.logf(\"Main allocation heap statistics\\n\")\n\tm.indent += 2\n\tm.logColumns(\n\t\t[]interface{}{\"HeapAlloc\", m.fmtBytes(s.HeapAlloc), \"(allocated and still in use)\"},\n\t\t[]interface{}{\"HeapSys\", m.fmtBytes(s.HeapSys), \"(obtained from system)\"},\n\t\t[]interface{}{\"HeapIdle\", m.fmtBytes(s.HeapIdle), \"(in idle spans)\"},\n\t\t[]interface{}{\"HeapInuse\", m.fmtBytes(s.HeapInuse), \"(in non-idle span)\"},\n\t\t[]interface{}{\"HeapReleased\", m.fmtBytes(s.HeapReleased), \"(released to the OS)\"},\n\t\t[]interface{}{\"HeapObjects\", s.HeapObjects, \"(total number of allocated objects)\"},\n\t)\n\tm.indent -= 2\n\tfmt.Printf(\"\\n\")\n\n\tm.logf(\"Low-level fixed-size structure allocator statistics\\n\")\n\tm.indent += 2\n\tm.logColumns(\n\t\t[]interface{}{\"StackInuse\", m.fmtBytes(s.StackInuse), \"(used by stack allocator right now)\"},\n\t\t[]interface{}{\"StackSys\", m.fmtBytes(s.StackSys), \"(obtained from system)\"},\n\t\t[]interface{}{\"MSpanInuse\", m.fmtBytes(s.MSpanInuse), \"(mspan structures / in use now)\"},\n\t\t[]interface{}{\"MSpanSys\", m.fmtBytes(s.MSpanSys), \"(obtained from system)\"},\n\t\t[]interface{}{\"MCacheInuse\", m.fmtBytes(s.MCacheInuse), \"(in use now))\"},\n\t\t[]interface{}{\"MCacheSys\", m.fmtBytes(s.MCacheSys), \"(mcache structures / obtained from system)\"},\n\t\t[]interface{}{\"BuckHashSys\", m.fmtBytes(s.BuckHashSys), \"(profiling bucket hash table / obtained from system)\"},\n\t\t[]interface{}{\"GCSys\", m.fmtBytes(s.GCSys), \"(GC metadata / obtained form system)\"},\n\t\t[]interface{}{\"OtherSys\", m.fmtBytes(s.OtherSys), \"(other system allocations)\"},\n\t)\n\tfmt.Printf(\"\\n\")\n\tm.indent -= 2\n\n\t// TODO(slimsag): remaining GC fields may be useful later:\n\t/*\n\t // Garbage collector statistics.\n\t 
NextGC uint64 // next collection will happen when HeapAlloc ≥ this amount\n\t LastGC uint64 // end time of last collection (nanoseconds since 1970)\n\t PauseTotalNs uint64\n\t PauseNs [256]uint64 // circular buffer of recent GC pause durations, most recent at [(NumGC+255)%256]\n\t PauseEnd [256]uint64 // circular buffer of recent GC pause end times\n\t NumGC uint32\n\t EnableGC bool\n\t DebugGC bool\n\n\t // Per-size allocation statistics.\n\t // 61 is NumSizeClasses in the C code.\n\t BySize [61]struct {\n\t Size uint32\n\t Mallocs uint64\n\t Frees uint64\n\t }\n\t*/\n}", "func jsonStatsHandler(w http.ResponseWriter, r *http.Request) {\n\trunID := r.FormValue(\"runID\")\n\tstats, histogram, err := resultStore.GetStats(runID)\n\tif err != nil {\n\t\thttputils.ReportError(w, r, err, fmt.Sprintf(\"Failed to retrieve stats for run %s\", runID))\n\t}\n\tsendJsonResponse(w, map[string]map[string]int{\"stats\": stats, \"histogram\": histogram})\n}", "func writeSubscriptionMetrics(object *SubscriptionMetrics, stream *jsoniter.Stream) {\n\tcount := 0\n\tstream.WriteObjectStart()\n\tvar present_ bool\n\tpresent_ = object.bitmap_&1 != 0\n\tif present_ {\n\t\tif count > 0 {\n\t\t\tstream.WriteMore()\n\t\t}\n\t\tstream.WriteObjectField(\"cloud_provider\")\n\t\tstream.WriteString(object.cloudProvider)\n\t\tcount++\n\t}\n\tpresent_ = object.bitmap_&2 != 0 && object.computeNodesCpu != nil\n\tif present_ {\n\t\tif count > 0 {\n\t\t\tstream.WriteMore()\n\t\t}\n\t\tstream.WriteObjectField(\"compute_nodes_cpu\")\n\t\twriteClusterResource(object.computeNodesCpu, stream)\n\t\tcount++\n\t}\n\tpresent_ = object.bitmap_&4 != 0 && object.computeNodesMemory != nil\n\tif present_ {\n\t\tif count > 0 {\n\t\t\tstream.WriteMore()\n\t\t}\n\t\tstream.WriteObjectField(\"compute_nodes_memory\")\n\t\twriteClusterResource(object.computeNodesMemory, stream)\n\t\tcount++\n\t}\n\tpresent_ = object.bitmap_&8 != 0 && object.computeNodesSockets != nil\n\tif present_ {\n\t\tif count > 0 {\n\t\t\tstream.WriteMore()\n\t\t}\n\t\tstream.WriteObjectField(\"compute_nodes_sockets\")\n\t\twriteClusterResource(object.computeNodesSockets, stream)\n\t\tcount++\n\t}\n\tpresent_ = object.bitmap_&16 != 0\n\tif present_ {\n\t\tif count > 0 {\n\t\t\tstream.WriteMore()\n\t\t}\n\t\tstream.WriteObjectField(\"console_url\")\n\t\tstream.WriteString(object.consoleUrl)\n\t\tcount++\n\t}\n\tpresent_ = object.bitmap_&32 != 0 && object.cpu != nil\n\tif present_ {\n\t\tif count > 0 {\n\t\t\tstream.WriteMore()\n\t\t}\n\t\tstream.WriteObjectField(\"cpu\")\n\t\twriteClusterResource(object.cpu, stream)\n\t\tcount++\n\t}\n\tpresent_ = object.bitmap_&64 != 0\n\tif present_ {\n\t\tif count > 0 {\n\t\t\tstream.WriteMore()\n\t\t}\n\t\tstream.WriteObjectField(\"critical_alerts_firing\")\n\t\tstream.WriteFloat64(object.criticalAlertsFiring)\n\t\tcount++\n\t}\n\tpresent_ = object.bitmap_&128 != 0\n\tif present_ {\n\t\tif count > 0 {\n\t\t\tstream.WriteMore()\n\t\t}\n\t\tstream.WriteObjectField(\"health_state\")\n\t\tstream.WriteString(object.healthState)\n\t\tcount++\n\t}\n\tpresent_ = object.bitmap_&256 != 0 && object.memory != nil\n\tif present_ {\n\t\tif count > 0 {\n\t\t\tstream.WriteMore()\n\t\t}\n\t\tstream.WriteObjectField(\"memory\")\n\t\twriteClusterResource(object.memory, stream)\n\t\tcount++\n\t}\n\tpresent_ = object.bitmap_&512 != 0 && object.nodes != nil\n\tif present_ {\n\t\tif count > 0 {\n\t\t\tstream.WriteMore()\n\t\t}\n\t\tstream.WriteObjectField(\"nodes\")\n\t\twriteClusterMetricsNodes(object.nodes, stream)\n\t\tcount++\n\t}\n\tpresent_ = 
object.bitmap_&1024 != 0\n\tif present_ {\n\t\tif count > 0 {\n\t\t\tstream.WriteMore()\n\t\t}\n\t\tstream.WriteObjectField(\"openshift_version\")\n\t\tstream.WriteString(object.openshiftVersion)\n\t\tcount++\n\t}\n\tpresent_ = object.bitmap_&2048 != 0\n\tif present_ {\n\t\tif count > 0 {\n\t\t\tstream.WriteMore()\n\t\t}\n\t\tstream.WriteObjectField(\"operating_system\")\n\t\tstream.WriteString(object.operatingSystem)\n\t\tcount++\n\t}\n\tpresent_ = object.bitmap_&4096 != 0\n\tif present_ {\n\t\tif count > 0 {\n\t\t\tstream.WriteMore()\n\t\t}\n\t\tstream.WriteObjectField(\"operators_condition_failing\")\n\t\tstream.WriteFloat64(object.operatorsConditionFailing)\n\t\tcount++\n\t}\n\tpresent_ = object.bitmap_&8192 != 0\n\tif present_ {\n\t\tif count > 0 {\n\t\t\tstream.WriteMore()\n\t\t}\n\t\tstream.WriteObjectField(\"region\")\n\t\tstream.WriteString(object.region)\n\t\tcount++\n\t}\n\tpresent_ = object.bitmap_&16384 != 0 && object.sockets != nil\n\tif present_ {\n\t\tif count > 0 {\n\t\t\tstream.WriteMore()\n\t\t}\n\t\tstream.WriteObjectField(\"sockets\")\n\t\twriteClusterResource(object.sockets, stream)\n\t\tcount++\n\t}\n\tpresent_ = object.bitmap_&32768 != 0\n\tif present_ {\n\t\tif count > 0 {\n\t\t\tstream.WriteMore()\n\t\t}\n\t\tstream.WriteObjectField(\"state\")\n\t\tstream.WriteString(object.state)\n\t\tcount++\n\t}\n\tpresent_ = object.bitmap_&65536 != 0\n\tif present_ {\n\t\tif count > 0 {\n\t\t\tstream.WriteMore()\n\t\t}\n\t\tstream.WriteObjectField(\"state_description\")\n\t\tstream.WriteString(object.stateDescription)\n\t\tcount++\n\t}\n\tpresent_ = object.bitmap_&131072 != 0 && object.storage != nil\n\tif present_ {\n\t\tif count > 0 {\n\t\t\tstream.WriteMore()\n\t\t}\n\t\tstream.WriteObjectField(\"storage\")\n\t\twriteClusterResource(object.storage, stream)\n\t\tcount++\n\t}\n\tpresent_ = object.bitmap_&262144 != 0\n\tif present_ {\n\t\tif count > 0 {\n\t\t\tstream.WriteMore()\n\t\t}\n\t\tstream.WriteObjectField(\"subscription_cpu_total\")\n\t\tstream.WriteFloat64(object.subscriptionCpuTotal)\n\t\tcount++\n\t}\n\tpresent_ = object.bitmap_&524288 != 0\n\tif present_ {\n\t\tif count > 0 {\n\t\t\tstream.WriteMore()\n\t\t}\n\t\tstream.WriteObjectField(\"subscription_obligation_exists\")\n\t\tstream.WriteFloat64(object.subscriptionObligationExists)\n\t\tcount++\n\t}\n\tpresent_ = object.bitmap_&1048576 != 0\n\tif present_ {\n\t\tif count > 0 {\n\t\t\tstream.WriteMore()\n\t\t}\n\t\tstream.WriteObjectField(\"subscription_socket_total\")\n\t\tstream.WriteFloat64(object.subscriptionSocketTotal)\n\t\tcount++\n\t}\n\tpresent_ = object.bitmap_&2097152 != 0 && object.upgrade != nil\n\tif present_ {\n\t\tif count > 0 {\n\t\t\tstream.WriteMore()\n\t\t}\n\t\tstream.WriteObjectField(\"upgrade\")\n\t\twriteClusterUpgrade(object.upgrade, stream)\n\t\tcount++\n\t}\n\tstream.WriteObjectEnd()\n}", "func SessionStatHandler(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintf(w, fmt.Sprintf(\"{ \\\"Session\\\" : [ %s, %s ] }\", qpsData.QpsString(),\n\t\tfmt.Sprintf(\"{ \\\"total requests\\\" : %s }\", requestStat.String())))\n}", "func (sr *ServicedStatsReporter) updateStats() {\n\t// Stats for host.\n\tsr.updateHostStats()\n\t// Stats for the containers.\n\tstates, err := zkservice.GetHostStates(sr.conn, \"\", sr.hostID)\n\tif err != nil {\n\t\tplog.WithFields(logrus.Fields{\n\t\t\t\"conn\": sr.conn,\n\t\t\t\"hostID\": sr.hostID,\n\t\t}).WithError(err).Error(\"Could not get host states from Zookeeper\")\n\t}\n\n\tfor _, rs := range states {\n\t\tif rs.ContainerID != \"\" 
{\n\n\t\t\tcontainerRegistry := sr.getOrCreateContainerRegistry(rs.ServiceID, rs.InstanceID)\n\t\t\tstats, err := sr.docker.GetContainerStats(rs.ContainerID, 30*time.Second)\n\t\t\tif err != nil || stats == nil { //stats may be nil if service is shutting down\n\t\t\t\tplog.WithFields(logrus.Fields{\n\t\t\t\t\t\"serviceID\": rs.ServiceID,\n\t\t\t\t\t\"instanceID\": rs.InstanceID,\n\t\t\t\t}).WithError(err).Warn(\"Couldn't get stats from docker\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Check to see if we have the previous stats for this running instance\n\t\t\tusePreviousStats := true\n\t\t\tkey := rs.ContainerID\n\t\t\tif _, found := sr.previousStats[key]; !found {\n\t\t\t\tsr.previousStats[key] = make(map[string]uint64)\n\t\t\t\tusePreviousStats = false\n\t\t\t}\n\n\t\t\t// CPU Stats\n\t\t\t// TODO: Consolidate this into a single object that both ISVCS and non-ISVCS can use\n\t\t\tvar (\n\t\t\t\tkernelCPUPercent float64\n\t\t\t\tuserCPUPercent float64\n\t\t\t\ttotalCPUChange uint64\n\t\t\t)\n\n\t\t\tkernelCPU := stats.CPUStats.CPUUsage.UsageInKernelmode\n\t\t\tuserCPU := stats.CPUStats.CPUUsage.UsageInUsermode\n\t\t\ttotalCPU := stats.CPUStats.SystemCPUUsage\n\n\t\t\t// Total CPU Cycles\n\t\t\tpreviousTotalCPU, found := sr.previousStats[key][\"totalCPU\"]\n\t\t\tif found {\n\t\t\t\tif totalCPU <= previousTotalCPU {\n\t\t\t\t\tplog.WithFields(logrus.Fields{\n\t\t\t\t\t\t\"totalCPU\": totalCPU,\n\t\t\t\t\t\t\"previousTotalCPU\": previousTotalCPU,\n\t\t\t\t\t}).Debug(\"Change in total CPU usage was nonpositive, skipping CPU stats update\")\n\t\t\t\t\tusePreviousStats = false\n\t\t\t\t} else {\n\t\t\t\t\ttotalCPUChange = totalCPU - previousTotalCPU\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tusePreviousStats = false\n\t\t\t}\n\t\t\tsr.previousStats[key][\"totalCPU\"] = totalCPU\n\n\t\t\t// CPU Cycles in Kernel mode\n\t\t\tif previousKernelCPU, found := sr.previousStats[key][\"kernelCPU\"]; found && usePreviousStats {\n\t\t\t\tkernelCPUChange := kernelCPU - previousKernelCPU\n\t\t\t\tkernelCPUPercent = (float64(kernelCPUChange) / float64(totalCPUChange)) * float64(len(stats.CPUStats.CPUUsage.PercpuUsage)) * 100.0\n\t\t\t} else {\n\t\t\t\tusePreviousStats = false\n\t\t\t}\n\t\t\tsr.previousStats[key][\"kernelCPU\"] = kernelCPU\n\n\t\t\t// CPU Cycles in User mode\n\t\t\tif previousUserCPU, found := sr.previousStats[key][\"userCPU\"]; found && usePreviousStats {\n\t\t\t\tuserCPUChange := userCPU - previousUserCPU\n\t\t\t\tuserCPUPercent = (float64(userCPUChange) / float64(totalCPUChange)) * float64(len(stats.CPUStats.CPUUsage.PercpuUsage)) * 100.0\n\t\t\t} else {\n\t\t\t\tusePreviousStats = false\n\t\t\t}\n\t\t\tsr.previousStats[key][\"userCPU\"] = userCPU\n\n\t\t\t// Update CPU metrics\n\t\t\tif usePreviousStats {\n\t\t\t\tmetrics.GetOrRegisterGaugeFloat64(\"docker.usageinkernelmode\", containerRegistry).Update(kernelCPUPercent)\n\t\t\t\tmetrics.GetOrRegisterGaugeFloat64(\"docker.usageinusermode\", containerRegistry).Update(userCPUPercent)\n\t\t\t} else {\n\t\t\t\tplog.WithFields(logrus.Fields{\n\t\t\t\t\t\"serviceID\": rs.ServiceID,\n\t\t\t\t\t\"instanceID\": rs.InstanceID,\n\t\t\t\t}).Debug(\"Skipping CPU stats, no previous values to compare to\")\n\t\t\t}\n\n\t\t\t// Memory Stats\n\t\t\tpgFault := int64(stats.MemoryStats.Stats.Pgfault)\n\t\t\ttotalRSS := int64(stats.MemoryStats.Stats.TotalRss)\n\t\t\tcache := int64(stats.MemoryStats.Stats.Cache)\n\t\t\tif pgFault < 0 || totalRSS < 0 || cache < 0 {\n\t\t\t\tplog.WithFields(logrus.Fields{\n\t\t\t\t\t\"serviceID\": 
rs.ServiceID,\n\t\t\t\t\t\"instanceID\": rs.InstanceID,\n\t\t\t\t}).Debug(\"Memory metric value too big for int64\")\n\t\t\t}\n\t\t\tmetrics.GetOrRegisterGauge(\"cgroup.memory.pgmajfault\", containerRegistry).Update(pgFault)\n\t\t\tmetrics.GetOrRegisterGauge(\"cgroup.memory.totalrss\", containerRegistry).Update(totalRSS)\n\t\t\tmetrics.GetOrRegisterGauge(\"cgroup.memory.cache\", containerRegistry).Update(cache)\n\n\t\t} else {\n\t\t\tplog.WithFields(logrus.Fields{\n\t\t\t\t\"serviceID\": rs.ServiceID,\n\t\t\t\t\"instanceID\": rs.InstanceID,\n\t\t\t}).Debug(\"Skipping stats update, no container ID exists\")\n\t\t}\n\t}\n\t// Clean out old container registries\n\tsr.removeStaleRegistries(states)\n}", "func getTokensStats(w http.ResponseWriter, r *http.Request) error {\n\tctx := r.Context()\n\n\ttimeStart, timeEnd, _, err := parseTimes(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// set timeFrame to get sums\n\ttimeFrame := timeEnd.Sub(timeStart)\n\n\tsortKey := r.URL.Query().Get(\"sort\")\n\tif sortKey == \"\" {\n\t\tsortKey = \"-liquidityUSD\"\n\t}\n\t// slightly confusingly, if + not provided, do asc (even tho -liquidityUSD is default)\n\tsortDesc := sortKey == \"\" || strings.HasPrefix(sortKey, \"-\")\n\tif strings.HasPrefix(sortKey, \"-\") || strings.HasPrefix(sortKey, \"+\") {\n\t\tsortKey = sortKey[1:]\n\t}\n\n\tstats, err := db.GetTokenBuckets(ctx, \"\", timeStart, timeEnd, timeFrame)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// TODO(reed): we prob can't do this here in combination with pagination, we\n\t// need to bound the domain for time_frame, push down to db and not use\n\t// client side aggregation to have rows for each time_frame in order to avoid\n\t// data overload? on the other hand, if we load all the data into the cache\n\t// and do paging / sorting of the cached data, that makes more sense? needs a\n\t// good think since firebase doesn't support doing sums and then paging over\n\t// it (we have to do them)\n\tsortTokenBuckets(stats, sortKey, sortDesc)\n\n\tgotils.WriteObject(w, http.StatusOK, map[string]interface{}{\n\t\t\"stats\": stats,\n\t})\n\n\treturn nil\n}" ]
[ "0.61173224", "0.6038604", "0.5830485", "0.57845944", "0.5730382", "0.5693483", "0.56528944", "0.5436239", "0.5433749", "0.53959244", "0.5391447", "0.53292865", "0.5329039", "0.53219116", "0.5286652", "0.5284806", "0.52831936", "0.52702236", "0.52642286", "0.526065", "0.5255122", "0.5250148", "0.524361", "0.5205274", "0.5196625", "0.5170019", "0.5145844", "0.51447284", "0.5138525", "0.5120584", "0.5116853", "0.5113276", "0.51114905", "0.510827", "0.51017374", "0.5099634", "0.5041147", "0.50196564", "0.50069577", "0.49885008", "0.49850228", "0.4979792", "0.49785817", "0.4972941", "0.49701494", "0.49574", "0.4955793", "0.4941858", "0.49268118", "0.49233633", "0.4919181", "0.4914587", "0.4914218", "0.49072284", "0.48970428", "0.48968104", "0.4886367", "0.48838165", "0.4882166", "0.48783273", "0.48729146", "0.48456645", "0.4842333", "0.4842277", "0.48380867", "0.48057026", "0.4802826", "0.4800099", "0.47892448", "0.47819278", "0.47790733", "0.4776057", "0.47740877", "0.4766358", "0.47543514", "0.47525525", "0.47472793", "0.47355214", "0.47347856", "0.46909028", "0.46900705", "0.4689703", "0.46665952", "0.46579275", "0.46491343", "0.4648615", "0.46459138", "0.46288988", "0.4608914", "0.46045536", "0.46036366", "0.460099", "0.46006525", "0.46006367", "0.45984003", "0.45957258", "0.4593899", "0.4590564", "0.45879847", "0.45866343" ]
0.75491524
0